Example #1
std::string get_codec_name(AVCodecContext *pCodecCtx)
{
  AVCodecID id = pCodecCtx->codec_id;

  // Grab the codec
  AVCodec *p = avcodec_find_decoder(id);
  const char *profile = p ? av_get_profile_name(p, pCodecCtx->profile) : nullptr;

  std::ostringstream codec_name;

  const char *nice_name = nullptr;
  for (int i = 0; i < countof(nice_codec_names); ++i)
  {
    if (nice_codec_names[i].id == id) {
      nice_name = nice_codec_names[i].name;
      break;
    }
  }

  if (id == AV_CODEC_ID_DTS && pCodecCtx->codec_tag == 0xA2) {
    profile = "DTS Express";
  }

  if (id == AV_CODEC_ID_H264 && profile) {
    codec_name << nice_name << " " << tolower(profile);
    if (pCodecCtx->level && pCodecCtx->level != FF_LEVEL_UNKNOWN && pCodecCtx->level < 1000) {
      char l_buf[5];
      sprintf_s(l_buf, "%.1f", pCodecCtx->level / 10.0);
      codec_name << " L" << l_buf;
    }
  } else if (id == AV_CODEC_ID_VC1 && profile) {
    codec_name << nice_name << " " << tolower(profile);
    if (pCodecCtx->level != FF_LEVEL_UNKNOWN) {
      codec_name << " L" << pCodecCtx->level;
    }
  } else if (id == AV_CODEC_ID_DTS && profile) {
    codec_name << tolower(profile);
  } else if (id == AV_CODEC_ID_JPEG2000 && profile) {
    codec_name << tolower(profile);
  } else if (nice_name) {
    codec_name << nice_name;
    if (profile)
      codec_name << " " << tolower(profile);
  } else if (p && p->name) {
    codec_name << p->name;
    if (profile)
      codec_name << " " << tolower(profile);
  } else if (pCodecCtx->codec_name[0] != '\0') {
    codec_name << pCodecCtx->codec_name;
  } else {
    /* output avi tags */
    char buf[32];
    av_get_codec_tag_string(buf, sizeof(buf), pCodecCtx->codec_tag);
    codec_name << buf;
    sprintf_s(buf, "0x%04X", pCodecCtx->codec_tag);
    codec_name  << " / " << buf;
  }
  return codec_name.str();
}
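
The snippet above relies on a project-specific nice_codec_names table and a countof() helper that are not shown here. A minimal sketch of what such helpers could look like (hypothetical names and contents, assuming only that each entry pairs an AVCodecID with a display name):

// Hypothetical helpers assumed by get_codec_name() above; the real project
// ships its own table and its own countof() definition.
#include <cstddef>

extern "C" {
#include <libavcodec/avcodec.h>
}

struct nice_codec_name {
  AVCodecID id;
  const char *name;
};

// Illustrative entries only.
static const nice_codec_name nice_codec_names[] = {
  { AV_CODEC_ID_H264, "H.264" },
  { AV_CODEC_ID_VC1,  "VC-1"  },
  { AV_CODEC_ID_DTS,  "DTS"   },
};

template <typename T, std::size_t N>
constexpr std::size_t countof(const T (&)[N]) { return N; }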
Example #2
/** Returns video codec tag (fourcc) */
wxString wxFfmpegMediaDecoder::GetCodecTag(unsigned int streamIndex) {
	if (m_formatCtx == NULL)
		return wxT("");
	AVStream *st = m_formatCtx->streams[streamIndex];
	if (st->codec == NULL || st->codec->codec_tag == 0)
		return wxT("");
	char buf[32];
	if (av_get_codec_tag_string(buf, sizeof(buf), st->codec->codec_tag) <= 0)
		return wxT("");
	return wxString(buf, wxConvLocal);
}
Example #3
static void print_pix_fmt_fourccs(enum AVPixelFormat pix_fmt, char sep)
{
    int i;

    for (i = 0; ff_raw_pix_fmt_tags[i].pix_fmt != AV_PIX_FMT_NONE; i++) {
        if (ff_raw_pix_fmt_tags[i].pix_fmt == pix_fmt) {
            char buf[32];
            av_get_codec_tag_string(buf, sizeof(buf), ff_raw_pix_fmt_tags[i].fourcc);
            printf("%s%c", buf, sep);
        }
    }
}
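
All of these samples ultimately call av_get_codec_tag_string(), which renders a codec tag/FourCC as a printable, escaped string. A minimal standalone sketch of that call (assuming an FFmpeg release old enough to still provide this helper) looks like this:

/* Minimal sketch: format a FourCC with av_get_codec_tag_string().
 * Assumes an older FFmpeg where this helper is still available. */
#include <stdio.h>
#include <libavcodec/avcodec.h>   /* av_get_codec_tag_string() */
#include <libavutil/common.h>     /* MKTAG() */

int main(void)
{
    unsigned int tag = MKTAG('H', '2', '6', '4');
    char buf[32];

    /* Writes a printable form of the tag into buf, escaping unusual bytes. */
    av_get_codec_tag_string(buf, sizeof(buf), tag);
    printf("tag %s / 0x%08x\n", buf, tag);
    return 0;
}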
Example #4
static enum AVPixelFormat ADM_LIBVA_getFormat(struct AVCodecContext *avctx,  const enum AVPixelFormat *fmt)
{
    int i;
    ADM_info("[LIBVA]: GetFormat\n");
    AVCodecID id=AV_CODEC_ID_NONE;
    AVPixelFormat c;
    AVPixelFormat outPix;
    for(i=0;fmt[i]!=AV_PIX_FMT_NONE;i++)
    {
        c=fmt[i];
        char name[300]={0};
        av_get_pix_fmt_string(name,sizeof(name),c);
        ADM_info("[LIBVA]: Evaluating PIX_FMT %d,%s\n",c,name);  
        av_get_codec_tag_string(name,sizeof(name),avctx->codec_id);
        ADM_info("\t  Evaluating codec %d,%s\n",avctx->codec_id,name);  
        
        if(c!=AV_PIX_FMT_VAAPI_VLD) continue;
#define FMT_V_CHECK(x,y)      case AV_CODEC_ID_##x:   outPix=AV_PIX_FMT_VAAPI_VLD;id=avctx->codec_id;break;
        
        
        switch(avctx->codec_id)  //AV_CODEC_ID_H265
        {
            FMT_V_CHECK(H264,H264)
            FMT_V_CHECK(H265,H265)
            FMT_V_CHECK(MPEG1VIDEO,MPEG1)
            FMT_V_CHECK(MPEG2VIDEO,MPEG2)
            FMT_V_CHECK(WMV3,WMV3)
            FMT_V_CHECK(VC1,VC1)
            FMT_V_CHECK(VP9,VP9)
            default: 
                ADM_info("No hw support for format %d\n",avctx->codec_id);
                continue;
                break;
        }
        break;
    }
    if(id==AV_CODEC_ID_NONE)
    {
        
        return AV_PIX_FMT_NONE;
    }
    // Finish initialization of LIBVA decoder
    const AVHWAccel *accel=ADM_acceleratedDecoderFF::parseHwAccel(outPix,id,AV_PIX_FMT_VAAPI_VLD);
    if(accel)
    {
        ADM_info("Found matching hw accelerator : %s\n",accel->name);
        ADM_info("Successfully setup hw accel\n");
        return AV_PIX_FMT_VAAPI_VLD;
    }
    return AV_PIX_FMT_NONE;
}
Example #5
static void control_port_cb(MMAL_PORT_T *port, MMAL_BUFFER_HEADER_T *buffer)
{
    AVCodecContext *avctx = (AVCodecContext*)port->userdata;
    MMAL_STATUS_T status;

    if (buffer->cmd == MMAL_EVENT_ERROR) {
        status = *(uint32_t *)buffer->data;
        av_log(avctx, AV_LOG_ERROR, "MMAL error %d on control port\n", (int)status);
    } else {
        char s[20];
        av_get_codec_tag_string(s, sizeof(s), buffer->cmd);
        av_log(avctx, AV_LOG_WARNING, "Unknown MMAL event %s on control port\n", s);
    }

    mmal_buffer_header_release(buffer);
}
Example #6
static int init_muxer(AVFormatContext *s, AVDictionary **options)
{
    int ret = 0, i;
    AVStream *st;
    AVDictionary *tmp = NULL;
    AVCodecContext *codec = NULL;
    AVOutputFormat *of = s->oformat;
    const AVCodecDescriptor *desc;

    if (options)
        av_dict_copy(&tmp, *options, 0);

    if ((ret = av_opt_set_dict(s, &tmp)) < 0)
        goto fail;

#if FF_API_LAVF_BITEXACT
    if (s->nb_streams && s->streams[0]->codec->flags & AV_CODEC_FLAG_BITEXACT)
        s->flags |= AVFMT_FLAG_BITEXACT;
#endif

    // some sanity checks
    if (s->nb_streams == 0 && !(of->flags & AVFMT_NOSTREAMS)) {
        av_log(s, AV_LOG_ERROR, "no streams\n");
        ret = AVERROR(EINVAL);
        goto fail;
    }

    for (i = 0; i < s->nb_streams; i++) {
        st    = s->streams[i];
        codec = st->codec;

#if FF_API_LAVF_CODEC_TB
FF_DISABLE_DEPRECATION_WARNINGS
        if (!st->time_base.num && codec->time_base.num) {
            av_log(s, AV_LOG_WARNING, "Using AVStream.codec.time_base as a "
                   "timebase hint to the muxer is deprecated. Set "
                   "AVStream.time_base instead.\n");
            avpriv_set_pts_info(st, 64, codec->time_base.num, codec->time_base.den);
        }
FF_ENABLE_DEPRECATION_WARNINGS
#endif

        if (!st->time_base.num) {
            /* fall back on the default timebase values */
            if (codec->codec_type == AVMEDIA_TYPE_AUDIO && codec->sample_rate)
                avpriv_set_pts_info(st, 64, 1, codec->sample_rate);
            else
                avpriv_set_pts_info(st, 33, 1, 90000);
        }

        switch (codec->codec_type) {
        case AVMEDIA_TYPE_AUDIO:
            if (codec->sample_rate <= 0) {
                av_log(s, AV_LOG_ERROR, "sample rate not set\n");
                ret = AVERROR(EINVAL);
                goto fail;
            }
            if (!codec->block_align)
                codec->block_align = codec->channels *
                                     av_get_bits_per_sample(codec->codec_id) >> 3;
            break;
        case AVMEDIA_TYPE_VIDEO:
            if ((codec->width <= 0 || codec->height <= 0) &&
                !(of->flags & AVFMT_NODIMENSIONS)) {
                av_log(s, AV_LOG_ERROR, "dimensions not set\n");
                ret = AVERROR(EINVAL);
                goto fail;
            }

            if (av_cmp_q(st->sample_aspect_ratio,
                         codec->sample_aspect_ratio)) {
                if (st->sample_aspect_ratio.num != 0 &&
                    st->sample_aspect_ratio.den != 0 &&
                    codec->sample_aspect_ratio.num != 0 &&
                    codec->sample_aspect_ratio.den != 0) {
                    av_log(s, AV_LOG_ERROR, "Aspect ratio mismatch between muxer "
                            "(%d/%d) and encoder layer (%d/%d)\n",
                            st->sample_aspect_ratio.num, st->sample_aspect_ratio.den,
                            codec->sample_aspect_ratio.num,
                            codec->sample_aspect_ratio.den);
                    ret = AVERROR(EINVAL);
                    goto fail;
                }
            }
            break;
        }

        desc = avcodec_descriptor_get(codec->codec_id);
        if (desc && desc->props & AV_CODEC_PROP_REORDER)
            st->internal->reorder = 1;

        if (of->codec_tag) {
            if (codec->codec_tag &&
                codec->codec_id == AV_CODEC_ID_RAWVIDEO &&
                !av_codec_get_tag(of->codec_tag, codec->codec_id) &&
                !validate_codec_tag(s, st)) {
                // the current rawvideo encoding system ends up setting
                // the wrong codec_tag for avi, we override it here
                codec->codec_tag = 0;
            }
            if (codec->codec_tag) {
                if (!validate_codec_tag(s, st)) {
                    char tagbuf[32];
                    av_get_codec_tag_string(tagbuf, sizeof(tagbuf), codec->codec_tag);
                    av_log(s, AV_LOG_ERROR,
                           "Tag %s/0x%08x incompatible with output codec id '%d'\n",
                           tagbuf, codec->codec_tag, codec->codec_id);
                    ret = AVERROR_INVALIDDATA;
                    goto fail;
                }
            } else
                codec->codec_tag = av_codec_get_tag(of->codec_tag, codec->codec_id);
        }

        if (of->flags & AVFMT_GLOBALHEADER &&
            !(codec->flags & AV_CODEC_FLAG_GLOBAL_HEADER))
            av_log(s, AV_LOG_WARNING,
                   "Codec for stream %d does not use global headers "
                   "but container format requires global headers\n", i);

        if (codec->codec_type != AVMEDIA_TYPE_ATTACHMENT)
            s->internal->nb_interleaved_streams++;
    }

    if (!s->priv_data && of->priv_data_size > 0) {
        s->priv_data = av_mallocz(of->priv_data_size);
        if (!s->priv_data) {
            ret = AVERROR(ENOMEM);
            goto fail;
        }
        if (of->priv_class) {
            *(const AVClass **)s->priv_data = of->priv_class;
            av_opt_set_defaults(s->priv_data);
            if ((ret = av_opt_set_dict(s->priv_data, &tmp)) < 0)
                goto fail;
        }
    }

    /* set muxer identification string */
    if (!(s->flags & AVFMT_FLAG_BITEXACT)) {
        av_dict_set(&s->metadata, "encoder", LIBAVFORMAT_IDENT, 0);
    }

    if (options) {
         av_dict_free(options);
         *options = tmp;
    }

    return 0;

fail:
    av_dict_free(&tmp);
    return ret;
}
Example #7
static void show_stream(AVFormatContext *fmt_ctx, int stream_idx)
{
    AVStream *stream = fmt_ctx->streams[stream_idx];
    AVCodecContext *dec_ctx;
    AVCodec *dec;
    char val_str[128];
    AVDictionaryEntry *tag = NULL;
    AVRational display_aspect_ratio;

    printf("[STREAM]\n");

    printf("index=%d\n",        stream->index);

    if ((dec_ctx = stream->codec)) {
        if ((dec = dec_ctx->codec)) {
            printf("codec_name=%s\n",         dec->name);
            printf("codec_long_name=%s\n",    dec->long_name);
        } else {
            printf("codec_name=unknown\n");
        }

        printf("codec_type=%s\n",         media_type_string(dec_ctx->codec_type));
        printf("codec_time_base=%d/%d\n", dec_ctx->time_base.num, dec_ctx->time_base.den);

        /* print AVI/FourCC tag */
        av_get_codec_tag_string(val_str, sizeof(val_str), dec_ctx->codec_tag);
        printf("codec_tag_string=%s\n", val_str);
        printf("codec_tag=0x%04x\n", dec_ctx->codec_tag);

        switch (dec_ctx->codec_type) {
        case AVMEDIA_TYPE_VIDEO:
            printf("width=%d\n",                   dec_ctx->width);
            printf("height=%d\n",                  dec_ctx->height);
            printf("has_b_frames=%d\n",            dec_ctx->has_b_frames);
            if (dec_ctx->sample_aspect_ratio.num) {
                printf("sample_aspect_ratio=%d:%d\n", dec_ctx->sample_aspect_ratio.num,
                                                      dec_ctx->sample_aspect_ratio.den);
                av_reduce(&display_aspect_ratio.num, &display_aspect_ratio.den,
                          dec_ctx->width  * dec_ctx->sample_aspect_ratio.num,
                          dec_ctx->height * dec_ctx->sample_aspect_ratio.den,
                          1024*1024);
                printf("display_aspect_ratio=%d:%d\n", display_aspect_ratio.num,
                                                       display_aspect_ratio.den);
            }
            printf("pix_fmt=%s\n",                 dec_ctx->pix_fmt != PIX_FMT_NONE ?
                   av_pix_fmt_descriptors[dec_ctx->pix_fmt].name : "unknown");
            break;

        case AVMEDIA_TYPE_AUDIO:
            printf("sample_rate=%s\n",             value_string(val_str, sizeof(val_str),
                                                                dec_ctx->sample_rate,
                                                                unit_hertz_str));
            printf("channels=%d\n",                dec_ctx->channels);
            printf("bits_per_sample=%d\n",         av_get_bits_per_sample(dec_ctx->codec_id));
            break;
        }
    } else {
        printf("codec_type=unknown\n");
    }

    if (fmt_ctx->iformat->flags & AVFMT_SHOW_IDS)
        printf("id=0x%x\n", stream->id);
    printf("r_frame_rate=%d/%d\n",         stream->r_frame_rate.num,   stream->r_frame_rate.den);
    printf("avg_frame_rate=%d/%d\n",       stream->avg_frame_rate.num, stream->avg_frame_rate.den);
    printf("time_base=%d/%d\n",            stream->time_base.num,      stream->time_base.den);
    printf("start_time=%s\n",   time_value_string(val_str, sizeof(val_str), stream->start_time,
                                                  &stream->time_base));
    printf("duration=%s\n",     time_value_string(val_str, sizeof(val_str), stream->duration,
                                                  &stream->time_base));
    if (stream->nb_frames)
        printf("nb_frames=%"PRId64"\n",    stream->nb_frames);

    while ((tag = av_dict_get(stream->metadata, "", tag, AV_DICT_IGNORE_SUFFIX)))
        printf("TAG:%s=%s\n", tag->key, tag->value);

    printf("[/STREAM]\n");
}
Example #8
static void show_stream(AVFormatContext *fmt_ctx, int stream_idx)
{
    AVStream *stream = fmt_ctx->streams[stream_idx];
    AVCodecContext *dec_ctx;
    const AVCodec *dec;
    const char *profile;
    char val_str[128];
    AVRational display_aspect_ratio, *sar = NULL;
    const AVPixFmtDescriptor *desc;

    probe_object_header("stream");

    probe_int("index", stream->index);

    if ((dec_ctx = stream->codec)) {
        if ((dec = dec_ctx->codec)) {
            probe_str("codec_name", dec->name);
            probe_str("codec_long_name", dec->long_name);
        } else {
            probe_str("codec_name", "unknown");
        }

        probe_str("codec_type", media_type_string(dec_ctx->codec_type));
        probe_str("codec_time_base",
                  rational_string(val_str, sizeof(val_str),
                                  "/", &dec_ctx->time_base));

        /* print AVI/FourCC tag */
        av_get_codec_tag_string(val_str, sizeof(val_str), dec_ctx->codec_tag);
        probe_str("codec_tag_string", val_str);
        probe_str("codec_tag", tag_string(val_str, sizeof(val_str),
                                          dec_ctx->codec_tag));

        /* print profile, if there is one */
        if (dec && (profile = av_get_profile_name(dec, dec_ctx->profile)))
            probe_str("profile", profile);

        switch (dec_ctx->codec_type) {
        case AVMEDIA_TYPE_VIDEO:
            probe_int("width", dec_ctx->width);
            probe_int("height", dec_ctx->height);
            probe_int("coded_width", dec_ctx->coded_width);
            probe_int("coded_height", dec_ctx->coded_height);
            probe_int("has_b_frames", dec_ctx->has_b_frames);
            if (dec_ctx->sample_aspect_ratio.num)
                sar = &dec_ctx->sample_aspect_ratio;
            else if (stream->sample_aspect_ratio.num)
                sar = &stream->sample_aspect_ratio;

            if (sar) {
                probe_str("sample_aspect_ratio",
                          rational_string(val_str, sizeof(val_str), ":", sar));
                av_reduce(&display_aspect_ratio.num, &display_aspect_ratio.den,
                          dec_ctx->width  * sar->num, dec_ctx->height * sar->den,
                          1024*1024);
                probe_str("display_aspect_ratio",
                          rational_string(val_str, sizeof(val_str), ":",
                          &display_aspect_ratio));
            }
            desc = av_pix_fmt_desc_get(dec_ctx->pix_fmt);
            probe_str("pix_fmt", desc ? desc->name : "unknown");
            probe_int("level", dec_ctx->level);

            probe_str("color_range", av_color_range_name(dec_ctx->color_range));
            probe_str("color_space", av_color_space_name(dec_ctx->colorspace));
            probe_str("color_trc", av_color_transfer_name(dec_ctx->color_trc));
            probe_str("color_pri", av_color_primaries_name(dec_ctx->color_primaries));
            probe_str("chroma_loc", av_chroma_location_name(dec_ctx->chroma_sample_location));
            break;

        case AVMEDIA_TYPE_AUDIO:
            probe_str("sample_rate",
                      value_string(val_str, sizeof(val_str),
                                   dec_ctx->sample_rate,
                                   unit_hertz_str));
            probe_int("channels", dec_ctx->channels);
            probe_int("bits_per_sample",
                      av_get_bits_per_sample(dec_ctx->codec_id));
            break;
        }
    } else {
        probe_str("codec_type", "unknown");
    }

    if (fmt_ctx->iformat->flags & AVFMT_SHOW_IDS)
        probe_int("id", stream->id);
    probe_str("avg_frame_rate",
              rational_string(val_str, sizeof(val_str), "/",
              &stream->avg_frame_rate));
    if (dec_ctx->bit_rate)
        probe_str("bit_rate",
                  value_string(val_str, sizeof(val_str),
                               dec_ctx->bit_rate, unit_bit_per_second_str));
    probe_str("time_base",
              rational_string(val_str, sizeof(val_str), "/",
              &stream->time_base));
    probe_str("start_time",
              time_value_string(val_str, sizeof(val_str),
                                stream->start_time, &stream->time_base));
    probe_str("duration",
              time_value_string(val_str, sizeof(val_str),
                                stream->duration, &stream->time_base));
    if (stream->nb_frames)
        probe_int("nb_frames", stream->nb_frames);

    probe_dict(stream->metadata, "tags");

    if (stream->nb_side_data) {
        int i, j;
        probe_object_header("sidedata");
        for (i = 0; i < stream->nb_side_data; i++) {
            const AVPacketSideData* sd = &stream->side_data[i];
            switch (sd->type) {
            case AV_PKT_DATA_DISPLAYMATRIX:
                probe_object_header("displaymatrix");
                probe_array_header("matrix", 1);
                for (j = 0; j < 9; j++)
                    probe_int(NULL, ((int32_t *)sd->data)[j]);
                probe_array_footer("matrix", 1);
                probe_int("rotation",
                          av_display_rotation_get((int32_t *)sd->data));
                probe_object_footer("displaymatrix");
                break;
            }
        }
        probe_object_footer("sidedata");
    }

    probe_object_footer("stream");
}
Example #9
static int apng_read_packet(AVFormatContext *s, AVPacket *pkt)
{
    APNGDemuxContext *ctx = s->priv_data;
    int ret;
    int64_t size;
    AVIOContext *pb = s->pb;
    uint32_t len, tag;

    /*
     * fcTL chunk length, in bytes:
     *  4 (length)
     *  4 (tag)
     * 26 (actual chunk)
     *  4 (crc) bytes
     * and needed next:
     *  4 (length)
     *  4 (tag (must be fdAT or IDAT))
     */
    /* if num_play is not 1, then the seekback is already guaranteed */
    if (ctx->num_play == 1 && (ret = ffio_ensure_seekback(pb, 46)) < 0)
        return ret;

    len = avio_rb32(pb);
    tag = avio_rl32(pb);
    switch (tag) {
    case MKTAG('f', 'c', 'T', 'L'):
        if (len != 26)
            return AVERROR_INVALIDDATA;

        if ((ret = decode_fctl_chunk(s, ctx, pkt)) < 0)
            return ret;

        /* fcTL must precede fdAT or IDAT */
        len = avio_rb32(pb);
        tag = avio_rl32(pb);
        if (len > 0x7fffffff ||
            tag != MKTAG('f', 'd', 'A', 'T') &&
            tag != MKTAG('I', 'D', 'A', 'T'))
            return AVERROR_INVALIDDATA;

        size = 38 /* fcTL */ + 8 /* len, tag */ + len + 4 /* crc */;
        if (size > INT_MAX)
            return AVERROR(EINVAL);

        if ((ret = avio_seek(pb, -46, SEEK_CUR)) < 0 ||
            (ret = av_append_packet(pb, pkt, size)) < 0)
            return ret;

        if (ctx->num_play == 1 && (ret = ffio_ensure_seekback(pb, 8)) < 0)
            return ret;

        len = avio_rb32(pb);
        tag = avio_rl32(pb);
        while (tag &&
               tag != MKTAG('f', 'c', 'T', 'L') &&
               tag != MKTAG('I', 'E', 'N', 'D')) {
            if (len > 0x7fffffff)
                return AVERROR_INVALIDDATA;
            if ((ret = avio_seek(pb, -8, SEEK_CUR)) < 0 ||
                (ret = av_append_packet(pb, pkt, len + 12)) < 0)
                return ret;
            if (ctx->num_play == 1 && (ret = ffio_ensure_seekback(pb, 8)) < 0)
                return ret;
            len = avio_rb32(pb);
            tag = avio_rl32(pb);
        }
        if ((ret = avio_seek(pb, -8, SEEK_CUR)) < 0)
            return ret;

        if (ctx->is_key_frame)
            pkt->flags |= AV_PKT_FLAG_KEY;
        return ret;
    case MKTAG('I', 'E', 'N', 'D'):
        ctx->cur_loop++;
        if (ctx->ignore_loop || ctx->num_play >= 1 && ctx->cur_loop == ctx->num_play) {
            avio_seek(pb, -8, SEEK_CUR);
            return AVERROR_EOF;
        }
        if ((ret = avio_seek(pb, s->streams[0]->codec->extradata_size + 8, SEEK_SET)) < 0)
            return ret;
        return 0;
    default:
        {
        char tag_buf[5];

        av_get_codec_tag_string(tag_buf, sizeof(tag_buf), tag);
        avpriv_request_sample(s, "In-stream tag=%s (0x%08X) len=%"PRIu32, tag_buf, tag, len);
        avio_skip(pb, len + 4);
        }
    }

    /* Handle the unsupported yet cases */
    return AVERROR_PATCHWELCOME;
}
Example #10
static int videotoolbox_retrieve_data(AVCodecContext *s, AVFrame *frame)
{
    InputStream *ist = s->opaque;
    VTContext  *vt = ist->hwaccel_ctx;
    CVPixelBufferRef pixbuf = (CVPixelBufferRef)frame->data[3];
    OSType pixel_format = CVPixelBufferGetPixelFormatType(pixbuf);
    CVReturn err;
    uint8_t *data[4] = { 0 };
    int linesize[4] = { 0 };
    int planes, ret, i;
    char codec_str[32];

    av_frame_unref(vt->tmp_frame);

    switch (pixel_format) {
    case kCVPixelFormatType_420YpCbCr8Planar: vt->tmp_frame->format = AV_PIX_FMT_YUV420P; break;
    case kCVPixelFormatType_422YpCbCr8:       vt->tmp_frame->format = AV_PIX_FMT_UYVY422; break;
    case kCVPixelFormatType_32BGRA:           vt->tmp_frame->format = AV_PIX_FMT_BGRA; break;
#ifdef kCFCoreFoundationVersionNumber10_7
    case kCVPixelFormatType_420YpCbCr8BiPlanarVideoRange: vt->tmp_frame->format = AV_PIX_FMT_NV12; break;
#endif
    default:
        av_get_codec_tag_string(codec_str, sizeof(codec_str), s->codec_tag);
        av_log(NULL, AV_LOG_ERROR,
               "%s: Unsupported pixel format: %s\n", codec_str, videotoolbox_pixfmt);
        return AVERROR(ENOSYS);
    }

    vt->tmp_frame->width  = frame->width;
    vt->tmp_frame->height = frame->height;
    ret = av_frame_get_buffer(vt->tmp_frame, 32);
    if (ret < 0)
        return ret;

    err = CVPixelBufferLockBaseAddress(pixbuf, kCVPixelBufferLock_ReadOnly);
    if (err != kCVReturnSuccess) {
        av_log(NULL, AV_LOG_ERROR, "Error locking the pixel buffer.\n");
        return AVERROR_UNKNOWN;
    }

    if (CVPixelBufferIsPlanar(pixbuf)) {

        planes = CVPixelBufferGetPlaneCount(pixbuf);
        for (i = 0; i < planes; i++) {
            data[i]     = CVPixelBufferGetBaseAddressOfPlane(pixbuf, i);
            linesize[i] = CVPixelBufferGetBytesPerRowOfPlane(pixbuf, i);
        }
    } else {
        data[0] = CVPixelBufferGetBaseAddress(pixbuf);
        linesize[0] = CVPixelBufferGetBytesPerRow(pixbuf);
    }

    av_image_copy(vt->tmp_frame->data, vt->tmp_frame->linesize,
                  (const uint8_t **)data, linesize, vt->tmp_frame->format,
                  frame->width, frame->height);

    ret = av_frame_copy_props(vt->tmp_frame, frame);
    CVPixelBufferUnlockBaseAddress(pixbuf, kCVPixelBufferLock_ReadOnly);
    if (ret < 0)
        return ret;

    av_frame_unref(frame);
    av_frame_move_ref(frame, vt->tmp_frame);

    return 0;
}
Example #11
static av_cold int ffmmal_init_decoder(AVCodecContext *avctx)
{
    MMALDecodeContext *ctx = avctx->priv_data;
    MMAL_STATUS_T status;
    MMAL_ES_FORMAT_T *format_in;
    MMAL_COMPONENT_T *decoder;
    char tmp[32];
    int ret = 0;

    bcm_host_init();

    if (mmal_vc_init()) {
        av_log(avctx, AV_LOG_ERROR, "Cannot initialize MMAL VC driver!\n");
        return AVERROR(ENOSYS);
    }

    if ((ret = ff_get_format(avctx, avctx->codec->pix_fmts)) < 0)
        return ret;

    avctx->pix_fmt = ret;

    if ((status = mmal_component_create(MMAL_COMPONENT_DEFAULT_VIDEO_DECODER, &ctx->decoder)))
        goto fail;

    decoder = ctx->decoder;

    format_in = decoder->input[0]->format;
    format_in->type = MMAL_ES_TYPE_VIDEO;
    switch (avctx->codec_id) {
    case AV_CODEC_ID_MPEG2VIDEO:
        format_in->encoding = MMAL_ENCODING_MP2V;
        break;
    case AV_CODEC_ID_MPEG4:
        format_in->encoding = MMAL_ENCODING_MP4V;
        break;
    case AV_CODEC_ID_VC1:
        format_in->encoding = MMAL_ENCODING_WVC1;
        break;
    case AV_CODEC_ID_H264:
    default:
        format_in->encoding = MMAL_ENCODING_H264;
        break;
    }
    format_in->es->video.width = FFALIGN(avctx->width, 32);
    format_in->es->video.height = FFALIGN(avctx->height, 16);
    format_in->es->video.crop.width = avctx->width;
    format_in->es->video.crop.height = avctx->height;
    format_in->es->video.frame_rate.num = 24000;
    format_in->es->video.frame_rate.den = 1001;
    format_in->es->video.par.num = avctx->sample_aspect_ratio.num;
    format_in->es->video.par.den = avctx->sample_aspect_ratio.den;
    format_in->flags = MMAL_ES_FORMAT_FLAG_FRAMED;

    av_get_codec_tag_string(tmp, sizeof(tmp), format_in->encoding);
    av_log(avctx, AV_LOG_DEBUG, "Using MMAL %s encoding.\n", tmp);

#if HAVE_MMAL_PARAMETER_VIDEO_MAX_NUM_CALLBACKS
    if (mmal_port_parameter_set_uint32(decoder->input[0], MMAL_PARAMETER_VIDEO_MAX_NUM_CALLBACKS,
                                       -1 - ctx->extra_decoder_buffers)) {
        av_log(avctx, AV_LOG_WARNING, "Could not set input buffering limit.\n");
    }
#endif

    if ((status = mmal_port_format_commit(decoder->input[0])))
        goto fail;

    decoder->input[0]->buffer_num =
        FFMAX(decoder->input[0]->buffer_num_min, 20);
    decoder->input[0]->buffer_size =
        FFMAX(decoder->input[0]->buffer_size_min, 512 * 1024);
    ctx->pool_in = mmal_pool_create(decoder->input[0]->buffer_num, 0);
    if (!ctx->pool_in) {
        ret = AVERROR(ENOMEM);
        goto fail;
    }

    if ((ret = ffmal_update_format(avctx)) < 0)
        goto fail;

    ctx->queue_decoded_frames = mmal_queue_create();
    if (!ctx->queue_decoded_frames)
        goto fail;

    decoder->input[0]->userdata = (void*)avctx;
    decoder->output[0]->userdata = (void*)avctx;
    decoder->control->userdata = (void*)avctx;

    if ((status = mmal_port_enable(decoder->control, control_port_cb)))
        goto fail;
    if ((status = mmal_port_enable(decoder->input[0], input_callback)))
        goto fail;
    if ((status = mmal_port_enable(decoder->output[0], output_callback)))
        goto fail;

    if ((status = mmal_component_enable(decoder)))
        goto fail;

    return 0;

fail:
    ffmmal_close_decoder(avctx);
    return ret < 0 ? ret : AVERROR_UNKNOWN;
}
Example #12
static int rsd_read_header(AVFormatContext *s)
{
    AVIOContext *pb = s->pb;
    int i, ret, version, start = 0x800;
    AVCodecContext *codec;
    AVStream *st = avformat_new_stream(s, NULL);

    if (!st)
        return AVERROR(ENOMEM);

    avio_skip(pb, 3); // "RSD"
    version = avio_r8(pb) - '0';

    codec = st->codec;
    codec->codec_type = AVMEDIA_TYPE_AUDIO;
    codec->codec_tag  = avio_rl32(pb);
    codec->codec_id   = ff_codec_get_id(rsd_tags, codec->codec_tag);
    if (!codec->codec_id) {
        char tag_buf[32];

        av_get_codec_tag_string(tag_buf, sizeof(tag_buf), codec->codec_tag);
        for (i=0; i < FF_ARRAY_ELEMS(rsd_unsupported_tags); i++) {
            if (codec->codec_tag == rsd_unsupported_tags[i]) {
                avpriv_request_sample(s, "Codec tag: %s", tag_buf);
                return AVERROR_PATCHWELCOME;
            }
        }
        av_log(s, AV_LOG_ERROR, "Unknown codec tag: %s\n", tag_buf);
        return AVERROR_INVALIDDATA;
    }

    codec->channels = avio_rl32(pb);
    if (codec->channels <= 0 || codec->channels > INT_MAX / 36) {
        av_log(s, AV_LOG_ERROR, "Invalid number of channels: %d\n", codec->channels);
        return AVERROR_INVALIDDATA;
    }

    avio_skip(pb, 4); // Bit depth
    codec->sample_rate = avio_rl32(pb);
    if (!codec->sample_rate)
        return AVERROR_INVALIDDATA;

    avio_skip(pb, 4); // Unknown

    switch (codec->codec_id) {
    case AV_CODEC_ID_XMA2:
        codec->block_align = 2048;
        ff_alloc_extradata(codec, 34);
        if (!codec->extradata)
            return AVERROR(ENOMEM);
        memset(codec->extradata, 0, 34);
        break;
    case AV_CODEC_ID_ADPCM_PSX:
        codec->block_align = 16 * codec->channels;
        if (pb->seekable)
            st->duration = av_get_audio_frame_duration(codec, avio_size(pb) - start);
        break;
    case AV_CODEC_ID_ADPCM_IMA_RAD:
        codec->block_align = 20 * codec->channels;
        if (pb->seekable)
            st->duration = av_get_audio_frame_duration(codec, avio_size(pb) - start);
        break;
    case AV_CODEC_ID_ADPCM_IMA_WAV:
        if (version == 2)
            start = avio_rl32(pb);

        codec->bits_per_coded_sample = 4;
        codec->block_align = 36 * codec->channels;
        if (pb->seekable)
            st->duration = av_get_audio_frame_duration(codec, avio_size(pb) - start);
        break;
    case AV_CODEC_ID_ADPCM_THP_LE:
        /* RSD3GADP is mono, so only alloc enough memory
           to store the coeff table for a single channel. */

        start = avio_rl32(pb);

        if ((ret = ff_get_extradata(codec, s->pb, 32)) < 0)
            return ret;
        if (pb->seekable)
            st->duration = av_get_audio_frame_duration(codec, avio_size(pb) - start);
        break;
    case AV_CODEC_ID_ADPCM_THP:
        codec->block_align = 8 * codec->channels;
        avio_skip(s->pb, 0x1A4 - avio_tell(s->pb));

        if ((ret = ff_alloc_extradata(st->codec, 32 * st->codec->channels)) < 0)
            return ret;

        for (i = 0; i < st->codec->channels; i++) {
            avio_read(s->pb, st->codec->extradata + 32 * i, 32);
            avio_skip(s->pb, 8);
        }
        if (pb->seekable)
            st->duration = (avio_size(pb) - start) / (8 * st->codec->channels) * 14;
        break;
    case AV_CODEC_ID_PCM_S16LE:
    case AV_CODEC_ID_PCM_S16BE:
        if (version != 4)
            start = avio_rl32(pb);

        if (pb->seekable)
            st->duration = (avio_size(pb) - start) / 2 / codec->channels;
        break;
    }

    avio_skip(pb, start - avio_tell(pb));
    if (codec->codec_id == AV_CODEC_ID_XMA2) {
        avio_skip(pb, avio_rb32(pb) + avio_rb32(pb));
        st->duration = avio_rb32(pb);
    }

    avpriv_set_pts_info(st, 64, 1, codec->sample_rate);

    return 0;
}
Example #13
/* Returns the number of sound data frames or negative on error */
static int get_aiff_header(AVFormatContext *s, int size,
                           unsigned version)
{
    AVIOContext *pb        = s->pb;
    AVCodecContext *codec  = s->streams[0]->codec;
    AIFFInputContext *aiff = s->priv_data;
    int exp;
    uint64_t val;
    int sample_rate;
    unsigned int num_frames;

    if (size & 1)
        size++;
    codec->codec_type = AVMEDIA_TYPE_AUDIO;
    codec->channels = avio_rb16(pb);
    num_frames = avio_rb32(pb);
    codec->bits_per_coded_sample = avio_rb16(pb);

    exp = avio_rb16(pb) - 16383 - 63;
    val = avio_rb64(pb);
    if (exp <-63 || exp >63) {
        av_log(s, AV_LOG_ERROR, "exp %d is out of range\n", exp);
        return AVERROR_INVALIDDATA;
    }
    if (exp >= 0)
        sample_rate = val << exp;
    else
        sample_rate = (val + (1ULL<<(-exp-1))) >> -exp;
    codec->sample_rate = sample_rate;
    size -= 18;

    /* get codec id for AIFF-C */
    if (size < 4) {
        version = AIFF;
    } else if (version == AIFF_C_VERSION1) {
        codec->codec_tag = avio_rl32(pb);
        codec->codec_id  = ff_codec_get_id(ff_codec_aiff_tags, codec->codec_tag);
        if (codec->codec_id == AV_CODEC_ID_NONE) {
            char tag[32];
            av_get_codec_tag_string(tag, sizeof(tag), codec->codec_tag);
            avpriv_request_sample(s, "unknown or unsupported codec tag: %s", tag);
        }
        size -= 4;
    }

    if (version != AIFF_C_VERSION1 || codec->codec_id == AV_CODEC_ID_PCM_S16BE) {
        codec->codec_id = aiff_codec_get_id(codec->bits_per_coded_sample);
        codec->bits_per_coded_sample = av_get_bits_per_sample(codec->codec_id);
        aiff->block_duration = 1;
    } else {
        switch (codec->codec_id) {
        case AV_CODEC_ID_PCM_F32BE:
        case AV_CODEC_ID_PCM_F64BE:
        case AV_CODEC_ID_PCM_S16LE:
        case AV_CODEC_ID_PCM_ALAW:
        case AV_CODEC_ID_PCM_MULAW:
            aiff->block_duration = 1;
            break;
        case AV_CODEC_ID_ADPCM_IMA_QT:
            codec->block_align = 34*codec->channels;
            break;
        case AV_CODEC_ID_MACE3:
            codec->block_align = 2*codec->channels;
            break;
        case AV_CODEC_ID_ADPCM_G726LE:
            codec->bits_per_coded_sample = 5;
        case AV_CODEC_ID_ADPCM_IMA_WS:
        case AV_CODEC_ID_ADPCM_G722:
        case AV_CODEC_ID_MACE6:
        case AV_CODEC_ID_SDX2_DPCM:
            codec->block_align = 1*codec->channels;
            break;
        case AV_CODEC_ID_GSM:
            codec->block_align = 33;
            break;
        default:
            aiff->block_duration = 1;
            break;
        }
        if (codec->block_align > 0)
            aiff->block_duration = av_get_audio_frame_duration(codec,
                                   codec->block_align);
    }

    /* Block align needs to be computed in all cases, as the definition
     * is specific to applications -> here we use the WAVE format definition */
    if (!codec->block_align)
        codec->block_align = (av_get_bits_per_sample(codec->codec_id) * codec->channels) >> 3;

    if (aiff->block_duration) {
        codec->bit_rate = codec->sample_rate * (codec->block_align << 3) /
                          aiff->block_duration;
    }

    /* Chunk is over */
    if (size)
        avio_skip(pb, size);

    return num_frames;
}
Example #14
std::string get_codec_name(const AVCodecParameters *par)
{
  AVCodecID id = par->codec_id;

  // Grab the codec
  const AVCodec *p = avcodec_find_decoder(id);
  const AVCodecDescriptor *desc = avcodec_descriptor_get(id);
  const char *profile = avcodec_profile_name(id, par->profile);

  std::ostringstream codec_name;

  const char *nice_name = nullptr;
  for (int i = 0; i < countof(nice_codec_names); ++i)
  {
    if (nice_codec_names[i].id == id) {
      nice_name = nice_codec_names[i].name;
      break;
    }
  }

  if (id == AV_CODEC_ID_DTS && par->codec_tag == 0xA2) {
    profile = "DTS Express";
  }

  if (id == AV_CODEC_ID_H264 && profile) {
    codec_name << nice_name << " " << tolower(profile);
    if (par->level && par->level != FF_LEVEL_UNKNOWN && par->level < 1000) {
      char l_buf[5];
      sprintf_s(l_buf, "%.1f", par->level / 10.0);
      codec_name << " L" << l_buf;
    }
  } else if (id == AV_CODEC_ID_VC1 && profile) {
    codec_name << nice_name << " " << tolower(profile);
    if (par->level != FF_LEVEL_UNKNOWN) {
      codec_name << " L" << par->level;
    }
  } else if (id == AV_CODEC_ID_DTS && profile) {
    codec_name << tolower(profile);
  } else if (id == AV_CODEC_ID_JPEG2000 && profile) {
    codec_name << tolower(profile);
  } else if (nice_name) {
    codec_name << nice_name;
    if (profile)
      codec_name << " " << tolower(profile);
  } else if (desc && desc->name) {
    codec_name << desc->name;
    if (profile)
      codec_name << " " << tolower(profile);
  } else if (p && p->name) {
    codec_name << p->name;
    if (profile)
      codec_name << " " << tolower(profile);
  } else {
    /* output avi tags */
    char buf[32];
    av_get_codec_tag_string(buf, sizeof(buf), par->codec_tag);
    codec_name << buf;
    sprintf_s(buf, "0x%04X", par->codec_tag);
    codec_name  << " / " << buf;
  }
  return codec_name.str();
}
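
Example #14 is the AVCodecParameters-based rewrite of Example #1, yet it still formats the raw tag with av_get_codec_tag_string(). In newer FFmpeg releases that helper was dropped in favour of av_fourcc_make_string()/av_fourcc2str() from libavutil; a rough sketch of the equivalent call (assuming a libavutil that provides them):

/* Sketch of the newer libavutil tag formatter; assumes a release that
 * provides av_fourcc_make_string() and the av_fourcc2str() macro. */
#include <stdint.h>
#include <stdio.h>
#include <libavutil/avutil.h>   /* av_fourcc_make_string(), av_fourcc2str() */

void print_codec_tag(uint32_t codec_tag)
{
    char buf[AV_FOURCC_MAX_STRING_SIZE];
    printf("%s\n", av_fourcc_make_string(buf, codec_tag));
    /* Or, via the convenience macro that supplies a temporary buffer: */
    printf("%s\n", av_fourcc2str(codec_tag));
}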
Example #15
static int parse_pixel_format(AVCodecContext *avctx)
{
    DDSContext *ctx = avctx->priv_data;
    GetByteContext *gbc = &ctx->gbc;
    char buf[32];
    uint32_t flags, fourcc, gimp_tag;
    enum DDSDXGIFormat dxgi;
    int size, bpp, r, g, b, a;
    int alpha_exponent, ycocg_classic, ycocg_scaled, normal_map, array;

    /* Alternative DDS implementations use reserved1 as custom header. */
    bytestream2_skip(gbc, 4 * 3);
    gimp_tag = bytestream2_get_le32(gbc);
    alpha_exponent = gimp_tag == MKTAG('A', 'E', 'X', 'P');
    ycocg_classic  = gimp_tag == MKTAG('Y', 'C', 'G', '1');
    ycocg_scaled   = gimp_tag == MKTAG('Y', 'C', 'G', '2');
    bytestream2_skip(gbc, 4 * 7);

    /* Now the real DDPF starts. */
    size = bytestream2_get_le32(gbc);
    if (size != 32) {
        av_log(avctx, AV_LOG_ERROR, "Invalid pixel format header %d.\n", size);
        return AVERROR_INVALIDDATA;
    }
    flags = bytestream2_get_le32(gbc);
    ctx->compressed = flags & DDPF_FOURCC;
    ctx->paletted   = flags & DDPF_PALETTE;
    normal_map      = flags & DDPF_NORMALMAP;
    fourcc = bytestream2_get_le32(gbc);

    if (ctx->compressed && ctx->paletted) {
        av_log(avctx, AV_LOG_WARNING,
               "Disabling invalid palette flag for compressed dds.\n");
        ctx->paletted = 0;
    }

    bpp = bytestream2_get_le32(gbc); // rgbbitcount
    r   = bytestream2_get_le32(gbc); // rbitmask
    g   = bytestream2_get_le32(gbc); // gbitmask
    b   = bytestream2_get_le32(gbc); // bbitmask
    a   = bytestream2_get_le32(gbc); // abitmask

    bytestream2_skip(gbc, 4); // caps
    bytestream2_skip(gbc, 4); // caps2
    bytestream2_skip(gbc, 4); // caps3
    bytestream2_skip(gbc, 4); // caps4
    bytestream2_skip(gbc, 4); // reserved2

    av_get_codec_tag_string(buf, sizeof(buf), fourcc);
    av_log(avctx, AV_LOG_VERBOSE, "fourcc %s bpp %d "
           "r 0x%x g 0x%x b 0x%x a 0x%x\n", buf, bpp, r, g, b, a);
    if (gimp_tag) {
        av_get_codec_tag_string(buf, sizeof(buf), gimp_tag);
        av_log(avctx, AV_LOG_VERBOSE, "and GIMP-DDS tag %s\n", buf);
    }

    if (ctx->compressed)
        avctx->pix_fmt = AV_PIX_FMT_RGBA;

    if (ctx->compressed) {
        switch (fourcc) {
        case MKTAG('D', 'X', 'T', '1'):
            ctx->tex_ratio = 8;
            ctx->tex_funct = ctx->texdsp.dxt1a_block;
            break;
        case MKTAG('D', 'X', 'T', '2'):
            ctx->tex_ratio = 16;
            ctx->tex_funct = ctx->texdsp.dxt2_block;
            break;
        case MKTAG('D', 'X', 'T', '3'):
            ctx->tex_ratio = 16;
            ctx->tex_funct = ctx->texdsp.dxt3_block;
            break;
        case MKTAG('D', 'X', 'T', '4'):
            ctx->tex_ratio = 16;
            ctx->tex_funct = ctx->texdsp.dxt4_block;
            break;
        case MKTAG('D', 'X', 'T', '5'):
            ctx->tex_ratio = 16;
            if (ycocg_scaled)
                ctx->tex_funct = ctx->texdsp.dxt5ys_block;
            else if (ycocg_classic)
                ctx->tex_funct = ctx->texdsp.dxt5y_block;
            else
                ctx->tex_funct = ctx->texdsp.dxt5_block;
            break;
        case MKTAG('R', 'X', 'G', 'B'):
            ctx->tex_ratio = 16;
            ctx->tex_funct = ctx->texdsp.dxt5_block;
            /* This format may be considered as a normal map,
             * but it is handled differently in a separate postproc. */
            ctx->postproc = DDS_SWIZZLE_RXGB;
            normal_map = 0;
            break;
        case MKTAG('A', 'T', 'I', '1'):
        case MKTAG('B', 'C', '4', 'U'):
            ctx->tex_ratio = 8;
            ctx->tex_funct = ctx->texdsp.rgtc1u_block;
            break;
        case MKTAG('B', 'C', '4', 'S'):
            ctx->tex_ratio = 8;
            ctx->tex_funct = ctx->texdsp.rgtc1s_block;
            break;
        case MKTAG('A', 'T', 'I', '2'):
            /* RGT2 variant with swapped R and G (3Dc)*/
            ctx->tex_ratio = 16;
            ctx->tex_funct = ctx->texdsp.dxn3dc_block;
            break;
        case MKTAG('B', 'C', '5', 'U'):
            ctx->tex_ratio = 16;
            ctx->tex_funct = ctx->texdsp.rgtc2u_block;
            break;
        case MKTAG('B', 'C', '5', 'S'):
            ctx->tex_ratio = 16;
            ctx->tex_funct = ctx->texdsp.rgtc2s_block;
            break;
        case MKTAG('U', 'Y', 'V', 'Y'):
            ctx->compressed = 0;
            avctx->pix_fmt = AV_PIX_FMT_UYVY422;
            break;
        case MKTAG('Y', 'U', 'Y', '2'):
            ctx->compressed = 0;
            avctx->pix_fmt = AV_PIX_FMT_YUYV422;
            break;
        case MKTAG('P', '8', ' ', ' '):
            /* ATI Palette8, same as normal palette */
            ctx->compressed = 0;
            ctx->paletted   = 1;
            avctx->pix_fmt  = AV_PIX_FMT_PAL8;
            break;
        case MKTAG('D', 'X', '1', '0'):
            /* DirectX 10 extra header */
            dxgi = bytestream2_get_le32(gbc);
            bytestream2_skip(gbc, 4); // resourceDimension
            bytestream2_skip(gbc, 4); // miscFlag
            array = bytestream2_get_le32(gbc);
            bytestream2_skip(gbc, 4); // miscFlag2

            if (array != 0)
                av_log(avctx, AV_LOG_VERBOSE,
                       "Found array of size %d (ignored).\n", array);

            /* Only BC[1-5] are actually compressed. */
            ctx->compressed = (dxgi >= 70) && (dxgi <= 84);

            av_log(avctx, AV_LOG_VERBOSE, "DXGI format %d.\n", dxgi);
            switch (dxgi) {
            /* RGB types. */
            case DXGI_FORMAT_R16G16B16A16_TYPELESS:
            case DXGI_FORMAT_R16G16B16A16_FLOAT:
            case DXGI_FORMAT_R16G16B16A16_UNORM:
            case DXGI_FORMAT_R16G16B16A16_UINT:
            case DXGI_FORMAT_R16G16B16A16_SNORM:
            case DXGI_FORMAT_R16G16B16A16_SINT:
                avctx->pix_fmt = AV_PIX_FMT_BGRA64;
                break;
            case DXGI_FORMAT_R8G8B8A8_UNORM_SRGB:
                avctx->colorspace = AVCOL_SPC_RGB;
            case DXGI_FORMAT_R8G8B8A8_TYPELESS:
            case DXGI_FORMAT_R8G8B8A8_UNORM:
            case DXGI_FORMAT_R8G8B8A8_UINT:
            case DXGI_FORMAT_R8G8B8A8_SNORM:
            case DXGI_FORMAT_R8G8B8A8_SINT:
                avctx->pix_fmt = AV_PIX_FMT_BGRA;
                break;
            case DXGI_FORMAT_B8G8R8A8_UNORM_SRGB:
                avctx->colorspace = AVCOL_SPC_RGB;
            case DXGI_FORMAT_B8G8R8A8_TYPELESS:
            case DXGI_FORMAT_B8G8R8A8_UNORM:
                avctx->pix_fmt = AV_PIX_FMT_RGBA;
                break;
            case DXGI_FORMAT_B8G8R8X8_UNORM_SRGB:
                avctx->colorspace = AVCOL_SPC_RGB;
            case DXGI_FORMAT_B8G8R8X8_TYPELESS:
            case DXGI_FORMAT_B8G8R8X8_UNORM:
                avctx->pix_fmt = AV_PIX_FMT_RGBA; // opaque
                break;
            case DXGI_FORMAT_B5G6R5_UNORM:
                avctx->pix_fmt = AV_PIX_FMT_RGB565LE;
                break;
            /* Texture types. */
            case DXGI_FORMAT_BC1_UNORM_SRGB:
                avctx->colorspace = AVCOL_SPC_RGB;
            case DXGI_FORMAT_BC1_TYPELESS:
            case DXGI_FORMAT_BC1_UNORM:
                ctx->tex_ratio = 8;
                ctx->tex_funct = ctx->texdsp.dxt1a_block;
                break;
            case DXGI_FORMAT_BC2_UNORM_SRGB:
                avctx->colorspace = AVCOL_SPC_RGB;
            case DXGI_FORMAT_BC2_TYPELESS:
            case DXGI_FORMAT_BC2_UNORM:
                ctx->tex_ratio = 16;
                ctx->tex_funct = ctx->texdsp.dxt3_block;
                break;
            case DXGI_FORMAT_BC3_UNORM_SRGB:
                avctx->colorspace = AVCOL_SPC_RGB;
            case DXGI_FORMAT_BC3_TYPELESS:
            case DXGI_FORMAT_BC3_UNORM:
                ctx->tex_ratio = 16;
                ctx->tex_funct = ctx->texdsp.dxt5_block;
                break;
            case DXGI_FORMAT_BC4_TYPELESS:
            case DXGI_FORMAT_BC4_UNORM:
                ctx->tex_ratio = 8;
                ctx->tex_funct = ctx->texdsp.rgtc1u_block;
                break;
            case DXGI_FORMAT_BC4_SNORM:
                ctx->tex_ratio = 8;
                ctx->tex_funct = ctx->texdsp.rgtc1s_block;
                break;
            case DXGI_FORMAT_BC5_TYPELESS:
            case DXGI_FORMAT_BC5_UNORM:
                ctx->tex_ratio = 16;
                ctx->tex_funct = ctx->texdsp.rgtc2u_block;
                break;
            case DXGI_FORMAT_BC5_SNORM:
                ctx->tex_ratio = 16;
                ctx->tex_funct = ctx->texdsp.rgtc2s_block;
                break;
            default:
                av_log(avctx, AV_LOG_ERROR,
                       "Unsupported DXGI format %d.\n", dxgi);
                return AVERROR_INVALIDDATA;
            }
            break;
        default:
            av_log(avctx, AV_LOG_ERROR, "Unsupported %s fourcc.\n", buf);
            return AVERROR_INVALIDDATA;
        }
    } else if (ctx->paletted) {
        if (bpp == 8) {
            avctx->pix_fmt = AV_PIX_FMT_PAL8;
        } else {
            av_log(avctx, AV_LOG_ERROR, "Unsupported palette bpp %d.\n", bpp);
            return AVERROR_INVALIDDATA;
        }
    } else {
        /*  8 bpp */
        if (bpp == 8 && r == 0xff && g == 0 && b == 0 && a == 0)
            avctx->pix_fmt = AV_PIX_FMT_GRAY8;
        /* 16 bpp */
        else if (bpp == 16 && r == 0xff && g == 0 && b == 0 && a == 0xff00)
            avctx->pix_fmt = AV_PIX_FMT_YA8;
        else if (bpp == 16 && r == 0xffff && g == 0 && b == 0 && a == 0)
            avctx->pix_fmt = AV_PIX_FMT_GRAY16LE;
        else if (bpp == 16 && r == 0xf800 && g == 0x7e0 && b == 0x1f && a == 0)
            avctx->pix_fmt = AV_PIX_FMT_RGB565LE;
        /* 24 bpp */
        else if (bpp == 24 && r == 0xff0000 && g == 0xff00 && b == 0xff && a == 0)
            avctx->pix_fmt = AV_PIX_FMT_BGR24;
        /* 32 bpp */
        else if (bpp == 32 && r == 0xff0000 && g == 0xff00 && b == 0xff && a == 0)
            avctx->pix_fmt = AV_PIX_FMT_BGR0; // opaque
        else if (bpp == 32 && r == 0xff && g == 0xff00 && b == 0xff0000 && a == 0)
            avctx->pix_fmt = AV_PIX_FMT_RGB0; // opaque
        else if (bpp == 32 && r == 0xff0000 && g == 0xff00 && b == 0xff && a == 0xff000000)
            avctx->pix_fmt = AV_PIX_FMT_BGRA;
        else if (bpp == 32 && r == 0xff && g == 0xff00 && b == 0xff0000 && a == 0xff000000)
            avctx->pix_fmt = AV_PIX_FMT_RGBA;
        /* give up */
        else {
            av_log(avctx, AV_LOG_ERROR, "Unknown pixel format "
                   "[bpp %d r 0x%x g 0x%x b 0x%x a 0x%x].\n", bpp, r, g, b, a);
            return AVERROR_INVALIDDATA;
        }
    }

    /* Set any remaining post-proc that should happen before frame is ready. */
    if (alpha_exponent)
        ctx->postproc = DDS_ALPHA_EXP;
    else if (normal_map)
        ctx->postproc = DDS_NORMAL_MAP;
    else if (ycocg_classic && !ctx->compressed)
        ctx->postproc = DDS_RAW_YCOCG;
    else if (avctx->pix_fmt == AV_PIX_FMT_YA8)
        ctx->postproc = DDS_SWAP_ALPHA;

    /* ATI/NVidia variants sometimes add swizzling in bpp. */
    switch (bpp) {
    case MKTAG('A', '2', 'X', 'Y'):
        ctx->postproc = DDS_SWIZZLE_A2XY;
        break;
    case MKTAG('x', 'G', 'B', 'R'):
        ctx->postproc = DDS_SWIZZLE_XGBR;
        break;
    case MKTAG('x', 'R', 'B', 'G'):
        ctx->postproc = DDS_SWIZZLE_XRBG;
        break;
    case MKTAG('R', 'B', 'x', 'G'):
        ctx->postproc = DDS_SWIZZLE_RBXG;
        break;
    case MKTAG('R', 'G', 'x', 'B'):
        ctx->postproc = DDS_SWIZZLE_RGXB;
        break;
    case MKTAG('R', 'x', 'B', 'G'):
        ctx->postproc = DDS_SWIZZLE_RXBG;
        break;
    case MKTAG('x', 'G', 'x', 'R'):
        ctx->postproc = DDS_SWIZZLE_XGXR;
        break;
    case MKTAG('A', '2', 'D', '5'):
        ctx->postproc = DDS_NORMAL_MAP;
        break;
    }

    return 0;
}
Example #16
int main(int argc, char **argv)
{
    int i, list_fourcc_pix_fmt = 0, list_pix_fmt_fourccs = 0;
    const char *pix_fmt_name = NULL;
    char c;

    if (argc == 1) {
        usage();
        return 0;
    }

    while ((c = getopt(argc, argv, "hp:lL")) != -1) {
        switch (c) {
        case 'h':
            usage();
            return 0;
        case 'l':
            list_fourcc_pix_fmt = 1;
            break;
        case 'L':
            list_pix_fmt_fourccs = 1;
            break;
        case 'p':
            pix_fmt_name = optarg;
            break;
        case '?':
            usage();
            return 1;
        }
    }

    if (list_fourcc_pix_fmt) {
        for (i = 0; ff_raw_pix_fmt_tags[i].pix_fmt != AV_PIX_FMT_NONE; i++) {
            char buf[32];
            av_get_codec_tag_string(buf, sizeof(buf), ff_raw_pix_fmt_tags[i].fourcc);
            printf("%s: %s\n", buf, av_get_pix_fmt_name(ff_raw_pix_fmt_tags[i].pix_fmt));
        }
    }

    if (list_pix_fmt_fourccs) {
        for (i = 0; i < AV_PIX_FMT_NB; i++) {
            const AVPixFmtDescriptor *pix_desc = av_pix_fmt_desc_get(i);
            if (!pix_desc->name || pix_desc->flags & PIX_FMT_HWACCEL)
                continue;
            printf("%s: ", pix_desc->name);
            print_pix_fmt_fourccs(i, ' ');
            printf("\n");
        }
    }

    if (pix_fmt_name) {
        enum AVPixelFormat pix_fmt = av_get_pix_fmt(pix_fmt_name);
        if (pix_fmt == AV_PIX_FMT_NONE) {
            fprintf(stderr, "Invalid pixel format selected '%s'\n", pix_fmt_name);
            return 1;
        }
        print_pix_fmt_fourccs(pix_fmt, '\n');
    }

    return 0;
}
Example #17
static int rsd_read_header(AVFormatContext *s)
{
    AVIOContext *pb = s->pb;
    int i, version, start = 0x800;
    AVCodecContext *codec;
    AVStream *st = avformat_new_stream(s, NULL);

    if (!st)
        return AVERROR(ENOMEM);

    avio_skip(pb, 3); // "RSD"
    version = avio_r8(pb) - '0';

    codec = st->codec;
    codec->codec_type = AVMEDIA_TYPE_AUDIO;
    codec->codec_tag  = avio_rl32(pb);
    codec->codec_id   = ff_codec_get_id(rsd_tags, codec->codec_tag);
    if (!codec->codec_id) {
        char tag_buf[5];

        av_get_codec_tag_string(tag_buf, sizeof(tag_buf), codec->codec_tag);
        for (i=0; i < FF_ARRAY_ELEMS(rsd_unsupported_tags); i++) {
            if (codec->codec_tag == rsd_unsupported_tags[i]) {
                avpriv_request_sample(s, "Codec tag: %s", tag_buf);
                return AVERROR_PATCHWELCOME;
            }
        }
        av_log(s, AV_LOG_ERROR, "Unknown codec tag: %s\n", tag_buf);
        return AVERROR_INVALIDDATA;
    }

    codec->channels = avio_rl32(pb);
    if (!codec->channels)
        return AVERROR_INVALIDDATA;

    avio_skip(pb, 4); // Bit depth
    codec->sample_rate = avio_rl32(pb);
    if (!codec->sample_rate)
        return AVERROR_INVALIDDATA;

    avio_skip(pb, 4); // Unknown

    switch (codec->codec_id) {
    case AV_CODEC_ID_ADPCM_IMA_RAD:
        codec->block_align = 20 * codec->channels;
        if (pb->seekable)
            st->duration = av_get_audio_frame_duration(codec, avio_size(pb) - start);
        break;
    case AV_CODEC_ID_ADPCM_THP:
        /* RSD3GADP is mono, so only alloc enough memory
           to store the coeff table for a single channel. */

        start = avio_rl32(pb);

        if (ff_get_extradata(codec, s->pb, 32) < 0)
            return AVERROR(ENOMEM);

        for (i = 0; i < 16; i++)
            AV_WB16(codec->extradata + i * 2, AV_RL16(codec->extradata + i * 2));

        if (pb->seekable)
            st->duration = (avio_size(pb) - start) / 8 * 14;
        break;
    case AV_CODEC_ID_PCM_S16LE:
    case AV_CODEC_ID_PCM_S16BE:
        if (version != 4)
            start = avio_rl32(pb);

        if (pb->seekable)
            st->duration = (avio_size(pb) - start) / 2 / codec->channels;
        break;
    }

    avio_skip(pb, start - avio_tell(pb));

    avpriv_set_pts_info(st, 64, 1, codec->sample_rate);

    return 0;
}
Example #18
// Fetch a decoded buffer and place it into the frame parameter.
static int ffmmal_read_frame(AVCodecContext *avctx, AVFrame *frame, int *got_frame)
{
    MMALDecodeContext *ctx = avctx->priv_data;
    MMAL_BUFFER_HEADER_T *buffer = NULL;
    MMAL_STATUS_T status = 0;
    int ret = 0;

    if (ctx->eos_received)
        goto done;

    while (1) {
        // To ensure decoding in lockstep with a constant delay between fed packets
        // and output frames, we always wait until an output buffer is available.
        // Except during start we don't know after how many input packets the decoder
        // is going to return the first buffer, and we can't distinguish decoder
        // being busy from decoder waiting for input. So just poll at the start and
        // keep feeding new data to the buffer.
        // We are pretty sure the decoder will produce output if we sent more input
        // frames than what an H.264 decoder could logically delay. This avoids
        // excessive buffering.
        // We also wait if we sent eos, but didn't receive it yet (think of decoding
        // stream with a very low number of frames).
        if (avpriv_atomic_int_get(&ctx->packets_buffered) > MAX_DELAYED_FRAMES ||
            (ctx->packets_sent && ctx->eos_sent)) {
            // MMAL will ignore broken input packets, which means the frame we
            // expect here may never arrive. Dealing with this correctly is
            // complicated, so here's a hack to avoid that it freezes forever
            // in this unlikely situation.
            buffer = mmal_queue_timedwait(ctx->queue_decoded_frames, 100);
            if (!buffer) {
                av_log(avctx, AV_LOG_ERROR, "Did not get output frame from MMAL.\n");
                ret = AVERROR_UNKNOWN;
                goto done;
            }
        } else {
            buffer = mmal_queue_get(ctx->queue_decoded_frames);
            if (!buffer)
                goto done;
        }

        ctx->eos_received |= !!(buffer->flags & MMAL_BUFFER_HEADER_FLAG_EOS);
        if (ctx->eos_received)
            goto done;

        if (buffer->cmd == MMAL_EVENT_FORMAT_CHANGED) {
            MMAL_COMPONENT_T *decoder = ctx->decoder;
            MMAL_EVENT_FORMAT_CHANGED_T *ev = mmal_event_format_changed_get(buffer);
            MMAL_BUFFER_HEADER_T *stale_buffer;

            av_log(avctx, AV_LOG_INFO, "Changing output format.\n");

            if ((status = mmal_port_disable(decoder->output[0])))
                goto done;

            while ((stale_buffer = mmal_queue_get(ctx->queue_decoded_frames)))
                mmal_buffer_header_release(stale_buffer);

            mmal_format_copy(decoder->output[0]->format, ev->format);

            if ((ret = ffmal_update_format(avctx)) < 0)
                goto done;

            if ((status = mmal_port_enable(decoder->output[0], output_callback)))
                goto done;

            if ((ret = ffmmal_fill_output_port(avctx)) < 0)
                goto done;

            if ((ret = ffmmal_fill_input_port(avctx)) < 0)
                goto done;

            mmal_buffer_header_release(buffer);
            continue;
        } else if (buffer->cmd) {
            char s[20];
            av_get_codec_tag_string(s, sizeof(s), buffer->cmd);
            av_log(avctx, AV_LOG_WARNING, "Unknown MMAL event %s on output port\n", s);
            goto done;
        } else if (buffer->length == 0) {
            // Unused output buffer that got drained after format change.
            mmal_buffer_header_release(buffer);
            continue;
        }

        ctx->frames_output++;

        if ((ret = ffmal_copy_frame(avctx, frame, buffer)) < 0)
            goto done;

        *got_frame = 1;
        break;
    }

done:
    if (buffer)
        mmal_buffer_header_release(buffer);
    if (status && ret >= 0)
        ret = AVERROR_UNKNOWN;
    return ret;
}
Example #19
static void show_stream(AVFormatContext *fmt_ctx, int stream_idx)
{
    AVStream *stream = fmt_ctx->streams[stream_idx];
    AVCodecContext *dec_ctx;
    AVCodec *dec;
    const char *profile;
    char val_str[128];
    AVRational display_aspect_ratio;

    probe_object_header("stream");

    probe_int("index", stream->index);

    if ((dec_ctx = stream->codec)) {
        if ((dec = dec_ctx->codec)) {
            probe_str("codec_name", dec->name);
            probe_str("codec_long_name", dec->long_name);
        } else {
            probe_str("codec_name", "unknown");
        }

        probe_str("codec_type", media_type_string(dec_ctx->codec_type));
        probe_str("codec_time_base",
                  rational_string(val_str, sizeof(val_str),
                                  "/", &dec_ctx->time_base));

        /* print AVI/FourCC tag */
        av_get_codec_tag_string(val_str, sizeof(val_str), dec_ctx->codec_tag);
        probe_str("codec_tag_string", val_str);
        probe_str("codec_tag", tag_string(val_str, sizeof(val_str),
                                          dec_ctx->codec_tag));

        /* print profile, if there is one */
        if (dec && (profile = av_get_profile_name(dec, dec_ctx->profile)))
            probe_str("profile", profile);

        switch (dec_ctx->codec_type) {
        case AVMEDIA_TYPE_VIDEO:
            probe_int("width", dec_ctx->width);
            probe_int("height", dec_ctx->height);
            probe_int("has_b_frames", dec_ctx->has_b_frames);
            if (dec_ctx->sample_aspect_ratio.num) {
                probe_str("sample_aspect_ratio",
                          rational_string(val_str, sizeof(val_str), ":",
                          &dec_ctx->sample_aspect_ratio));
                av_reduce(&display_aspect_ratio.num, &display_aspect_ratio.den,
                          dec_ctx->width  * dec_ctx->sample_aspect_ratio.num,
                          dec_ctx->height * dec_ctx->sample_aspect_ratio.den,
                          1024*1024);
                probe_str("display_aspect_ratio",
                          rational_string(val_str, sizeof(val_str), ":",
                          &display_aspect_ratio));
            }
            probe_str("pix_fmt",
                      dec_ctx->pix_fmt != PIX_FMT_NONE ? av_pix_fmt_descriptors[dec_ctx->pix_fmt].name
                                                    : "unknown");
            probe_int("level", dec_ctx->level);
            break;

        case AVMEDIA_TYPE_AUDIO:
            probe_str("sample_rate",
                      value_string(val_str, sizeof(val_str),
                                   dec_ctx->sample_rate,
                                   unit_hertz_str));
            probe_int("channels", dec_ctx->channels);
            probe_int("bits_per_sample",
                      av_get_bits_per_sample(dec_ctx->codec_id));
            break;
        }
    } else {
        probe_str("codec_type", "unknown");
    }

    if (fmt_ctx->iformat->flags & AVFMT_SHOW_IDS)
        probe_int("id", stream->id);
    probe_str("avg_frame_rate",
              rational_string(val_str, sizeof(val_str), "/",
              &stream->avg_frame_rate));
    probe_str("time_base",
              rational_string(val_str, sizeof(val_str), "/",
              &stream->time_base));
    probe_str("start_time",
              time_value_string(val_str, sizeof(val_str),
                                stream->start_time, &stream->time_base));
    probe_str("duration",
              time_value_string(val_str, sizeof(val_str),
                                stream->duration, &stream->time_base));
    if (stream->nb_frames)
        probe_int("nb_frames", stream->nb_frames);

    probe_dict(stream->metadata, "tags");

    probe_object_footer("stream");
}