Example #1
int ff_h264_fill_default_ref_list(H264Context *h)
{
	MpegEncContext * const s = &h->s;
	int i, len;

	if(h->slice_type_nos==FF_B_TYPE)
	{
		Picture *sorted[32];
		int cur_poc, list;
		int lens[2];

		if(FIELD_PICTURE)
			cur_poc= s->current_picture_ptr->field_poc[ s->picture_structure == PICT_BOTTOM_FIELD ];
		else
			cur_poc= s->current_picture_ptr->poc;

		for(list= 0; list<2; list++)
		{
			len= add_sorted(sorted    , h->short_ref, h->short_ref_count, cur_poc, 1^list);
			len+=add_sorted(sorted+len, h->short_ref, h->short_ref_count, cur_poc, 0^list);
			assert(len<=32);
			len= build_def_list(h->default_ref_list[list]    , sorted     , len, 0, s->picture_structure);
			len+=build_def_list(h->default_ref_list[list]+len, h->long_ref, 16 , 1, s->picture_structure);
			assert(len<=32);

			if(len < h->ref_count[list])
				memset(&h->default_ref_list[list][len], 0, sizeof(Picture)*(h->ref_count[list] - len));
			lens[list]= len;
		}

		if(lens[0] == lens[1] && lens[1] > 1)
		{
			for(i=0; i<lens[0] && h->default_ref_list[0][i].data[0] == h->default_ref_list[1][i].data[0]; i++);
			if(i == lens[0])
				FFSWAP(Picture, h->default_ref_list[1][0], h->default_ref_list[1][1]);
		}
	}
	else
	{
		len = build_def_list(h->default_ref_list[0]    , h->short_ref, h->short_ref_count, 0, s->picture_structure);
		len+= build_def_list(h->default_ref_list[0]+len, h-> long_ref, 16                , 1, s->picture_structure);
		assert(len <= 32);
		if(len < h->ref_count[0])
			memset(&h->default_ref_list[0][len], 0, sizeof(Picture)*(h->ref_count[0] - len));
	}
#ifdef TRACE
	for (i=0; i<h->ref_count[0]; i++)
	{
		tprintf(h->s.avctx, "List0: %s fn:%d 0x%p\n", (h->default_ref_list[0][i].long_ref ? "LT" : "ST"), h->default_ref_list[0][i].pic_id, h->default_ref_list[0][i].data[0]);
	}
	if(h->slice_type_nos==FF_B_TYPE)
	{
		for (i=0; i<h->ref_count[1]; i++)
		{
			tprintf(h->s.avctx, "List1: %s fn:%d 0x%p\n", (h->default_ref_list[1][i].long_ref ? "LT" : "ST"), h->default_ref_list[1][i].pic_id, h->default_ref_list[1][i].data[0]);
		}
	}
#endif
	return 0;
}
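
Every example on this page uses FFmpeg's FFSWAP macro, whose real definition lives in libavutil/common.h. As a hedged point of reference only, a type-parameterized swap macro of this kind is normally just a temporary-variable exchange wrapped in do/while(0) so it behaves like a single statement:

/* Illustrative sketch, not FFmpeg's authoritative definition. Like any
 * such macro it evaluates its arguments more than once, so arguments
 * with side effects should be avoided. */
#define SWAP_SKETCH(type, a, b) do { type tmp_ = (b); (b) = (a); (a) = tmp_; } while (0)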
Example #2
static void do_swizzle(AVFrame *frame, int x, int y)
{
    int i;
    for (i = 0; i < frame->linesize[0] * frame->height; i += 4) {
        uint8_t *src = frame->data[0] + i;
        FFSWAP(uint8_t, src[x], src[y]);
    }
}
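
The same per-pixel swizzle can be written against a plain packed buffer, independent of AVFrame. A minimal sketch with illustrative names (Example #16 below shows how the DDS decoder drives do_swizzle() with concrete channel indices, e.g. do_swizzle(frame, 0, 3) to swap R and A):

#include <stddef.h>
#include <stdint.h>

/* Swap channel x and channel y of every 4-byte packed pixel in buf. */
static void swizzle_packed4(uint8_t *buf, size_t size, int x, int y)
{
    size_t i;
    for (i = 0; i + 3 < size; i += 4) {
        uint8_t tmp = buf[i + x];
        buf[i + x]  = buf[i + y];
        buf[i + y]  = tmp;
    }
}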
Example #3
static int32_t add_cb(int32_t a, int32_t b)
{
    if (a < b)
        FFSWAP(int32_t, a, b);

    if (a - b >= 256)
        return a;
    return a + cb_to_add[a - b];
}
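
add_cb() reads like table-based addition of two values that live on a logarithmic scale: the cb_to_add[] lookup supplies the correction term, and differences of 256 or more are treated as negligible. Assuming that reading, the underlying identity is log(x + y) = log(x) + log(1 + y/x) for x >= y, which a floating-point sketch makes explicit:

#include <math.h>

/* Hedged sketch of the math that add_cb() presumably approximates with a
 * precomputed table: adding two base-2 log-domain values. */
static double log2_add(double a, double b)
{
    if (a < b) { double t = a; a = b; b = t; }
    return a + log2(1.0 + exp2(b - a));   /* = log2(2^a + 2^b) */
}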
Example #4
File: mux.c  Project: pigoz/libav
//FIXME merge with compute_pkt_fields
static int compute_pkt_fields2(AVFormatContext *s, AVStream *st, AVPacket *pkt)
{
    int delay = FFMAX(st->codec->has_b_frames, !!st->codec->max_b_frames);
    int num, den, i;

    av_dlog(s, "compute_pkt_fields2: pts:%" PRId64 " dts:%" PRId64 " cur_dts:%" PRId64 " b:%d size:%d st:%d\n",
            pkt->pts, pkt->dts, st->cur_dts, delay, pkt->size, pkt->stream_index);

/*    if(pkt->pts == AV_NOPTS_VALUE && pkt->dts == AV_NOPTS_VALUE)
 *      return AVERROR(EINVAL);*/

    /* duration field */
    if (pkt->duration == 0) {
        ff_compute_frame_duration(s, &num, &den, st, NULL, pkt);
        if (den && num) {
            pkt->duration = av_rescale(1, num * (int64_t)st->time_base.den * st->codec->ticks_per_frame, den * (int64_t)st->time_base.num);
        }
    }

    if (pkt->pts == AV_NOPTS_VALUE && pkt->dts != AV_NOPTS_VALUE && delay == 0)
        pkt->pts = pkt->dts;

    //calculate dts from pts
    if (pkt->pts != AV_NOPTS_VALUE && pkt->dts == AV_NOPTS_VALUE && delay <= MAX_REORDER_DELAY) {
        st->pts_buffer[0] = pkt->pts;
        for (i = 1; i < delay + 1 && st->pts_buffer[i] == AV_NOPTS_VALUE; i++)
            st->pts_buffer[i] = pkt->pts + (i - delay - 1) * pkt->duration;
        for (i = 0; i<delay && st->pts_buffer[i] > st->pts_buffer[i + 1]; i++)
            FFSWAP(int64_t, st->pts_buffer[i], st->pts_buffer[i + 1]);

        pkt->dts = st->pts_buffer[0];
    }

    if (st->cur_dts && st->cur_dts != AV_NOPTS_VALUE &&
        ((!(s->oformat->flags & AVFMT_TS_NONSTRICT) &&
          st->cur_dts >= pkt->dts) || st->cur_dts > pkt->dts)) {
        av_log(s, AV_LOG_ERROR,
               "Application provided invalid, non monotonically increasing dts to muxer in stream %d: %" PRId64 " >= %" PRId64 "\n",
               st->index, st->cur_dts, pkt->dts);
        return AVERROR(EINVAL);
    }
    if (pkt->dts != AV_NOPTS_VALUE && pkt->pts != AV_NOPTS_VALUE && pkt->pts < pkt->dts) {
        av_log(s, AV_LOG_ERROR,
               "pts %" PRId64 " < dts %" PRId64 " in stream %d\n",
               pkt->pts, pkt->dts,
               st->index);
        return AVERROR(EINVAL);
    }

    av_dlog(s, "av_write_frame: pts2:%"PRId64" dts2:%"PRId64"\n",
            pkt->pts, pkt->dts);
    st->cur_dts = pkt->dts;

    return 0;
}
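
The dts derivation above keeps a small window of the most recent pts values sorted, so that once the reorder delay is covered the smallest value in the window can be used as dts. In isolation, and with illustrative names, the bookkeeping is:

#include <stdint.h>

#define SWAP_I64(a, b) do { int64_t t_ = (b); (b) = (a); (a) = t_; } while (0)

/* buffer holds delay+1 entries and is already sorted; overwrite slot 0
 * with the new pts, restore order with one forward bubble pass, and the
 * smallest entry ends up at index 0. */
static int64_t dts_from_pts_window(int64_t *buffer, int delay, int64_t new_pts)
{
    int i;
    buffer[0] = new_pts;
    for (i = 0; i < delay && buffer[i] > buffer[i + 1]; i++)
        SWAP_I64(buffer[i], buffer[i + 1]);
    return buffer[0];
}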
Example #5
void av_rc4_crypt(AVRC4 *r, uint8_t *dst, const uint8_t *src, int count, uint8_t *iv, int decrypt) {
    uint8_t x = r->x, y = r->y;
    uint8_t *state = r->state;
    while (count-- > 0) {
        uint8_t sum = state[x] + state[y];
        FFSWAP(uint8_t, state[x], state[y]);
        *dst++ = src ? *src++ ^ state[sum] : state[sum];
        x++;
        y += state[x];
    }
    r->x = x; r->y = y;
}
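
Combined with av_rc4_init() from Example #10, a minimal usage sketch of the RC4 API (the header name libavutil/rc4.h is assumed; encryption and decryption are the same keystream XOR, and the iv argument is unused in the implementation shown above):

#include <stdint.h>
#include <libavutil/rc4.h>

/* Hedged usage sketch: key_bits must be a multiple of 8 or av_rc4_init()
 * fails (see Example #10); dst may equal src for in-place operation. */
static int rc4_apply(uint8_t *dst, const uint8_t *src, int len,
                     const uint8_t *key, int key_bits)
{
    AVRC4 rc4;
    int ret = av_rc4_init(&rc4, key, key_bits, 0);
    if (ret < 0)
        return ret;
    av_rc4_crypt(&rc4, dst, src, len, NULL, 0);
    return 0;
}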
Example #6
static void heap_sift(HeapElem *h, int root, int size)
{
    while(root*2+1 < size) {
        int child = root*2+1;
        if(child < size-1 && h[child].val > h[child+1].val)
            child++;
        if(h[root].val > h[child].val) {
            FFSWAP(HeapElem, h[root], h[child]);
            root = child;
        } else
            break;
    }
}
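
heap_sift() is a textbook min-heap sift-down on the val field: the smaller value moves toward the root. Calling it once for every internal node, last parent first, turns an arbitrary array into a heap. A sketch, with HeapElem reduced to an assumed, illustrative layout:

#include <stdint.h>

typedef struct HeapElem { uint64_t val; int name; } HeapElem;  /* illustrative */

static void heapify(HeapElem *h, int size)
{
    int root;
    for (root = size / 2 - 1; root >= 0; root--)
        heap_sift(h, root, size);
}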
Example #7
void ff_rc4_enc(const uint8_t *key, int keylen, uint8_t *data, int datalen) {
    int i, j;
    uint8_t x, y;
    uint8_t state[256];
    for (i = 0; i < 256; i++)
        state[i] = i;
    y = 0;
    // j is i % keylen
    for (j = 0, i = 0; i < 256; i++, j++) {
        if (j == keylen) j = 0;
        y += state[i] + key[j];
        FFSWAP(uint8_t, state[i], state[y]);
    }
    // state initialized, now do the real encryption
    x = 1; y = state[1];
    while (datalen-- > 0) {
        uint8_t sum = state[x] + state[y];
        FFSWAP(uint8_t, state[x], state[y]);
        *data++ ^= state[sum];
        x++;
        y += state[x];
    }
}
Example #8
/**
 * The nodes array must hold 2*nb_codes entries;
 * count must be set for the first nb_codes nodes.
 */
int ff_huff_build_tree(AVCodecContext *avctx, VLC *vlc, int nb_codes,
                       Node *nodes, HuffCmp cmp, int flags)
{
    int i, j;
    int cur_node;
    int64_t sum = 0;

    for(i = 0; i < nb_codes; i++)
    {
        nodes[i].sym = i;
        nodes[i].n0 = -2;
        sum += nodes[i].count;
    }

    if(sum >> 31)
    {
        av_log(avctx, AV_LOG_ERROR, "Too high symbol frequencies. Tree construction is not possible\n");
        return -1;
    }
    qsort(nodes, nb_codes, sizeof(Node), cmp);
    cur_node = nb_codes;
    nodes[nb_codes*2-1].count = 0;
    for(i = 0; i < nb_codes * 2 - 1; i += 2)
    {
        nodes[cur_node].sym = HNODE;
        nodes[cur_node].count = nodes[i].count + nodes[i+1].count;
        nodes[cur_node].n0 = i;
        for(j = cur_node; j > 0; j--)
        {
            if(nodes[j].count > nodes[j-1].count ||
                    (nodes[j].count == nodes[j-1].count &&
                     (!(flags & FF_HUFFMAN_FLAG_HNODE_FIRST) ||
                      nodes[j].n0 == j - 1 || nodes[j].n0 == j - 2 ||
                      (nodes[j].sym != HNODE && nodes[j-1].sym != HNODE))))
                break;
            FFSWAP(Node, nodes[j], nodes[j-1]);
        }
        cur_node++;
    }
    if(build_huff_tree(vlc, nodes, nb_codes * 2 - 2, flags) < 0)
    {
        av_log(avctx, AV_LOG_ERROR, "Error building tree\n");
        return -1;
    }
    return 0;
}
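
The caller supplies the HuffCmp comparison used by the qsort() above, and the insertion loop that follows keeps counts in non-decreasing order, so the comparator is expected to sort leaves by ascending count. One plausible comparator, with the symbol index as a deterministic tiebreak (field names follow the Node usage above; this is an illustration, not the library's own function):

static int huff_cmp_sketch(const void *va, const void *vb)
{
    const Node *a = va, *b = vb;
    if (a->count != b->count)
        return a->count > b->count ? 1 : -1;
    return a->sym - b->sym;   /* stable, deterministic ordering for equal counts */
}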
Example #9
static void swap_channel_layouts_on_filter(AVFilterContext *filter)
{
    AVFilterLink *link = NULL;
    uint64_t chlayout;
    int i, j;

    for (i = 0; i < filter->nb_inputs; i++) {
        link = filter->inputs[i];

        if (link->type == AVMEDIA_TYPE_AUDIO &&
            link->out_channel_layouts->nb_channel_layouts == 1)
            break;
    }
    if (i == filter->nb_inputs)
        return;

    chlayout = link->out_channel_layouts->channel_layouts[0];

    for (i = 0; i < filter->nb_outputs; i++) {
        AVFilterLink *outlink = filter->outputs[i];
        int best_idx, best_score = INT_MIN;

        if (outlink->type != AVMEDIA_TYPE_AUDIO ||
            outlink->in_channel_layouts->nb_channel_layouts < 2)
            continue;

        for (j = 0; j < outlink->in_channel_layouts->nb_channel_layouts; j++) {
            uint64_t out_chlayout = outlink->in_channel_layouts->channel_layouts[j];
            int matched_channels  = av_get_channel_layout_nb_channels(chlayout &
                                                                      out_chlayout);
            int extra_channels     = av_get_channel_layout_nb_channels(out_chlayout &
                                                                       (~chlayout));
            int score = matched_channels - extra_channels;

            if (score > best_score) {
                best_score = score;
                best_idx   = j;
            }
        }
        FFSWAP(uint64_t, outlink->in_channel_layouts->channel_layouts[0],
               outlink->in_channel_layouts->channel_layouts[best_idx]);
    }

}
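
The score here is simply matched channels minus extra channels against the one known input layout. As a worked illustration: for a stereo input, a stereo candidate scores 2 - 0 = 2 while a 5.1 candidate scores 2 - 4 = -2, so the stereo layout is swapped to index 0 and preferred. The same computation factored out as a sketch (layout constants from libavutil/channel_layout.h):

#include <stdint.h>
#include <libavutil/channel_layout.h>

static int layout_score_sketch(uint64_t in_chlayout, uint64_t out_chlayout)
{
    int matched = av_get_channel_layout_nb_channels(in_chlayout & out_chlayout);
    int extra   = av_get_channel_layout_nb_channels(out_chlayout & ~in_chlayout);
    return matched - extra;
}

/* layout_score_sketch(AV_CH_LAYOUT_STEREO, AV_CH_LAYOUT_STEREO)  ->  2
 * layout_score_sketch(AV_CH_LAYOUT_STEREO, AV_CH_LAYOUT_5POINT1) -> -2 */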
Example #10
int av_rc4_init(AVRC4 *r, const uint8_t *key, int key_bits, int decrypt) {
    int i, j;
    uint8_t y;
    uint8_t *state = r->state;
    int keylen = key_bits >> 3;
    if (key_bits & 7)
        return -1;
    for (i = 0; i < 256; i++)
        state[i] = i;
    y = 0;
    // j is i % keylen
    for (j = 0, i = 0; i < 256; i++, j++) {
        if (j == keylen) j = 0;
        y += state[i] + key[j];
        FFSWAP(uint8_t, state[i], state[y]);
    }
    r->x = 1;
    r->y = state[1];
    return 0;
}
Example #11
static void h264_initialise_ref_list(H264Context *h, H264SliceContext *sl)
{
    int i, len;
    int j;

    if (sl->slice_type_nos == AV_PICTURE_TYPE_B) {
        H264Picture *sorted[32];
        int cur_poc, list;
        int lens[2];

        if (FIELD_PICTURE(h))
            cur_poc = h->cur_pic_ptr->field_poc[h->picture_structure == PICT_BOTTOM_FIELD];
        else
            cur_poc = h->cur_pic_ptr->poc;

        for (list = 0; list < 2; list++) {
            len  = add_sorted(sorted,       h->short_ref, h->short_ref_count, cur_poc, 1 ^ list);
            len += add_sorted(sorted + len, h->short_ref, h->short_ref_count, cur_poc, 0 ^ list);
            av_assert0(len <= 32);

            len  = build_def_list(sl->ref_list[list], FF_ARRAY_ELEMS(sl->ref_list[0]),
                                  sorted, len, 0, h->picture_structure);
            len += build_def_list(sl->ref_list[list] + len,
                                  FF_ARRAY_ELEMS(sl->ref_list[0]) - len,
                                  h->long_ref, 16, 1, h->picture_structure);
            av_assert0(len <= 32);

            if (len < sl->ref_count[list])
                memset(&sl->ref_list[list][len], 0, sizeof(H264Ref) * (sl->ref_count[list] - len));
            lens[list] = len;
        }

        if (lens[0] == lens[1] && lens[1] > 1) {
            for (i = 0; i < lens[0] &&
                        sl->ref_list[0][i].parent->f->buf[0]->buffer ==
                        sl->ref_list[1][i].parent->f->buf[0]->buffer; i++);
            if (i == lens[0]) {
                FFSWAP(H264Ref, sl->ref_list[1][0], sl->ref_list[1][1]);
            }
        }
    } else {
        len  = build_def_list(sl->ref_list[0], FF_ARRAY_ELEMS(sl->ref_list[0]),
                              h->short_ref, h->short_ref_count, 0, h->picture_structure);
        len += build_def_list(sl->ref_list[0] + len,
                              FF_ARRAY_ELEMS(sl->ref_list[0]) - len,
                              h-> long_ref, 16, 1, h->picture_structure);
        av_assert0(len <= 32);

        if (len < sl->ref_count[0])
            memset(&sl->ref_list[0][len], 0, sizeof(H264Ref) * (sl->ref_count[0] - len));
    }

    for (j = 0; j<1+(sl->slice_type_nos == AV_PICTURE_TYPE_B); j++) {
        for (i = 0; i < sl->ref_count[j]; i++) {
            if (sl->ref_list[j][i].parent) {
                if (mismatches_ref(h, sl->ref_list[j][i].parent)) {
                    av_log(h->avctx, AV_LOG_ERROR, "Discarding mismatching reference\n");
                    memset(&sl->ref_list[j][i], 0, sizeof(sl->ref_list[j][i]));
                }
            }
        }
    }
    for (i = 0; i < sl->list_count; i++)
        h->default_ref[i] = sl->ref_list[i][0];
}
Example #12
static void swap_channel_layouts_on_filter(AVFilterContext *filter)
{
    AVFilterLink *link = NULL;
    int i, j, k;

    for (i = 0; i < filter->nb_inputs; i++) {
        link = filter->inputs[i];

        if (link->type == AVMEDIA_TYPE_AUDIO &&
            link->out_channel_layouts->nb_channel_layouts == 1)
            break;
    }
    if (i == filter->nb_inputs)
        return;

    for (i = 0; i < filter->nb_outputs; i++) {
        AVFilterLink *outlink = filter->outputs[i];
        int best_idx = -1, best_score = INT_MIN, best_count_diff = INT_MAX;

        if (outlink->type != AVMEDIA_TYPE_AUDIO ||
            outlink->in_channel_layouts->nb_channel_layouts < 2)
            continue;

        for (j = 0; j < outlink->in_channel_layouts->nb_channel_layouts; j++) {
            uint64_t  in_chlayout = link->out_channel_layouts->channel_layouts[0];
            uint64_t out_chlayout = outlink->in_channel_layouts->channel_layouts[j];
            int  in_channels      = av_get_channel_layout_nb_channels(in_chlayout);
            int out_channels      = av_get_channel_layout_nb_channels(out_chlayout);
            int count_diff        = out_channels - in_channels;
            int matched_channels, extra_channels;
            int score = 100000;

            if (FF_LAYOUT2COUNT(in_chlayout) || FF_LAYOUT2COUNT(out_chlayout)) {
                /* Compute score in case the input or output layout encodes
                   a channel count; in this case the score is not altered by
                   the computation afterwards, as in_chlayout and
                   out_chlayout have both been set to 0 */
                if (FF_LAYOUT2COUNT(in_chlayout))
                    in_channels = FF_LAYOUT2COUNT(in_chlayout);
                if (FF_LAYOUT2COUNT(out_chlayout))
                    out_channels = FF_LAYOUT2COUNT(out_chlayout);
                score -= 10000 + FFABS(out_channels - in_channels) +
                         (in_channels > out_channels ? 10000 : 0);
                in_chlayout = out_chlayout = 0;
                /* Let the remaining computation run, even if the score
                   value is not altered */
            }

            /* channel substitution */
            for (k = 0; k < FF_ARRAY_ELEMS(ch_subst); k++) {
                uint64_t cmp0 = ch_subst[k][0];
                uint64_t cmp1 = ch_subst[k][1];
                if (( in_chlayout & cmp0) && (!(out_chlayout & cmp0)) &&
                    (out_chlayout & cmp1) && (!( in_chlayout & cmp1))) {
                    in_chlayout  &= ~cmp0;
                    out_chlayout &= ~cmp1;
                    /* add score for channel match, minus a deduction for
                       having to do the substitution */
                    score += 10 * av_get_channel_layout_nb_channels(cmp1) - 2;
                }
            }

            /* no penalty for LFE channel mismatch */
            if ( (in_chlayout & AV_CH_LOW_FREQUENCY) &&
                (out_chlayout & AV_CH_LOW_FREQUENCY))
                score += 10;
            in_chlayout  &= ~AV_CH_LOW_FREQUENCY;
            out_chlayout &= ~AV_CH_LOW_FREQUENCY;

            matched_channels = av_get_channel_layout_nb_channels(in_chlayout &
                                                                 out_chlayout);
            extra_channels   = av_get_channel_layout_nb_channels(out_chlayout &
                                                                 (~in_chlayout));
            score += 10 * matched_channels - 5 * extra_channels;

            if (score > best_score ||
                (count_diff < best_count_diff && score == best_score)) {
                best_score = score;
                best_idx   = j;
                best_count_diff = count_diff;
            }
        }
        av_assert0(best_idx >= 0);
        FFSWAP(uint64_t, outlink->in_channel_layouts->channel_layouts[0],
               outlink->in_channel_layouts->channel_layouts[best_idx]);
    }

}
Example #13
static int zerocodec_decode_frame(AVCodecContext *avctx, void *data,
                                  int *data_size, AVPacket *avpkt)
{
    ZeroCodecContext *zc = avctx->priv_data;
    AVFrame *pic         = avctx->coded_frame;
    AVFrame *prev_pic    = &zc->previous_frame;
    z_stream *zstream    = &zc->zstream;
    uint8_t *prev = prev_pic->data[0], *dst;
    int i, j, zret;

    pic->reference = 3;

    if (avpkt->flags & AV_PKT_FLAG_KEY) {
        pic->key_frame = 1;
        pic->pict_type = AV_PICTURE_TYPE_I;
    } else {
        if (!prev) {
            av_log(avctx, AV_LOG_ERROR, "Missing reference frame!\n");
            return AVERROR_INVALIDDATA;
        }
        pic->key_frame = 0;
        pic->pict_type = AV_PICTURE_TYPE_P;
    }

    if (avctx->get_buffer(avctx, pic) < 0) {
        av_log(avctx, AV_LOG_ERROR, "Could not allocate buffer.\n");
        return AVERROR(ENOMEM);
    }

    zret = inflateReset(zstream);

    if (zret != Z_OK) {
        av_log(avctx, AV_LOG_ERROR, "Could not reset inflate: %d\n", zret);
        return AVERROR(EINVAL);
    }

    zstream->next_in   = avpkt->data;
    zstream->avail_in  = avpkt->size;

    dst  = pic->data[0];

    /**
     * ZeroCodec has very simple interframe compression. If a value
     * is the same as the previous frame, set it to 0.
     */

    for (i = 0; i < avctx->height; i++) {
        zstream->next_out  = dst;
        zstream->avail_out = avctx->width << 1;
        zret = inflate(zstream, Z_SYNC_FLUSH);
        if (zret != Z_OK && zret != Z_STREAM_END) {
            av_log(avctx, AV_LOG_ERROR,
                   "Inflate failed with return code: %d\n", zret);
            return AVERROR(EINVAL);
        }

        if (!(avpkt->flags & AV_PKT_FLAG_KEY))
            for (j = 0; j < avctx->width << 1; j++)
                dst[j] += prev[j] & -!dst[j];

        prev += prev_pic->linesize[0];
        dst  += pic->linesize[0];
    }

    /* Release the previous buffer if need be */
    if (prev_pic->data[0])
        avctx->release_buffer(avctx, prev_pic);

    *data_size       = sizeof(AVFrame);
    *(AVFrame *)data = *pic;

    /* Store the previous frame for use later.
     * FFSWAP ensures that e.g. pic->data is NULLed. */
    FFSWAP(AVFrame, *pic, *prev_pic);

    return avpkt->size;
}
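
The line dst[j] += prev[j] & -!dst[j] above is a branchless way of saying "a zero byte in the current frame means: reuse the byte from the previous frame": -!dst[j] is all ones when dst[j] is 0 and zero otherwise. An equivalent, readable sketch of the per-byte operation:

#include <stdint.h>

/* What dst[j] += prev[j] & -!dst[j] computes for one byte pair. */
static inline uint8_t zc_merge_byte(uint8_t cur, uint8_t prev)
{
    return cur ? cur : prev;
}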
Example #14
static int scc_read_header(AVFormatContext *s)
{
    SCCContext *scc = s->priv_data;
    AVStream *st = avformat_new_stream(s, NULL);
    char line[4096], line2[4096];
    int count = 0, ret = 0;
    ptrdiff_t len2, len;
    uint8_t out[4096];
    FFTextReader tr;

    ff_text_init_avio(s, &tr, s->pb);

    if (!st)
        return AVERROR(ENOMEM);
    avpriv_set_pts_info(st, 64, 1, 1000);
    st->codecpar->codec_type = AVMEDIA_TYPE_SUBTITLE;
    st->codecpar->codec_id   = AV_CODEC_ID_EIA_608;

    while (!ff_text_eof(&tr)) {
        const int64_t pos = ff_text_pos(&tr);
        char *saveptr = NULL, *lline;
        int hh1, mm1, ss1, fs1, i;
        int hh2, mm2, ss2, fs2;
        int64_t ts_start, ts_end;
        AVPacket *sub;

        if (count == 0) {
            while (!ff_text_eof(&tr)) {
                len = ff_subtitles_read_line(&tr, line, sizeof(line));
                if (len > 13)
                    break;
            }
        }

        if (!strncmp(line, "Scenarist_SCC V1.0", 18))
            continue;
        if (sscanf(line, "%d:%d:%d%*[:;]%d", &hh1, &mm1, &ss1, &fs1) != 4)
            continue;

        ts_start = (hh1 * 3600LL + mm1 * 60LL + ss1) * 1000LL + fs1 * 33;

        while (!ff_text_eof(&tr)) {
            len2 = ff_subtitles_read_line(&tr, line2, sizeof(line2));
            if (len2 > 13)
                break;
        }
        if (sscanf(line2, "%d:%d:%d%*[:;]%d", &hh2, &mm2, &ss2, &fs2) != 4)
            continue;

        ts_end = (hh2 * 3600LL + mm2 * 60LL + ss2) * 1000LL + fs2 * 33;
        count++;

        lline = (char *)&line;
        lline += 12;

        for (i = 0; i < 4095; i += 3) {
            char *ptr = av_strtok(lline, " ", &saveptr);
            char c1, c2, c3, c4;

            if (!ptr)
                break;

            if (sscanf(ptr, "%c%c%c%c", &c1, &c2, &c3, &c4) != 4)
                break;

            lline = NULL;
            out[i+0] = 0xfc;
            out[i+1] = convert(c2) | (convert(c1) << 4);
            out[i+2] = convert(c4) | (convert(c3) << 4);
        }
        out[i] = 0;

        sub = ff_subtitles_queue_insert(&scc->q, out, i, 0);
        if (!sub)
            return AVERROR(ENOMEM);

        sub->pos = pos;
        sub->pts = ts_start;
        sub->duration = FFMAX(1200, ts_end - ts_start);
        memmove(line, line2, sizeof(line));
        FFSWAP(ptrdiff_t, len, len2);
    }

    ff_subtitles_queue_finalize(s, &scc->q);

    return ret;
}
Example #15
int ff_h264_fill_default_ref_list(H264Context *h, H264SliceContext *sl)
{
    int i, len;
    int j;

    if (sl->slice_type_nos == AV_PICTURE_TYPE_B) {
        H264Picture *sorted[32];
        int cur_poc, list;
        int lens[2];

        if (FIELD_PICTURE(h))
            cur_poc = h->cur_pic_ptr->field_poc[h->picture_structure == PICT_BOTTOM_FIELD];
        else
            cur_poc = h->cur_pic_ptr->poc;

        for (list = 0; list < 2; list++) {
            len  = add_sorted(sorted,       h->short_ref, h->short_ref_count, cur_poc, 1 ^ list);
            len += add_sorted(sorted + len, h->short_ref, h->short_ref_count, cur_poc, 0 ^ list);
            av_assert0(len <= 32);

            len  = build_def_list(h->default_ref_list[list], FF_ARRAY_ELEMS(h->default_ref_list[0]),
                                  sorted, len, 0, h->picture_structure);
            len += build_def_list(h->default_ref_list[list] + len,
                                  FF_ARRAY_ELEMS(h->default_ref_list[0]) - len,
                                  h->long_ref, 16, 1, h->picture_structure);
            av_assert0(len <= 32);

            if (len < sl->ref_count[list])
                memset(&h->default_ref_list[list][len], 0, sizeof(H264Ref) * (sl->ref_count[list] - len));
            lens[list] = len;
        }

        if (lens[0] == lens[1] && lens[1] > 1) {
            for (i = 0; i < lens[0] &&
                        h->default_ref_list[0][i].parent->f->buf[0]->buffer ==
                        h->default_ref_list[1][i].parent->f->buf[0]->buffer; i++);
            if (i == lens[0]) {
                FFSWAP(H264Ref, h->default_ref_list[1][0], h->default_ref_list[1][1]);
            }
        }
    } else {
        len  = build_def_list(h->default_ref_list[0], FF_ARRAY_ELEMS(h->default_ref_list[0]),
                              h->short_ref, h->short_ref_count, 0, h->picture_structure);
        len += build_def_list(h->default_ref_list[0] + len,
                              FF_ARRAY_ELEMS(h->default_ref_list[0]) - len,
                              h-> long_ref, 16, 1, h->picture_structure);
        av_assert0(len <= 32);

        if (len < sl->ref_count[0])
            memset(&h->default_ref_list[0][len], 0, sizeof(H264Ref) * (sl->ref_count[0] - len));
    }
#ifdef TRACE
    for (i = 0; i < sl->ref_count[0]; i++) {
        ff_tlog(h->avctx, "List0: %s fn:%d 0x%p\n",
                h->default_ref_list[0][i].parent ? (h->default_ref_list[0][i].parent->long_ref ? "LT" : "ST") : "NULL",
                h->default_ref_list[0][i].pic_id,
                h->default_ref_list[0][i].parent ? h->default_ref_list[0][i].parent->f->data[0] : 0);
    }
    if (sl->slice_type_nos == AV_PICTURE_TYPE_B) {
        for (i = 0; i < sl->ref_count[1]; i++) {
            ff_tlog(h->avctx, "List1: %s fn:%d 0x%p\n",
                    h->default_ref_list[1][i].parent ? (h->default_ref_list[1][i].parent->long_ref ? "LT" : "ST") : "NULL",
                    h->default_ref_list[1][i].pic_id,
                    h->default_ref_list[1][i].parent ? h->default_ref_list[1][i].parent->f->data[0] : 0);
        }
    }
#endif

    for (j = 0; j<1+(sl->slice_type_nos == AV_PICTURE_TYPE_B); j++) {
        for (i = 0; i < sl->ref_count[j]; i++) {
            if (h->default_ref_list[j][i].parent) {
                if (mismatches_ref(h, h->default_ref_list[j][i].parent)) {
                    av_log(h->avctx, AV_LOG_ERROR, "Discarding mismatching reference\n");
                    memset(&h->default_ref_list[j][i], 0, sizeof(h->default_ref_list[j][i]));
                }
            }
        }
    }

    return 0;
}
Example #16
static void run_postproc(AVCodecContext *avctx, AVFrame *frame)
{
    DDSContext *ctx = avctx->priv_data;
    int i, x_off;

    switch (ctx->postproc) {
    case DDS_ALPHA_EXP:
        /* Alpha-exponential mode divides each channel by the maximum
         * R, G or B value, and stores the multiplying factor in the
         * alpha channel. */
        av_log(avctx, AV_LOG_DEBUG, "Post-processing alpha exponent.\n");

        for (i = 0; i < frame->linesize[0] * frame->height; i += 4) {
            uint8_t *src = frame->data[0] + i;
            int r = src[0];
            int g = src[1];
            int b = src[2];
            int a = src[3];

            src[0] = r * a / 255;
            src[1] = g * a / 255;
            src[2] = b * a / 255;
            src[3] = 255;
        }
        break;
    case DDS_NORMAL_MAP:
        /* Normal maps work in the XYZ color space and they encode
         * X in R or in A, depending on the texture type, Y in G and
         * derive Z with a square root of the distance.
         *
         * http://www.realtimecollisiondetection.net/blog/?p=28 */
        av_log(avctx, AV_LOG_DEBUG, "Post-processing normal map.\n");

        x_off = ctx->tex_ratio == 8 ? 0 : 3;
        for (i = 0; i < frame->linesize[0] * frame->height; i += 4) {
            uint8_t *src = frame->data[0] + i;
            int x = src[x_off];
            int y = src[1];
            int z = 127;

            int d = (255 * 255 - x * x - y * y) / 2;
            if (d > 0)
                z = lrint(sqrtf(d));

            src[0] = x;
            src[1] = y;
            src[2] = z;
            src[3] = 255;
        }
        break;
    case DDS_RAW_YCOCG:
        /* Data is Y-Co-Cg-A and not RGBA, but they are represented
         * with the same masks in the DDPF header. */
        av_log(avctx, AV_LOG_DEBUG, "Post-processing raw YCoCg.\n");

        for (i = 0; i < frame->linesize[0] * frame->height; i += 4) {
            uint8_t *src = frame->data[0] + i;
            int a  = src[0];
            int cg = src[1] - 128;
            int co = src[2] - 128;
            int y  = src[3];

            src[0] = av_clip_uint8(y + co - cg);
            src[1] = av_clip_uint8(y + cg);
            src[2] = av_clip_uint8(y - co - cg);
            src[3] = a;
        }
        break;
    case DDS_SWAP_ALPHA:
        /* Alpha and Luma are stored swapped. */
        av_log(avctx, AV_LOG_DEBUG, "Post-processing swapped Luma/Alpha.\n");

        for (i = 0; i < frame->linesize[0] * frame->height; i += 2) {
            uint8_t *src = frame->data[0] + i;
            FFSWAP(uint8_t, src[0], src[1]);
        }
        break;
    case DDS_SWIZZLE_A2XY:
        /* Swap R and G, often used to restore a standard RGTC2. */
        av_log(avctx, AV_LOG_DEBUG, "Post-processing A2XY swizzle.\n");
        do_swizzle(frame, 0, 1);
        break;
    case DDS_SWIZZLE_RBXG:
        /* Swap G and A, then B and new A (G). */
        av_log(avctx, AV_LOG_DEBUG, "Post-processing RBXG swizzle.\n");
        do_swizzle(frame, 1, 3);
        do_swizzle(frame, 2, 3);
        break;
    case DDS_SWIZZLE_RGXB:
        /* Swap B and A. */
        av_log(avctx, AV_LOG_DEBUG, "Post-processing RGXB swizzle.\n");
        do_swizzle(frame, 2, 3);
        break;
    case DDS_SWIZZLE_RXBG:
        /* Swap G and A. */
        av_log(avctx, AV_LOG_DEBUG, "Post-processing RXBG swizzle.\n");
        do_swizzle(frame, 1, 3);
        break;
    case DDS_SWIZZLE_RXGB:
        /* Swap R and A (misleading name). */
        av_log(avctx, AV_LOG_DEBUG, "Post-processing RXGB swizzle.\n");
        do_swizzle(frame, 0, 3);
        break;
    case DDS_SWIZZLE_XGBR:
        /* Swap B and A, then R and new A (B). */
        av_log(avctx, AV_LOG_DEBUG, "Post-processing XGBR swizzle.\n");
        do_swizzle(frame, 2, 3);
        do_swizzle(frame, 0, 3);
        break;
    case DDS_SWIZZLE_XGXR:
        /* Swap G and A, then R and new A (G), then new R (G) and new G (A).
         * This variant does not store any B component. */
        av_log(avctx, AV_LOG_DEBUG, "Post-processing XGXR swizzle.\n");
        do_swizzle(frame, 1, 3);
        do_swizzle(frame, 0, 3);
        do_swizzle(frame, 0, 1);
        break;
    case DDS_SWIZZLE_XRBG:
        /* Swap G and A, then R and new A (G). */
        av_log(avctx, AV_LOG_DEBUG, "Post-processing XRBG swizzle.\n");
        do_swizzle(frame, 1, 3);
        do_swizzle(frame, 0, 3);
        break;
    }
}
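
The DDS_RAW_YCOCG branch above matches the usual YCoCg-to-RGB inverse, with Co and Cg stored offset by 128: R = Y + Co - Cg, G = Y + Cg, B = Y - Co - Cg, clipped to 8 bits. Factored out as a sketch:

#include <stdint.h>
#include <libavutil/common.h>   /* av_clip_uint8() */

/* Inverse YCoCg transform used above; co and cg are already re-centred
 * (stored value minus 128). */
static void ycocg_to_rgb_sketch(int y, int co, int cg,
                                uint8_t *r, uint8_t *g, uint8_t *b)
{
    *r = av_clip_uint8(y + co - cg);
    *g = av_clip_uint8(y + cg);
    *b = av_clip_uint8(y - co - cg);
}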
Example #17
static void swap_channel_layouts_on_filter(AVFilterContext *filter)
{
    AVFilterLink *link = NULL;
    int i, j, k;

    for (i = 0; i < filter->nb_inputs; i++) {
        link = filter->inputs[i];

        if (link->type == AVMEDIA_TYPE_AUDIO &&
            link->out_channel_layouts->nb_channel_layouts == 1)
            break;
    }
    if (i == filter->nb_inputs)
        return;

    for (i = 0; i < filter->nb_outputs; i++) {
        AVFilterLink *outlink = filter->outputs[i];
        int best_idx = -1, best_score = INT_MIN, best_count_diff = INT_MAX;

        if (outlink->type != AVMEDIA_TYPE_AUDIO ||
            outlink->in_channel_layouts->nb_channel_layouts < 2)
            continue;

        for (j = 0; j < outlink->in_channel_layouts->nb_channel_layouts; j++) {
            uint64_t  in_chlayout = link->out_channel_layouts->channel_layouts[0];
            uint64_t out_chlayout = outlink->in_channel_layouts->channel_layouts[j];
            int  in_channels      = av_get_channel_layout_nb_channels(in_chlayout);
            int out_channels      = av_get_channel_layout_nb_channels(out_chlayout);
            int count_diff        = out_channels - in_channels;
            int matched_channels, extra_channels;
            int score = 0;

            /* channel substitution */
            for (k = 0; k < FF_ARRAY_ELEMS(ch_subst); k++) {
                uint64_t cmp0 = ch_subst[k][0];
                uint64_t cmp1 = ch_subst[k][1];
                if (( in_chlayout & cmp0) && (!(out_chlayout & cmp0)) &&
                    (out_chlayout & cmp1) && (!( in_chlayout & cmp1))) {
                    in_chlayout  &= ~cmp0;
                    out_chlayout &= ~cmp1;
                    /* add score for channel match, minus a deduction for
                       having to do the substitution */
                    score += 10 * av_get_channel_layout_nb_channels(cmp1) - 2;
                }
            }

            /* no penalty for LFE channel mismatch */
            if ( (in_chlayout & AV_CH_LOW_FREQUENCY) &&
                (out_chlayout & AV_CH_LOW_FREQUENCY))
                score += 10;
            in_chlayout  &= ~AV_CH_LOW_FREQUENCY;
            out_chlayout &= ~AV_CH_LOW_FREQUENCY;

            matched_channels = av_get_channel_layout_nb_channels(in_chlayout &
                                                                 out_chlayout);
            extra_channels   = av_get_channel_layout_nb_channels(out_chlayout &
                                                                 (~in_chlayout));
            score += 10 * matched_channels - 5 * extra_channels;

            if (score > best_score ||
                (count_diff < best_count_diff && score == best_score)) {
                best_score = score;
                best_idx   = j;
                best_count_diff = count_diff;
            }
        }
        av_assert0(best_idx >= 0);
        FFSWAP(uint64_t, outlink->in_channel_layouts->channel_layouts[0],
               outlink->in_channel_layouts->channel_layouts[best_idx]);
    }

}