Example 1
static void dca_qmf_32_subbands(float samples_in[32][8], int sb_act,
                                SynthFilterContext *synth, FFTContext *imdct,
                                float synth_buf_ptr[512],
                                int *synth_buf_offset, float synth_buf2[32],
                                const float window[512], float *samples_out,
                                float raXin[32], float scale)
{
    int i;
    int subindex;

    /* Clear the inactive subbands */
    for (i = sb_act; i < 32; i++)
        raXin[i] = 0.0;

    /* Reconstructed channel sample index */
    for (subindex = 0; subindex < 8; subindex++) {
        /* Load in one sample from each active subband */
        for (i = 0; i < sb_act; i++) {
            /* Negate the sample for subbands where ((i - 1) & 2) is set
             * (i == 0 or 3 mod 4) by toggling the float's sign bit */
            unsigned sign = (i - 1) & 2;
            uint32_t v    = AV_RN32A(&samples_in[i][subindex]) ^ sign << 30;
            AV_WN32A(&raXin[i], v);
        }

        synth->synth_filter_float(imdct, synth_buf_ptr, synth_buf_offset,
                                  synth_buf2, window, samples_out, raXin,
                                  scale);
        samples_out += 32;
    }
}
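
The XOR in the inner loop above is a branch-free float negation: toggling bit 31 of the bit pattern flips the IEEE-754 sign. A minimal standalone sketch of the same trick, assuming nothing from the decoder (flip_sign is an illustrative name, and memcpy stands in for the AV_RN32A/AV_WN32A macros):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Negate a float by toggling its IEEE-754 sign bit; flip must be 0 or 1. */
static float flip_sign(float x, unsigned flip)
{
    uint32_t bits;
    memcpy(&bits, &x, sizeof(bits));    /* portable stand-in for AV_RN32A */
    bits ^= flip << 31;
    memcpy(&x, &bits, sizeof(bits));    /* portable stand-in for AV_WN32A */
    return x;
}

int main(void)
{
    for (int i = 0; i < 8; i++) {
        /* Same selection as the decoder: ((i - 1) & 2) is 2 for i == 0, 3 (mod 4) */
        unsigned flip = ((i - 1) & 2) >> 1;
        printf("subband %d: 1.0 -> %+.1f\n", i, flip_sign(1.0f, flip));
    }
    return 0;
}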
Example 2
/* FIXME: This is adapted from ff_h264_decode_nal, avoiding duplication
 * between these functions would be nice. */
int ff_hevc_extract_rbsp(HEVCContext *s, const uint8_t *src, int length,
                         HEVCNAL *nal)
{
    int i, si, di;
    uint8_t *dst;

    if (s)
        s->skipped_bytes = 0;
/* i points at a zero byte: 00 00 followed by a byte < 3 can only begin a new
 * start code, so the current NAL ends at i; 00 00 03 is an escape.  Either
 * way, stop scanning. */
#define STARTCODE_TEST                                                  \
        if (i + 2 < length && src[i + 1] == 0 && src[i + 2] <= 3) {     \
            if (src[i + 2] != 3) {                                      \
                /* startcode, so we must be past the end */             \
                length = i;                                             \
            }                                                           \
            break;                                                      \
        }
#if HAVE_FAST_UNALIGNED
/* Back up one byte in case src[i] is already the second zero of a 00 00 pair,
 * then advance to the next zero byte. */
#define FIND_FIRST_ZERO                                                 \
        if (i > 0 && !src[i])                                           \
            i--;                                                        \
        while (src[i])                                                  \
            i++
#if HAVE_FAST_64BIT
    for (i = 0; i + 1 < length; i += 9) {
        if (!((~AV_RN64A(src + i) &
               (AV_RN64A(src + i) - 0x0100010001000101ULL)) &
              0x8000800080008080ULL))
            continue;
        FIND_FIRST_ZERO;
        STARTCODE_TEST;
        i -= 7;
    }
#else
    for (i = 0; i + 1 < length; i += 5) {
        if (!((~AV_RN32A(src + i) &
               (AV_RN32A(src + i) - 0x01000101U)) &
              0x80008080U))
            continue;
        FIND_FIRST_ZERO;
        STARTCODE_TEST;
        i -= 3;
    }
#endif /* HAVE_FAST_64BIT */
#else
    for (i = 0; i + 1 < length; i += 2) {
        if (src[i])
            continue;
        if (i > 0 && src[i - 1] == 0)
            i--;
        STARTCODE_TEST;
    }
#endif /* HAVE_FAST_UNALIGNED */

    if (i >= length - 1) { // no escaped 0
        nal->data     =
        nal->raw_data = src;
        nal->size     =
        nal->raw_size = length;
        return length;
    }

    av_fast_malloc(&nal->rbsp_buffer, &nal->rbsp_buffer_size,
                   length + FF_INPUT_BUFFER_PADDING_SIZE);
    if (!nal->rbsp_buffer)
        return AVERROR(ENOMEM);

    dst = nal->rbsp_buffer;

    memcpy(dst, src, i);
    si = di = i;
    while (si + 2 < length) {
        // remove escapes (very rare 1:2^22)
        if (src[si + 2] > 3) {
            dst[di++] = src[si++];
            dst[di++] = src[si++];
        } else if (src[si] == 0 && src[si + 1] == 0) {
            if (src[si + 2] == 3) { // escape
                dst[di++] = 0;
                dst[di++] = 0;
                si       += 3;

                if (s) {
                    s->skipped_bytes++;
                    if (s->skipped_bytes_pos_size < s->skipped_bytes) {
                        s->skipped_bytes_pos_size *= 2;
                        av_reallocp_array(&s->skipped_bytes_pos,
                                          s->skipped_bytes_pos_size,
                                          sizeof(*s->skipped_bytes_pos));
                        if (!s->skipped_bytes_pos)
                            return AVERROR(ENOMEM);
                    }
                    if (s->skipped_bytes_pos)
                        s->skipped_bytes_pos[s->skipped_bytes - 1] = di - 1;
                }
                continue;
            } else // next start code
                goto nsc;
        }

        dst[di++] = src[si++];
    }
    while (si < length)
        dst[di++] = src[si++];

nsc:
    memset(dst + di, 0, FF_INPUT_BUFFER_PADDING_SIZE);

    nal->data = dst;
    nal->size = di;
    nal->raw_data = src;
    nal->raw_size = si;
    return si;
}
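
The escape handling above removes HEVC/H.264 emulation prevention bytes: every 0x03 that directly follows two zero bytes in the raw NAL is dropped. A naive, unoptimized sketch of just that step (illustrative only; strip_emulation_prevention is a made-up name, and it omits the start-code truncation and skipped_bytes bookkeeping that the FFmpeg routine also performs):

#include <stddef.h>
#include <stdint.h>

/* Copy src to dst, dropping every 0x03 byte that directly follows two zero
 * bytes (the emulation prevention byte).  dst must hold at least len bytes.
 * Returns the number of bytes written. */
static size_t strip_emulation_prevention(uint8_t *dst, const uint8_t *src, size_t len)
{
    size_t di = 0, zeros = 0;

    for (size_t si = 0; si < len; si++) {
        if (zeros >= 2 && src[si] == 0x03) {
            zeros = 0;              /* drop the escape byte itself */
            continue;
        }
        zeros = (src[si] == 0) ? zeros + 1 : 0;
        dst[di++] = src[si];
    }
    return di;
}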
Example 3
static void pred_spatial_direct_motion(H264Context * const h, int *mb_type){
    MpegEncContext * const s = &h->s;
    int b8_stride = 2;
    int b4_stride = h->b_stride;
    int mb_xy = h->mb_xy, mb_y = s->mb_y;
    int mb_type_col[2];
    const int16_t (*l1mv0)[2], (*l1mv1)[2];
    const int8_t *l1ref0, *l1ref1;
    const int is_b8x8 = IS_8X8(*mb_type);
    unsigned int sub_mb_type= MB_TYPE_L0L1;
    int i8, i4;
    int ref[2];
    int mv[2];
    int list;

    assert(h->ref_list[1][0].f.reference & 3);

    await_reference_mb_row(h, &h->ref_list[1][0], s->mb_y + !!IS_INTERLACED(*mb_type));

#define MB_TYPE_16x16_OR_INTRA (MB_TYPE_16x16|MB_TYPE_INTRA4x4|MB_TYPE_INTRA16x16|MB_TYPE_INTRA_PCM)


    /* ref = min(neighbors) */
    for(list=0; list<2; list++){
        int left_ref = h->ref_cache[list][scan8[0] - 1];
        int top_ref  = h->ref_cache[list][scan8[0] - 8];
        int refc = h->ref_cache[list][scan8[0] - 8 + 4];
        const int16_t *C= h->mv_cache[list][ scan8[0] - 8 + 4];
        if(refc == PART_NOT_AVAILABLE){
            refc = h->ref_cache[list][scan8[0] - 8 - 1];
            C    = h-> mv_cache[list][scan8[0] - 8 - 1];
        }
        ref[list] = FFMIN3((unsigned)left_ref, (unsigned)top_ref, (unsigned)refc);
        if(ref[list] >= 0){
            //this is just pred_motion() but with the cases removed that cannot happen for direct blocks
            const int16_t * const A= h->mv_cache[list][ scan8[0] - 1 ];
            const int16_t * const B= h->mv_cache[list][ scan8[0] - 8 ];

            int match_count= (left_ref==ref[list]) + (top_ref==ref[list]) + (refc==ref[list]);
            if(match_count > 1){ //most common
                mv[list]= pack16to32(mid_pred(A[0], B[0], C[0]),
                                     mid_pred(A[1], B[1], C[1]) );
            }else {
                assert(match_count==1);
                if(left_ref==ref[list]){
                    mv[list]= AV_RN32A(A);
                }else if(top_ref==ref[list]){
                    mv[list]= AV_RN32A(B);
                }else{
                    mv[list]= AV_RN32A(C);
                }
            }
        }else{
            int mask= ~(MB_TYPE_L0 << (2*list));
            mv[list] = 0;
            ref[list] = -1;
            if(!is_b8x8)
                *mb_type &= mask;
            sub_mb_type &= mask;
        }
    }
    if(ref[0] < 0 && ref[1] < 0){
        ref[0] = ref[1] = 0;
        if(!is_b8x8)
            *mb_type |= MB_TYPE_L0L1;
        sub_mb_type |= MB_TYPE_L0L1;
    }

    /* Both predicted MVs are zero and the MB is not partitioned into 8x8
     * blocks: make it a single zero-motion 16x16 direct macroblock */
    if(!(is_b8x8|mv[0]|mv[1])){
        fill_rectangle(&h->ref_cache[0][scan8[0]], 4, 4, 8, (uint8_t)ref[0], 1);
        fill_rectangle(&h->ref_cache[1][scan8[0]], 4, 4, 8, (uint8_t)ref[1], 1);
        fill_rectangle(&h->mv_cache[0][scan8[0]], 4, 4, 8, 0, 4);
        fill_rectangle(&h->mv_cache[1][scan8[0]], 4, 4, 8, 0, 4);
        *mb_type= (*mb_type & ~(MB_TYPE_8x8|MB_TYPE_16x8|MB_TYPE_8x16|MB_TYPE_P1L0|MB_TYPE_P1L1))|MB_TYPE_16x16|MB_TYPE_DIRECT2;
        return;
    }

    if (IS_INTERLACED(h->ref_list[1][0].f.mb_type[mb_xy])) { // AFL/AFR/FR/FL -> AFL/FL
        if (!IS_INTERLACED(*mb_type)) {                          //     AFR/FR    -> AFL/FL
            mb_y = (s->mb_y&~1) + h->col_parity;
            mb_xy= s->mb_x + ((s->mb_y&~1) + h->col_parity)*s->mb_stride;
            b8_stride = 0;
        }else{
            mb_y  += h->col_fieldoff;
            mb_xy += s->mb_stride*h->col_fieldoff; // non zero for FL -> FL & differ parity
        }
        goto single_col;
    }else{                                               // AFL/AFR/FR/FL -> AFR/FR
        if(IS_INTERLACED(*mb_type)){                     // AFL       /FL -> AFR/FR
            mb_y = s->mb_y&~1;
            mb_xy= s->mb_x + (s->mb_y&~1)*s->mb_stride;
            mb_type_col[0] = h->ref_list[1][0].f.mb_type[mb_xy];
            mb_type_col[1] = h->ref_list[1][0].f.mb_type[mb_xy + s->mb_stride];
            b8_stride = 2+4*s->mb_stride;
            b4_stride *= 6;
            if (IS_INTERLACED(mb_type_col[0]) != IS_INTERLACED(mb_type_col[1])) {
                mb_type_col[0] &= ~MB_TYPE_INTERLACED;
                mb_type_col[1] &= ~MB_TYPE_INTERLACED;
            }

            sub_mb_type |= MB_TYPE_16x16|MB_TYPE_DIRECT2; /* B_SUB_8x8 */
            if(    (mb_type_col[0] & MB_TYPE_16x16_OR_INTRA)
                && (mb_type_col[1] & MB_TYPE_16x16_OR_INTRA)
                && !is_b8x8){
                *mb_type   |= MB_TYPE_16x8 |MB_TYPE_DIRECT2; /* B_16x8 */
            }else{
                *mb_type   |= MB_TYPE_8x8;
            }
        }else{                                           //     AFR/FR    -> AFR/FR
single_col:
            mb_type_col[0] =
            mb_type_col[1] = h->ref_list[1][0].f.mb_type[mb_xy];

            sub_mb_type |= MB_TYPE_16x16|MB_TYPE_DIRECT2; /* B_SUB_8x8 */
            if(!is_b8x8 && (mb_type_col[0] & MB_TYPE_16x16_OR_INTRA)){
                *mb_type   |= MB_TYPE_16x16|MB_TYPE_DIRECT2; /* B_16x16 */
            }else if(!is_b8x8 && (mb_type_col[0] & (MB_TYPE_16x8|MB_TYPE_8x16))){
                *mb_type   |= MB_TYPE_DIRECT2 | (mb_type_col[0] & (MB_TYPE_16x8|MB_TYPE_8x16));
            }else{
                if(!h->sps.direct_8x8_inference_flag){
                    /* FIXME save sub mb types from previous frames (or derive from MVs)
                    * so we know exactly what block size to use */
                    sub_mb_type += (MB_TYPE_8x8-MB_TYPE_16x16); /* B_SUB_4x4 */
                }
                *mb_type   |= MB_TYPE_8x8;
            }
        }
    }

    await_reference_mb_row(h, &h->ref_list[1][0], mb_y);

    l1mv0  = &h->ref_list[1][0].f.motion_val[0][h->mb2b_xy [mb_xy]];
    l1mv1  = &h->ref_list[1][0].f.motion_val[1][h->mb2b_xy [mb_xy]];
    l1ref0 = &h->ref_list[1][0].f.ref_index [0][4 * mb_xy];
    l1ref1 = &h->ref_list[1][0].f.ref_index [1][4 * mb_xy];
    if(!b8_stride){
        if(s->mb_y&1){
            l1ref0 += 2;
            l1ref1 += 2;
            l1mv0  +=  2*b4_stride;
            l1mv1  +=  2*b4_stride;
        }
    }


        if(IS_INTERLACED(*mb_type) != IS_INTERLACED(mb_type_col[0])){
            int n=0;
            for(i8=0; i8<4; i8++){
                int x8 = i8&1;
                int y8 = i8>>1;
                int xy8 = x8+y8*b8_stride;
                int xy4 = 3*x8+y8*b4_stride;
                int a,b;

                if(is_b8x8 && !IS_DIRECT(h->sub_mb_type[i8]))
                    continue;
                h->sub_mb_type[i8] = sub_mb_type;

                fill_rectangle(&h->ref_cache[0][scan8[i8*4]], 2, 2, 8, (uint8_t)ref[0], 1);
                fill_rectangle(&h->ref_cache[1][scan8[i8*4]], 2, 2, 8, (uint8_t)ref[1], 1);
                /* Co-located block is short-term ref 0 with |mv| <= 1:
                 * use a zero MV for each list whose ref index is 0 */
                if(!IS_INTRA(mb_type_col[y8]) && !h->ref_list[1][0].long_ref
                   && (   (l1ref0[xy8] == 0 && FFABS(l1mv0[xy4][0]) <= 1 && FFABS(l1mv0[xy4][1]) <= 1)
                       || (l1ref0[xy8]  < 0 && l1ref1[xy8] == 0 && FFABS(l1mv1[xy4][0]) <= 1 && FFABS(l1mv1[xy4][1]) <= 1))){
                    a=b=0;
                    if(ref[0] > 0)
                        a= mv[0];
                    if(ref[1] > 0)
                        b= mv[1];
                    n++;
                }else{
                    a= mv[0];
                    b= mv[1];
                }
                fill_rectangle(&h->mv_cache[0][scan8[i8*4]], 2, 2, 8, a, 4);
                fill_rectangle(&h->mv_cache[1][scan8[i8*4]], 2, 2, 8, b, 4);
            }
            if(!is_b8x8 && !(n&3))
                *mb_type= (*mb_type & ~(MB_TYPE_8x8|MB_TYPE_16x8|MB_TYPE_8x16|MB_TYPE_P1L0|MB_TYPE_P1L1))|MB_TYPE_16x16|MB_TYPE_DIRECT2;
        }else if(IS_16X16(*mb_type)){