static int h261_decode_mb(H261Context *h)
{
    MpegEncContext *const s = &h->s;
    int i, cbp, xy;

    cbp = 63;

    /* Read MBA (macroblock address) */
    do {
        h->mba_diff = get_vlc2(&s->gb, h261_mba_vlc.table,
                               H261_MBA_VLC_BITS, 2);

        /* Check for slice end. NOTE: a GOB can be empty (no MB data)
         * or consist only of MBA stuffing. */
        if (h->mba_diff == MBA_STARTCODE) { // start code
            h->gob_start_code_skipped = 1;
            return SLICE_END;
        }
    } while (h->mba_diff == MBA_STUFFING); // stuffing

    if (h->mba_diff < 0) {
        if (get_bits_count(&s->gb) + 7 >= s->gb.size_in_bits)
            return SLICE_END;
        av_log(s->avctx, AV_LOG_ERROR, "illegal mba at %d %d\n",
               s->mb_x, s->mb_y);
        return SLICE_ERROR;
    }

    h->mba_diff    += 1;
    h->current_mba += h->mba_diff;

    if (h->current_mba > MBA_STUFFING)
        return SLICE_ERROR;

    s->mb_x = ((h->gob_number - 1) % 2) * 11 + ((h->current_mba - 1) % 11);
    s->mb_y = ((h->gob_number - 1) / 2) * 3  + ((h->current_mba - 1) / 11);
    xy      = s->mb_x + s->mb_y * s->mb_stride;
    ff_init_block_index(s);
    ff_update_block_index(s);

    /* Read MTYPE */
    h->mtype = get_vlc2(&s->gb, h261_mtype_vlc.table, H261_MTYPE_VLC_BITS, 2);
    h->mtype = h261_mtype_map[h->mtype];

    /* Read MQUANT */
    if (IS_QUANT(h->mtype))
        ff_set_qscale(s, get_bits(&s->gb, 5));

    s->mb_intra = IS_INTRA4x4(h->mtype);

    /* Read MV */
    if (IS_16X16(h->mtype)) {
        /* Motion vector data is included for all MC macroblocks. MVD is
         * obtained from the macroblock vector by subtracting the vector
         * of the preceding macroblock. For this calculation the vector
         * of the preceding macroblock is regarded as zero in the
         * following three situations:
         * 1) evaluating MVD for macroblocks 1, 12 and 23;
         * 2) evaluating MVD for macroblocks in which MBA does not
         *    represent a difference of 1;
         * 3) MTYPE of the previous macroblock was not MC. */
        if (h->current_mba == 1  || h->current_mba == 12 ||
            h->current_mba == 23 || h->mba_diff != 1) {
            h->current_mv_x = 0;
            h->current_mv_y = 0;
        }

        h->current_mv_x = decode_mv_component(&s->gb, h->current_mv_x);
        h->current_mv_y = decode_mv_component(&s->gb, h->current_mv_y);
    } else {
        h->current_mv_x = 0;
        h->current_mv_y = 0;
    }

    /* Read CBP */
    if (HAS_CBP(h->mtype))
        cbp = get_vlc2(&s->gb, h261_cbp_vlc.table, H261_CBP_VLC_BITS, 2) + 1;

    if (s->mb_intra) {
        s->current_picture.mb_type[xy] = MB_TYPE_INTRA;
        goto intra;
    }

    // set motion vectors
    s->mv_dir                      = MV_DIR_FORWARD;
    s->mv_type                     = MV_TYPE_16X16;
    s->current_picture.mb_type[xy] = MB_TYPE_16x16 | MB_TYPE_L0;
    s->mv[0][0][0]                 = h->current_mv_x * 2; // gets divided by 2 in motion compensation
    s->mv[0][0][1]                 = h->current_mv_y * 2;

intra:
    /* decode each block */
    if (s->mb_intra || HAS_CBP(h->mtype)) {
        s->dsp.clear_blocks(s->block[0]);
        for (i = 0; i < 6; i++) {
            if (h261_decode_block(h, s->block[i], i, cbp & 32) < 0)
                return SLICE_ERROR;
            cbp += cbp; // shift CBP left so bit 5 always selects the current block
        }
    } else {
        for (i = 0; i < 6; i++)
            s->block_last_index[i] = -1;
    }

    MPV_decode_mb(s, s->block);

    return SLICE_OK;
}
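/* A minimal standalone sketch (not part of the decoder) of the GOB/MBA
 * addressing used above. For CIF, GOBs tile the frame two columns wide
 * and each GOB holds 33 macroblocks in 3 rows of 11; the helper name
 * below is hypothetical. */
#include <stdio.h>

static void mba_to_mb_xy(int gob_number, int current_mba, int *mb_x, int *mb_y)
{
    /* Same arithmetic as h261_decode_mb(): GOBs are numbered from 1,
     * macroblock addresses within a GOB run from 1 to 33. */
    *mb_x = ((gob_number - 1) % 2) * 11 + ((current_mba - 1) % 11);
    *mb_y = ((gob_number - 1) / 2) * 3  + ((current_mba - 1) / 11);
}

int main(void)
{
    int x, y;
    mba_to_mb_xy(3, 12, &x, &y); /* GOB 3, 12th macroblock */
    printf("mb_x=%d mb_y=%d\n", x, y); /* expected: mb_x=0 mb_y=4 */
    return 0;
}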
void render_mbs()
{
    H264Context *h = g_h;
    GPUH264Context *const g = &h->gpu;
    MpegEncContext *const s = &h->s;
    H264mb *blockStore = g->block_buffer;
    int i, l;
    int lists   = (h->slice_type == FF_B_TYPE) ? 2 : 1;
    int dpb_pos = s->current_picture.gpu_dpb;

    printf("Attempting to motion compensate %d blocks\n", g->end - g->start + 1);

    glNewList(dispList, GL_COMPILE);
    for (l = 0; l < lists; l++) {
        glBegin(GL_QUADS); // one batch of quads per reference list
        for (i = g->start; i <= g->end; i++) {
            const int mb_x    = blockStore[i].mb_x;
            const int mb_y    = blockStore[i].mb_y;
            const int mb_xy   = mb_x + mb_y * s->mb_stride;
            const int mb_type = s->current_picture.mb_type[mb_xy];
            int mv_x, mv_y, j;

            // RUDD TODO: ignoring intra blocks for now
            if (IS_INTER(mb_type)) {
                if (IS_16X16(mb_type) && IS_DIR(mb_type, 0, l)) {
                    mv_x = blockStore[i].mv_cache[l][scan8[0]][0];
                    mv_y = blockStore[i].mv_cache[l][scan8[0]][1];
                    render_one_block(mb_x, mb_y, mv_x, mv_y, 16, 16, 0, 0,
                                     h->ref_list[l][h->ref_cache[l][scan8[0]]].gpu_dpb, dpb_pos);
                } else if (IS_16X8(mb_type)) {
                    if (IS_DIR(mb_type, 0, l)) {
                        mv_x = blockStore[i].mv_cache[l][scan8[0]][0];
                        mv_y = blockStore[i].mv_cache[l][scan8[0]][1];
                        render_one_block(mb_x, mb_y, mv_x, mv_y, 16, 8, 0, 0,
                                         h->ref_list[l][h->ref_cache[l][scan8[0]]].gpu_dpb, dpb_pos);
                    }
                    if (IS_DIR(mb_type, 1, l)) {
                        mv_x = blockStore[i].mv_cache[l][scan8[8]][0];
                        mv_y = blockStore[i].mv_cache[l][scan8[8]][1];
                        render_one_block(mb_x, mb_y, mv_x, mv_y, 16, 8, 0, -8,
                                         h->ref_list[l][h->ref_cache[l][scan8[8]]].gpu_dpb, dpb_pos);
                    }
                } else if (IS_8X16(mb_type)) {
                    if (IS_DIR(mb_type, 0, l)) {
                        mv_x = blockStore[i].mv_cache[l][scan8[0]][0];
                        mv_y = blockStore[i].mv_cache[l][scan8[0]][1];
                        render_one_block(mb_x, mb_y, mv_x, mv_y, 8, 16, 0, 0,
                                         h->ref_list[l][h->ref_cache[l][scan8[0]]].gpu_dpb, dpb_pos);
                    }
                    if (IS_DIR(mb_type, 1, l)) {
                        mv_x = blockStore[i].mv_cache[l][scan8[4]][0];
                        mv_y = blockStore[i].mv_cache[l][scan8[4]][1];
                        render_one_block(mb_x, mb_y, mv_x, mv_y, 8, 16, 8, 0,
                                         h->ref_list[l][h->ref_cache[l][scan8[4]]].gpu_dpb, dpb_pos);
                    }
                } else {
                    assert(IS_8X8(mb_type));
                    for (j = 0; j < 4; j++) {
                        const int sub_mb_type = h->sub_mb_type[j];
                        const int n = 4 * j;
                        int x_offset = (j & 1);
                        int y_offset = (j & 2) >> 1;

                        if (!IS_DIR(sub_mb_type, 0, l))
                            continue;

                        if (IS_SUB_8X8(sub_mb_type)) {
                            mv_x = blockStore[i].mv_cache[l][scan8[n]][0];
                            mv_y = blockStore[i].mv_cache[l][scan8[n]][1];
                            render_one_block(mb_x, mb_y, mv_x, mv_y, 8, 8,
                                             8 * x_offset, -8 * y_offset,
                                             h->ref_list[l][h->ref_cache[l][scan8[n]]].gpu_dpb, dpb_pos);
                        } else if (IS_SUB_8X4(sub_mb_type)) {
                            mv_x = blockStore[i].mv_cache[l][scan8[n]][0];
                            mv_y = blockStore[i].mv_cache[l][scan8[n]][1];
                            render_one_block(mb_x, mb_y, mv_x, mv_y, 8, 4,
                                             8 * x_offset, -8 * y_offset,
                                             h->ref_list[l][h->ref_cache[l][scan8[n]]].gpu_dpb, dpb_pos);
                            mv_x = blockStore[i].mv_cache[l][scan8[n + 2]][0];
                            mv_y = blockStore[i].mv_cache[l][scan8[n + 2]][1];
                            render_one_block(mb_x, mb_y, mv_x, mv_y, 8, 4,
                                             8 * x_offset, -8 * y_offset - 4,
                                             h->ref_list[l][h->ref_cache[l][scan8[n + 2]]].gpu_dpb, dpb_pos);
                        } else if (IS_SUB_4X8(sub_mb_type)) {
                            mv_x = blockStore[i].mv_cache[l][scan8[n]][0];
                            mv_y = blockStore[i].mv_cache[l][scan8[n]][1];
                            render_one_block(mb_x, mb_y, mv_x, mv_y, 4, 8,
                                             8 * x_offset, -8 * y_offset,
                                             h->ref_list[l][h->ref_cache[l][scan8[n]]].gpu_dpb, dpb_pos);
                            mv_x = blockStore[i].mv_cache[l][scan8[n + 1]][0];
                            mv_y = blockStore[i].mv_cache[l][scan8[n + 1]][1];
                            render_one_block(mb_x, mb_y, mv_x, mv_y, 4, 8,
                                             8 * x_offset + 4, -8 * y_offset,
                                             h->ref_list[l][h->ref_cache[l][scan8[n + 1]]].gpu_dpb, dpb_pos);
                        } else { /* SUB_4X4 */
                            mv_x = blockStore[i].mv_cache[l][scan8[n]][0];
                            mv_y = blockStore[i].mv_cache[l][scan8[n]][1];
                            render_one_block(mb_x, mb_y, mv_x, mv_y, 4, 4,
                                             8 * x_offset, -8 * y_offset,
                                             h->ref_list[l][h->ref_cache[l][scan8[n]]].gpu_dpb, dpb_pos);
                            mv_x = blockStore[i].mv_cache[l][scan8[n + 1]][0];
                            mv_y = blockStore[i].mv_cache[l][scan8[n + 1]][1];
                            render_one_block(mb_x, mb_y, mv_x, mv_y, 4, 4,
                                             8 * x_offset + 4, -8 * y_offset,
                                             h->ref_list[l][h->ref_cache[l][scan8[n + 1]]].gpu_dpb, dpb_pos);
                            mv_x = blockStore[i].mv_cache[l][scan8[n + 2]][0];
                            mv_y = blockStore[i].mv_cache[l][scan8[n + 2]][1];
                            render_one_block(mb_x, mb_y, mv_x, mv_y, 4, 4,
                                             8 * x_offset, -8 * y_offset - 4,
                                             h->ref_list[l][h->ref_cache[l][scan8[n + 2]]].gpu_dpb, dpb_pos);
                            mv_x = blockStore[i].mv_cache[l][scan8[n + 3]][0];
                            mv_y = blockStore[i].mv_cache[l][scan8[n + 3]][1];
                            render_one_block(mb_x, mb_y, mv_x, mv_y, 4, 4,
                                             8 * x_offset + 4, -8 * y_offset - 4,
                                             h->ref_list[l][h->ref_cache[l][scan8[n + 3]]].gpu_dpb, dpb_pos);
                        }
                    }
                }
            }
        }
        glEnd(); // pairs with the glBegin() at the top of the list loop
    }
    glEndList();
}
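/* A hypothetical sketch of the per-partition quad emission that
 * render_one_block() performs, assuming luma motion vectors in
 * quarter-pel units and one texture per DPB surface. All names and
 * coordinate conventions here are assumptions for illustration, not the
 * project's actual implementation. */
#include <GL/gl.h>

static void render_one_block_sketch(int mb_x, int mb_y, int mv_x, int mv_y,
                                    int w, int h, int x_off, int y_off,
                                    int ref_dpb, int dst_dpb)
{
    /* destination position of the partition in pixels */
    float dx = 16.0f * mb_x + x_off;
    float dy = 16.0f * mb_y - y_off; /* y offsets are passed negated above */
    /* source position: destination displaced by the quarter-pel MV */
    float sx = dx + mv_x / 4.0f;
    float sy = dy + mv_y / 4.0f;

    /* emit one quad; the caller has already issued glBegin(GL_QUADS) */
    glTexCoord2f(sx,     sy);     glVertex2f(dx,     dy);
    glTexCoord2f(sx + w, sy);     glVertex2f(dx + w, dy);
    glTexCoord2f(sx + w, sy + h); glVertex2f(dx + w, dy + h);
    glTexCoord2f(sx,     sy + h); glVertex2f(dx,     dy + h);

    (void)ref_dpb; (void)dst_dpb; /* binding/selecting DPB surfaces omitted */
}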
static void MCFUNC(hl_motion)(const H264Context *h, H264SliceContext *sl,
                              uint8_t *dest_y,
                              uint8_t *dest_cb, uint8_t *dest_cr,
                              qpel_mc_func(*qpix_put)[16],
                              const h264_chroma_mc_func(*chroma_put),
                              qpel_mc_func(*qpix_avg)[16],
                              const h264_chroma_mc_func(*chroma_avg),
                              const h264_weight_func *weight_op,
                              const h264_biweight_func *weight_avg)
{
    const int mb_xy   = sl->mb_xy;
    const int mb_type = h->cur_pic.mb_type[mb_xy];

    av_assert2(IS_INTER(mb_type));

    if (HAVE_THREADS && (h->avctx->active_thread_type & FF_THREAD_FRAME))
        await_references(h, sl);
    prefetch_motion(h, sl, 0, PIXEL_SHIFT, CHROMA_IDC);

    if (IS_16X16(mb_type)) {
        mc_part(h, sl, 0, 1, 16, 0, dest_y, dest_cb, dest_cr, 0, 0,
                qpix_put[0], chroma_put[0], qpix_avg[0], chroma_avg[0],
                weight_op, weight_avg,
                IS_DIR(mb_type, 0, 0), IS_DIR(mb_type, 0, 1));
    } else if (IS_16X8(mb_type)) {
        mc_part(h, sl, 0, 0, 8, 8 << PIXEL_SHIFT, dest_y, dest_cb, dest_cr, 0, 0,
                qpix_put[1], chroma_put[0], qpix_avg[1], chroma_avg[0],
                weight_op, weight_avg,
                IS_DIR(mb_type, 0, 0), IS_DIR(mb_type, 0, 1));
        mc_part(h, sl, 8, 0, 8, 8 << PIXEL_SHIFT, dest_y, dest_cb, dest_cr, 0, 4,
                qpix_put[1], chroma_put[0], qpix_avg[1], chroma_avg[0],
                weight_op, weight_avg,
                IS_DIR(mb_type, 1, 0), IS_DIR(mb_type, 1, 1));
    } else if (IS_8X16(mb_type)) {
        mc_part(h, sl, 0, 0, 16, 8 * sl->mb_linesize, dest_y, dest_cb, dest_cr, 0, 0,
                qpix_put[1], chroma_put[1], qpix_avg[1], chroma_avg[1],
                &weight_op[1], &weight_avg[1],
                IS_DIR(mb_type, 0, 0), IS_DIR(mb_type, 0, 1));
        mc_part(h, sl, 4, 0, 16, 8 * sl->mb_linesize, dest_y, dest_cb, dest_cr, 4, 0,
                qpix_put[1], chroma_put[1], qpix_avg[1], chroma_avg[1],
                &weight_op[1], &weight_avg[1],
                IS_DIR(mb_type, 1, 0), IS_DIR(mb_type, 1, 1));
    } else {
        int i;

        av_assert2(IS_8X8(mb_type));

        for (i = 0; i < 4; i++) {
            const int sub_mb_type = sl->sub_mb_type[i];
            const int n  = 4 * i;
            int x_offset = (i & 1) << 2;
            int y_offset = (i & 2) << 1;

            if (IS_SUB_8X8(sub_mb_type)) {
                mc_part(h, sl, n, 1, 8, 0, dest_y, dest_cb, dest_cr,
                        x_offset, y_offset,
                        qpix_put[1], chroma_put[1], qpix_avg[1], chroma_avg[1],
                        &weight_op[1], &weight_avg[1],
                        IS_DIR(sub_mb_type, 0, 0), IS_DIR(sub_mb_type, 0, 1));
            } else if (IS_SUB_8X4(sub_mb_type)) {
                mc_part(h, sl, n, 0, 4, 4 << PIXEL_SHIFT, dest_y, dest_cb, dest_cr,
                        x_offset, y_offset,
                        qpix_put[2], chroma_put[1], qpix_avg[2], chroma_avg[1],
                        &weight_op[1], &weight_avg[1],
                        IS_DIR(sub_mb_type, 0, 0), IS_DIR(sub_mb_type, 0, 1));
                mc_part(h, sl, n + 2, 0, 4, 4 << PIXEL_SHIFT, dest_y, dest_cb, dest_cr,
                        x_offset, y_offset + 2,
                        qpix_put[2], chroma_put[1], qpix_avg[2], chroma_avg[1],
                        &weight_op[1], &weight_avg[1],
                        IS_DIR(sub_mb_type, 0, 0), IS_DIR(sub_mb_type, 0, 1));
            } else if (IS_SUB_4X8(sub_mb_type)) {
                mc_part(h, sl, n, 0, 8, 4 * sl->mb_linesize, dest_y, dest_cb, dest_cr,
                        x_offset, y_offset,
                        qpix_put[2], chroma_put[2], qpix_avg[2], chroma_avg[2],
                        &weight_op[2], &weight_avg[2],
                        IS_DIR(sub_mb_type, 0, 0), IS_DIR(sub_mb_type, 0, 1));
                mc_part(h, sl, n + 1, 0, 8, 4 * sl->mb_linesize, dest_y, dest_cb, dest_cr,
                        x_offset + 2, y_offset,
                        qpix_put[2], chroma_put[2], qpix_avg[2], chroma_avg[2],
                        &weight_op[2], &weight_avg[2],
                        IS_DIR(sub_mb_type, 0, 0), IS_DIR(sub_mb_type, 0, 1));
            } else {
                int j;
                av_assert2(IS_SUB_4X4(sub_mb_type));
                for (j = 0; j < 4; j++) {
                    int sub_x_offset = x_offset + 2 * (j & 1);
                    int sub_y_offset = y_offset + (j & 2);
                    mc_part(h, sl, n + j, 1, 4, 0, dest_y, dest_cb, dest_cr,
                            sub_x_offset, sub_y_offset,
                            qpix_put[2], chroma_put[2], qpix_avg[2], chroma_avg[2],
                            &weight_op[2], &weight_avg[2],
                            IS_DIR(sub_mb_type, 0, 0), IS_DIR(sub_mb_type, 0, 1));
                }
            }
        }
    }

    if (USES_LIST(mb_type, 1))
        prefetch_motion(h, sl, 1, PIXEL_SHIFT, CHROMA_IDC);
}
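/* A standalone sketch of the partition-offset arithmetic in the 8x8
 * branch above. x_offset/y_offset appear to be expressed in 2-pixel
 * luma units (so 4 == 8 luma pixels); that is my reading of mc_part()'s
 * interface, stated as an assumption rather than a documented contract. */
#include <stdio.h>

int main(void)
{
    for (int i = 0; i < 4; i++) {        /* four 8x8 partitions */
        int x_offset = (i & 1) << 2;     /* 0 or 4 */
        int y_offset = (i & 2) << 1;     /* 0 or 4 */
        for (int j = 0; j < 4; j++) {    /* four 4x4 sub-blocks */
            int sub_x = x_offset + 2 * (j & 1);
            int sub_y = y_offset + (j & 2);
            printf("8x8 part %d, 4x4 sub %d -> luma origin (%2d,%2d)\n",
                   i, j, 2 * sub_x, 2 * sub_y);
        }
    }
    return 0;
}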
static av_noinline void FUNC(hl_decode_mb)(H264Context *h)
{
    MpegEncContext *const s = &h->s;
    const int mb_x    = s->mb_x;
    const int mb_y    = s->mb_y;
    const int mb_xy   = h->mb_xy;
    const int mb_type = s->current_picture.f.mb_type[mb_xy];
    uint8_t *dest_y, *dest_cb, *dest_cr;
    int linesize, uvlinesize /*dct_offset*/;
    int i, j;
    int *block_offset = &h->block_offset[0];
    const int transform_bypass = !SIMPLE && (s->qscale == 0 && h->sps.transform_bypass);
    /* is_h264 should always be true if SVQ3 is disabled. */
    const int is_h264 = !CONFIG_SVQ3_DECODER || SIMPLE ||
                        s->codec_id == AV_CODEC_ID_H264;
    void (*idct_add)(uint8_t *dst, int16_t *block, int stride);
    const int block_h   = 16 >> s->chroma_y_shift;
    const int chroma422 = CHROMA422;

    dest_y  = s->current_picture.f.data[0] +
              ((mb_x << PIXEL_SHIFT) + mb_y * s->linesize) * 16;
    dest_cb = s->current_picture.f.data[1] +
              (mb_x << PIXEL_SHIFT) * 8 + mb_y * s->uvlinesize * block_h;
    dest_cr = s->current_picture.f.data[2] +
              (mb_x << PIXEL_SHIFT) * 8 + mb_y * s->uvlinesize * block_h;

    s->vdsp.prefetch(dest_y  + (s->mb_x & 3) * 4 * s->linesize   + (64 << PIXEL_SHIFT),
                     s->linesize, 4);
    s->vdsp.prefetch(dest_cb + (s->mb_x & 7)     * s->uvlinesize + (64 << PIXEL_SHIFT),
                     dest_cr - dest_cb, 2);

    h->list_counts[mb_xy] = h->list_count;

    if (!SIMPLE && MB_FIELD) {
        linesize     = h->mb_linesize   = s->linesize * 2;
        uvlinesize   = h->mb_uvlinesize = s->uvlinesize * 2;
        block_offset = &h->block_offset[48];
        if (mb_y & 1) { // FIXME move out of this function?
            dest_y  -= s->linesize * 15;
            dest_cb -= s->uvlinesize * (block_h - 1);
            dest_cr -= s->uvlinesize * (block_h - 1);
        }
        if (FRAME_MBAFF) {
            int list;
            for (list = 0; list < h->list_count; list++) {
                if (!USES_LIST(mb_type, list))
                    continue;
                if (IS_16X16(mb_type)) {
                    int8_t *ref = &h->ref_cache[list][scan8[0]];
                    fill_rectangle(ref, 4, 4, 8, (16 + *ref) ^ (s->mb_y & 1), 1);
                } else {
                    for (i = 0; i < 16; i += 4) {
                        int ref = h->ref_cache[list][scan8[i]];
                        if (ref >= 0)
                            fill_rectangle(&h->ref_cache[list][scan8[i]], 2, 2,
                                           8, (16 + ref) ^ (s->mb_y & 1), 1);
                    }
                }
            }
        }
    } else {
        linesize   = h->mb_linesize   = s->linesize;
        uvlinesize = h->mb_uvlinesize = s->uvlinesize;
        // dct_offset = s->linesize * 16;
    }

    if (!SIMPLE && IS_INTRA_PCM(mb_type)) {
        const int bit_depth = h->sps.bit_depth_luma;
        if (PIXEL_SHIFT) {
            int j;
            GetBitContext gb;
            init_get_bits(&gb, (uint8_t *)h->mb,
                          ff_h264_mb_sizes[h->sps.chroma_format_idc] * bit_depth);

            for (i = 0; i < 16; i++) {
                uint16_t *tmp_y = (uint16_t *)(dest_y + i * linesize);
                for (j = 0; j < 16; j++)
                    tmp_y[j] = get_bits(&gb, bit_depth);
            }
            if (SIMPLE || !CONFIG_GRAY || !(s->flags & CODEC_FLAG_GRAY)) {
                if (!h->sps.chroma_format_idc) {
                    for (i = 0; i < block_h; i++) {
                        uint16_t *tmp_cb = (uint16_t *)(dest_cb + i * uvlinesize);
                        uint16_t *tmp_cr = (uint16_t *)(dest_cr + i * uvlinesize);
                        for (j = 0; j < 8; j++)
                            tmp_cb[j] = tmp_cr[j] = 1 << (bit_depth - 1);
                    }
                } else {
                    for (i = 0; i < block_h; i++) {
                        uint16_t *tmp_cb = (uint16_t *)(dest_cb + i * uvlinesize);
                        for (j = 0; j < 8; j++)
                            tmp_cb[j] = get_bits(&gb, bit_depth);
                    }
                    for (i = 0; i < block_h; i++) {
                        uint16_t *tmp_cr = (uint16_t *)(dest_cr + i * uvlinesize);
                        for (j = 0; j < 8; j++)
                            tmp_cr[j] = get_bits(&gb, bit_depth);
                    }
                }
            }
        } else {
            for (i = 0; i < 16; i++)
                memcpy(dest_y + i * linesize, (uint8_t *)h->mb + i * 16, 16);
            if (SIMPLE || !CONFIG_GRAY || !(s->flags & CODEC_FLAG_GRAY)) {
                if (!h->sps.chroma_format_idc) {
                    for (i = 0; i < 8; i++) {
                        memset(dest_cb + i * uvlinesize, 1 << (bit_depth - 1), 8);
                        memset(dest_cr + i * uvlinesize, 1 << (bit_depth - 1), 8);
                    }
                } else {
                    uint8_t *src_cb = (uint8_t *)h->mb + 256;
                    uint8_t *src_cr = (uint8_t *)h->mb + 256 + block_h * 8;
                    for (i = 0; i < block_h; i++) {
                        memcpy(dest_cb + i * uvlinesize, src_cb + i * 8, 8);
                        memcpy(dest_cr + i * uvlinesize, src_cr + i * 8, 8);
                    }
                }
            }
        }
    } else {
        if (IS_INTRA(mb_type)) {
            if (h->deblocking_filter)
                xchg_mb_border(h, dest_y, dest_cb, dest_cr, linesize,
                               uvlinesize, 1, 0, SIMPLE, PIXEL_SHIFT);

            if (SIMPLE || !CONFIG_GRAY || !(s->flags & CODEC_FLAG_GRAY)) {
                h->hpc.pred8x8[h->chroma_pred_mode](dest_cb, uvlinesize);
                h->hpc.pred8x8[h->chroma_pred_mode](dest_cr, uvlinesize);
            }

            hl_decode_mb_predict_luma(h, mb_type, is_h264, SIMPLE,
                                      transform_bypass, PIXEL_SHIFT,
                                      block_offset, linesize, dest_y, 0);

            if (h->deblocking_filter)
                xchg_mb_border(h, dest_y, dest_cb, dest_cr, linesize,
                               uvlinesize, 0, 0, SIMPLE, PIXEL_SHIFT);
        } else if (is_h264) {
            if (chroma422) {
                FUNC(hl_motion_422)(h, dest_y, dest_cb, dest_cr,
                                    s->me.qpel_put, s->dsp.put_h264_chroma_pixels_tab,
                                    s->me.qpel_avg, s->dsp.avg_h264_chroma_pixels_tab,
                                    h->h264dsp.weight_h264_pixels_tab,
                                    h->h264dsp.biweight_h264_pixels_tab);
            } else {
                FUNC(hl_motion_420)(h, dest_y, dest_cb, dest_cr,
                                    s->me.qpel_put, s->dsp.put_h264_chroma_pixels_tab,
                                    s->me.qpel_avg, s->dsp.avg_h264_chroma_pixels_tab,
                                    h->h264dsp.weight_h264_pixels_tab,
                                    h->h264dsp.biweight_h264_pixels_tab);
            }
        }

        hl_decode_mb_idct_luma(h, mb_type, is_h264, SIMPLE, transform_bypass,
                               PIXEL_SHIFT, block_offset, linesize, dest_y, 0);

        if ((SIMPLE || !CONFIG_GRAY || !(s->flags & CODEC_FLAG_GRAY)) &&
            (h->cbp & 0x30)) {
            uint8_t *dest[2] = { dest_cb, dest_cr };
            if (transform_bypass) {
                if (IS_INTRA(mb_type) && h->sps.profile_idc == 244 &&
                    (h->chroma_pred_mode == VERT_PRED8x8 ||
                     h->chroma_pred_mode == HOR_PRED8x8)) {
                    h->hpc.pred8x8_add[h->chroma_pred_mode](dest[0],
                                                            block_offset + 16,
                                                            h->mb + (16 * 16 * 1 << PIXEL_SHIFT),
                                                            uvlinesize);
                    h->hpc.pred8x8_add[h->chroma_pred_mode](dest[1],
                                                            block_offset + 32,
                                                            h->mb + (16 * 16 * 2 << PIXEL_SHIFT),
                                                            uvlinesize);
                } else {
                    idct_add = s->dsp.add_pixels4;
                    for (j = 1; j < 3; j++) {
                        for (i = j * 16; i < j * 16 + 4; i++)
                            if (h->non_zero_count_cache[scan8[i]] ||
                                dctcoef_get(h->mb, PIXEL_SHIFT, i * 16))
                                idct_add(dest[j - 1] + block_offset[i],
                                         h->mb + (i * 16 << PIXEL_SHIFT),
                                         uvlinesize);
                        if (chroma422) {
                            for (i = j * 16 + 4; i < j * 16 + 8; i++)
                                if (h->non_zero_count_cache[scan8[i + 4]] ||
                                    dctcoef_get(h->mb, PIXEL_SHIFT, i * 16))
                                    idct_add(dest[j - 1] + block_offset[i + 4],
                                             h->mb + (i * 16 << PIXEL_SHIFT),
                                             uvlinesize);
                        }
                    }
                }
            } else {
                if (is_h264) {
                    int qp[2];
                    if (chroma422) {
                        qp[0] = h->chroma_qp[0] + 3;
                        qp[1] = h->chroma_qp[1] + 3;
                    } else {
                        qp[0] = h->chroma_qp[0];
                        qp[1] = h->chroma_qp[1];
                    }
                    if (h->non_zero_count_cache[scan8[CHROMA_DC_BLOCK_INDEX + 0]])
                        h->h264dsp.h264_chroma_dc_dequant_idct(h->mb + (16 * 16 * 1 << PIXEL_SHIFT),
                                                               h->dequant4_coeff[IS_INTRA(mb_type) ? 1 : 4][qp[0]][0]);
                    if (h->non_zero_count_cache[scan8[CHROMA_DC_BLOCK_INDEX + 1]])
                        h->h264dsp.h264_chroma_dc_dequant_idct(h->mb + (16 * 16 * 2 << PIXEL_SHIFT),
                                                               h->dequant4_coeff[IS_INTRA(mb_type) ? 2 : 5][qp[1]][0]);
                    h->h264dsp.h264_idct_add8(dest, block_offset,
                                              h->mb, uvlinesize,
                                              h->non_zero_count_cache);
                } else if (CONFIG_SVQ3_DECODER) {
                    h->h264dsp.h264_chroma_dc_dequant_idct(h->mb + 16 * 16 * 1,
                                                           h->dequant4_coeff[IS_INTRA(mb_type) ? 1 : 4][h->chroma_qp[0]][0]);
                    h->h264dsp.h264_chroma_dc_dequant_idct(h->mb + 16 * 16 * 2,
                                                           h->dequant4_coeff[IS_INTRA(mb_type) ? 2 : 5][h->chroma_qp[1]][0]);
                    for (j = 1; j < 3; j++) {
                        for (i = j * 16; i < j * 16 + 4; i++)
                            if (h->non_zero_count_cache[scan8[i]] || h->mb[i * 16]) {
                                uint8_t *const ptr = dest[j - 1] + block_offset[i];
                                ff_svq3_add_idct_c(ptr, h->mb + i * 16,
                                                   uvlinesize,
                                                   ff_h264_chroma_qp[0][s->qscale + 12] - 12, 2);
                            }
                    }
                }
            }
        }
    }

    if (h->cbp || IS_INTRA(mb_type)) {
        s->dsp.clear_blocks(h->mb);
        s->dsp.clear_blocks(h->mb + (24 * 16 << PIXEL_SHIFT));
    }
}
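/* A minimal standalone sketch of the PIXEL_SHIFT convention used above:
 * the same coefficient buffer is viewed as int16_t (8-bit decoding,
 * PIXEL_SHIFT == 0) or int32_t (high bit depth, PIXEL_SHIFT == 1).
 * dctcoef_get_sketch mirrors what dctcoef_get() does under that
 * assumption; the name is mine. */
#include <stdint.h>
#include <stdio.h>

static int dctcoef_get_sketch(const void *mb, int pixel_shift, int index)
{
    if (pixel_shift)
        return ((const int32_t *)mb)[index]; /* 9..14-bit content */
    else
        return ((const int16_t *)mb)[index]; /* 8-bit content */
}

int main(void)
{
    int16_t coeffs8[16]  = { [0] = -42 };
    int32_t coeffs16[16] = { [0] = 1234 };
    printf("%d %d\n",
           dctcoef_get_sketch(coeffs8,  0, 0),
           dctcoef_get_sketch(coeffs16, 1, 0)); /* prints: -42 1234 */
    return 0;
}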
static av_noinline void FUNC(hl_decode_mb_444)(H264Context *h)
{
    MpegEncContext *const s = &h->s;
    const int mb_x    = s->mb_x;
    const int mb_y    = s->mb_y;
    const int mb_xy   = h->mb_xy;
    const int mb_type = s->current_picture.f.mb_type[mb_xy];
    uint8_t *dest[3];
    int linesize;
    int i, j, p;
    int *block_offset = &h->block_offset[0];
    const int transform_bypass = !SIMPLE && (s->qscale == 0 && h->sps.transform_bypass);
    const int plane_count      = (SIMPLE || !CONFIG_GRAY ||
                                  !(s->flags & CODEC_FLAG_GRAY)) ? 3 : 1;

    for (p = 0; p < plane_count; p++) {
        dest[p] = s->current_picture.f.data[p] +
                  ((mb_x << PIXEL_SHIFT) + mb_y * s->linesize) * 16;
        s->vdsp.prefetch(dest[p] + (s->mb_x & 3) * 4 * s->linesize + (64 << PIXEL_SHIFT),
                         s->linesize, 4);
    }

    h->list_counts[mb_xy] = h->list_count;

    if (!SIMPLE && MB_FIELD) {
        linesize     = h->mb_linesize = h->mb_uvlinesize = s->linesize * 2;
        block_offset = &h->block_offset[48];
        if (mb_y & 1) // FIXME move out of this function?
            for (p = 0; p < 3; p++)
                dest[p] -= s->linesize * 15;
        if (FRAME_MBAFF) {
            int list;
            for (list = 0; list < h->list_count; list++) {
                if (!USES_LIST(mb_type, list))
                    continue;
                if (IS_16X16(mb_type)) {
                    int8_t *ref = &h->ref_cache[list][scan8[0]];
                    fill_rectangle(ref, 4, 4, 8, (16 + *ref) ^ (s->mb_y & 1), 1);
                } else {
                    for (i = 0; i < 16; i += 4) {
                        int ref = h->ref_cache[list][scan8[i]];
                        if (ref >= 0)
                            fill_rectangle(&h->ref_cache[list][scan8[i]], 2, 2,
                                           8, (16 + ref) ^ (s->mb_y & 1), 1);
                    }
                }
            }
        }
    } else {
        linesize = h->mb_linesize = h->mb_uvlinesize = s->linesize;
    }

    if (!SIMPLE && IS_INTRA_PCM(mb_type)) {
        if (PIXEL_SHIFT) {
            const int bit_depth = h->sps.bit_depth_luma;
            GetBitContext gb;
            init_get_bits(&gb, (uint8_t *)h->mb, 768 * bit_depth);

            for (p = 0; p < plane_count; p++)
                for (i = 0; i < 16; i++) {
                    uint16_t *tmp = (uint16_t *)(dest[p] + i * linesize);
                    for (j = 0; j < 16; j++)
                        tmp[j] = get_bits(&gb, bit_depth);
                }
        } else {
            for (p = 0; p < plane_count; p++)
                for (i = 0; i < 16; i++)
                    memcpy(dest[p] + i * linesize,
                           (uint8_t *)h->mb + p * 256 + i * 16, 16);
        }
    } else {
        if (IS_INTRA(mb_type)) {
            if (h->deblocking_filter)
                xchg_mb_border(h, dest[0], dest[1], dest[2], linesize,
                               linesize, 1, 1, SIMPLE, PIXEL_SHIFT);

            for (p = 0; p < plane_count; p++)
                hl_decode_mb_predict_luma(h, mb_type, 1, SIMPLE,
                                          transform_bypass, PIXEL_SHIFT,
                                          block_offset, linesize, dest[p], p);

            if (h->deblocking_filter)
                xchg_mb_border(h, dest[0], dest[1], dest[2], linesize,
                               linesize, 0, 1, SIMPLE, PIXEL_SHIFT);
        } else {
            FUNC(hl_motion_444)(h, dest[0], dest[1], dest[2],
                                s->me.qpel_put, s->dsp.put_h264_chroma_pixels_tab,
                                s->me.qpel_avg, s->dsp.avg_h264_chroma_pixels_tab,
                                h->h264dsp.weight_h264_pixels_tab,
                                h->h264dsp.biweight_h264_pixels_tab);
        }

        for (p = 0; p < plane_count; p++)
            hl_decode_mb_idct_luma(h, mb_type, 1, SIMPLE, transform_bypass,
                                   PIXEL_SHIFT, block_offset, linesize,
                                   dest[p], p);
    }

    if (h->cbp || IS_INTRA(mb_type)) {
        s->dsp.clear_blocks(h->mb);
        s->dsp.clear_blocks(h->mb + (24 * 16 << PIXEL_SHIFT));
    }
}
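/* A quick standalone check of the I_PCM sizing above: a 4:4:4 PCM
 * macroblock carries 3 planes x 16 x 16 = 768 samples, which is why the
 * bit reader is initialised with 768 * bit_depth bits. */
#include <stdio.h>

int main(void)
{
    const int planes = 3, mb_dim = 16;
    for (int bit_depth = 8; bit_depth <= 14; bit_depth++)
        printf("bit_depth %2d -> %4d PCM bits (%4d bytes) per MB\n",
               bit_depth,
               planes * mb_dim * mb_dim * bit_depth,
               planes * mb_dim * mb_dim * bit_depth / 8);
    return 0;
}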
void motion_execute(H264_Slice_GlbARGs *SLICE_T, H264_MB_Ctrl_DecARGs *dmb,
                    uint8_t *recon_buf, uint8_t *motion_dha)
{
    uint8_t *motion_douty, *motion_doutc;
    uint8_t *motion_dsa = motion_dha + 0x108;
    const int mb_type = dmb->mb_type;

    motion_douty = recon_buf;
    motion_doutc = recon_buf + PREVIOUS_OFFSET_U;

    SET_REG1_DSTA(TCSM1_PADDR((int)motion_douty));
    SET_REG1_DSA(TCSM1_PADDR((int)motion_dsa));
    SET_REG2_DSTA(TCSM1_PADDR((int)motion_doutc));
    SET_REG2_DSA(TCSM1_PADDR((int)motion_dsa));

    volatile int *tdd = (int *)motion_dha;
    int tkn = 0;
    motion_dsa[0] = 0x0;
    tdd++; /* reserve the first word for the leading descriptor head, written via tdd[-1] below */

    if (IS_16X16(mb_type)) {
        motion_task(SLICE_T, dmb, 0, 0, tdd, &tkn,
                    IS_DIR(mb_type, 0, 0), IS_DIR(mb_type, 0, 1),
                    3 /*blkh*/, 3 /*blkw*/, 0 /*boy*/, 0 /*box*/);
    } else if (IS_16X8(mb_type)) {
        motion_task(SLICE_T, dmb, 0, 0, tdd, &tkn,
                    IS_DIR(mb_type, 0, 0), IS_DIR(mb_type, 0, 1),
                    2 /*blkh*/, 3 /*blkw*/, 0 /*boy*/, 0 /*box*/);
        motion_task(SLICE_T, dmb, 1, 2, tdd, &tkn,
                    IS_DIR(mb_type, 1, 0), IS_DIR(mb_type, 1, 1),
                    2 /*blkh*/, 3 /*blkw*/, 2 /*boy*/, 0 /*box*/);
    } else if (IS_8X16(mb_type)) {
        motion_task(SLICE_T, dmb, 0, 0, tdd, &tkn,
                    IS_DIR(mb_type, 0, 0), IS_DIR(mb_type, 0, 1),
                    3 /*blkh*/, 2 /*blkw*/, 0 /*boy*/, 0 /*box*/);
        motion_task(SLICE_T, dmb, 1, 1, tdd, &tkn,
                    IS_DIR(mb_type, 1, 0), IS_DIR(mb_type, 1, 1),
                    3 /*blkh*/, 2 /*blkw*/, 0 /*boy*/, 2 /*box*/);
    } else {
        int i;
        int mv_n = 0;
        for (i = 0; i < 4; i++) {
            const int sub_mb_type = dmb->sub_mb_type[i];
            if (IS_SUB_8X8(sub_mb_type)) {
                motion_task(SLICE_T, dmb, mv_n, i, tdd, &tkn,
                            IS_DIR(sub_mb_type, 0, 0), IS_DIR(sub_mb_type, 0, 1),
                            2 /*blkh*/, 2 /*blkw*/,
                            (i & 0x2) /*boy*/, (i & 0x1) * 2 /*box*/);
                mv_n++;
            } else if (IS_SUB_8X4(sub_mb_type)) {
                motion_task(SLICE_T, dmb, mv_n, i, tdd, &tkn,
                            IS_DIR(sub_mb_type, 0, 0), IS_DIR(sub_mb_type, 0, 1),
                            1 /*blkh*/, 2 /*blkw*/,
                            (i & 0x2) /*boy*/, (i & 0x1) * 2 /*box*/);
                mv_n++;
                motion_task(SLICE_T, dmb, mv_n, i, tdd, &tkn,
                            IS_DIR(sub_mb_type, 0, 0), IS_DIR(sub_mb_type, 0, 1),
                            1 /*blkh*/, 2 /*blkw*/,
                            (i & 0x2) + 1 /*boy*/, (i & 0x1) * 2 /*box*/);
                mv_n++;
            } else if (IS_SUB_4X8(sub_mb_type)) {
                motion_task(SLICE_T, dmb, mv_n, i, tdd, &tkn,
                            IS_DIR(sub_mb_type, 0, 0), IS_DIR(sub_mb_type, 0, 1),
                            2 /*blkh*/, 1 /*blkw*/,
                            (i & 0x2) /*boy*/, (i & 0x1) * 2 /*box*/);
                mv_n++;
                motion_task(SLICE_T, dmb, mv_n, i, tdd, &tkn,
                            IS_DIR(sub_mb_type, 0, 0), IS_DIR(sub_mb_type, 0, 1),
                            2 /*blkh*/, 1 /*blkw*/,
                            (i & 0x2) /*boy*/, (i & 0x1) * 2 + 1 /*box*/);
                mv_n++;
            } else {
                int j;
                for (j = 0; j < 4; j++) {
                    motion_task(SLICE_T, dmb, mv_n, i, tdd, &tkn,
                                IS_DIR(sub_mb_type, 0, 0), IS_DIR(sub_mb_type, 0, 1),
                                1 /*blkh*/, 1 /*blkw*/,
                                (i & 0x2) + (j & 0x2) / 2 /*boy*/,
                                (i & 0x1) * 2 + (j & 0x1) /*box*/);
                    mv_n++;
                } // j
            } // BLK4X4
        } // i
    } // BLK8X8

    /* mark the last task word, then write the leading and trailing descriptor heads */
    tdd[2 * tkn - 1] |= 0x1 << TDD_DOE_SFT;
    tdd[-1] = TDD_HEAD(1,/*vld*/ 1,/*lk*/ 0,/*sync*/ 1,/*ch1pel*/ 2,/*ch2pel*/
                       TDD_POS_SPEC,/*posmd*/ TDD_MV_AUTO,/*mvmd*/ 1,/*ch2en*/
                       tkn,/*tkn*/ dmb->mb_y,/*mby*/ dmb->mb_x/*mbx*/);
    tdd[2 * tkn] = TDD_HEAD(1,/*vld*/ 0,/*lk*/ 1,/*sync*/ 1,/*ch1pel*/ 2,/*ch2pel*/
                            TDD_POS_SPEC,/*posmd*/ TDD_MV_AUTO,/*mvmd*/ 1,/*ch2en*/
                            0,/*tkn*/ 0xFF,/*mby*/ 0xFF/*mbx*/);
}
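/* A standalone sketch decoding the blkh/blkw/boy/box encoding that the
 * motion_task() call sites above appear to use: block sizes look like
 * 1 << (code + 1) pixels (3 -> 16, 2 -> 8, 1 -> 4) and offsets look
 * like 4-pixel units. This is inferred from the call sites, not from
 * hardware documentation. */
#include <stdio.h>

int main(void)
{
    const struct { const char *name; int blkh, blkw, boy, box; } parts[] = {
        { "16x16        ", 3, 3, 0, 0 },
        { "16x8 lower   ", 2, 3, 2, 0 },
        { "8x16 right   ", 3, 2, 0, 2 },
        { "4x4 (i=3,j=3)", 1, 1, 3, 3 },
    };
    for (unsigned k = 0; k < sizeof(parts) / sizeof(parts[0]); k++)
        printf("%s -> %2dx%-2d at (%2d,%2d)\n", parts[k].name,
               1 << (parts[k].blkw + 1), 1 << (parts[k].blkh + 1),
               4 * parts[k].box, 4 * parts[k].boy);
    return 0;
}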