/* Decode every macroblock row of the current frame, running the loop
 * filter and frame-border extension in a pipeline that lags behind the
 * decode: row N is decoded, row N-1 is loop-filtered, and row N-2 has
 * its left/right borders extended.  Top/bottom borders are extended
 * once at the end.
 *
 * pbi - decoder instance; supplies the bool decoders (one per token
 *       partition), the reference frame buffers via dec_fb_ref[], and
 *       the shared MACROBLOCKD working context.
 */
static void decode_mb_rows(VP8D_COMP *pbi)
{
    VP8_COMMON *const pc = & pbi->common;
    MACROBLOCKD *const xd = & pbi->mb;

    /* Separate cursor into the mode-info array for the (delayed) loop
     * filter; it trails xd->mode_info_context by one row. */
    MODE_INFO *lf_mic = xd->mode_info_context;

    int ibc = 0;                                 /* round-robin partition index */
    int num_part = 1 << pc->multi_token_partition;

    int recon_yoffset, recon_uvoffset;
    int mb_row, mb_col;
    int mb_idx = 0;

    /* dec_fb_ref[INTRA_FRAME] is the new (destination) frame buffer. */
    YV12_BUFFER_CONFIG *yv12_fb_new = pbi->dec_fb_ref[INTRA_FRAME];

    int recon_y_stride = yv12_fb_new->y_stride;
    int recon_uv_stride = yv12_fb_new->uv_stride;

    /* Per-reference-frame Y/U/V base pointers, indexed by ref_frame. */
    unsigned char *ref_buffer[MAX_REF_FRAMES][3];
    unsigned char *dst_buffer[3];
    unsigned char *lf_dst[3];    /* loop-filter cursor: one row behind decode  */
    unsigned char *eb_dst[3];    /* border-extend cursor: one row behind filter */
    int i;
    int ref_fb_corrupted[MAX_REF_FRAMES];

    ref_fb_corrupted[INTRA_FRAME] = 0;

    /* Cache plane pointers and corruption flags for each real reference
     * frame (LAST/GOLDEN/ALTREF); slot 0 (INTRA) is deliberately left
     * unused because intra MBs never read ref_buffer. */
    for(i = 1; i < MAX_REF_FRAMES; i++)
    {
        YV12_BUFFER_CONFIG *this_fb = pbi->dec_fb_ref[i];

        ref_buffer[i][0] = this_fb->y_buffer;
        ref_buffer[i][1] = this_fb->u_buffer;
        ref_buffer[i][2] = this_fb->v_buffer;

        ref_fb_corrupted[i] = this_fb->corrupted;
    }

    /* Set up the buffer pointers: decode, filter, and extend cursors all
     * start at the top of the new frame. */
    eb_dst[0] = lf_dst[0] = dst_buffer[0] = yv12_fb_new->y_buffer;
    eb_dst[1] = lf_dst[1] = dst_buffer[1] = yv12_fb_new->u_buffer;
    eb_dst[2] = lf_dst[2] = dst_buffer[2] = yv12_fb_new->v_buffer;

    xd->up_available = 0;

    /* Initialize the loop filter for this frame. */
    if(pc->filter_level)
        vp8_loop_filter_frame_init(pc, xd, pc->filter_level);

    /* Prime the row of reconstruction pixels above the first mb row. */
    vp8_setup_intra_recon_top_line(yv12_fb_new);

    /* Decode the individual macro block rows. */
    for (mb_row = 0; mb_row < pc->mb_rows; mb_row++)
    {
        /* With multiple token partitions, rows are assigned to bool
         * decoders round-robin. */
        if (num_part > 1)
        {
            xd->current_bc = & pbi->mbc[ibc];
            ibc++;

            if (ibc == num_part)
                ibc = 0;
        }

        recon_yoffset = mb_row * recon_y_stride * 16;   /* 16x16 luma   */
        recon_uvoffset = mb_row * recon_uv_stride * 8;  /* 8x8 chroma   */

        /* reset contexts */
        xd->above_context = pc->above_context;
        vpx_memset(xd->left_context, 0, sizeof(ENTROPY_CONTEXT_PLANES));

        xd->left_available = 0;

        /* Edge distances are kept in 1/8th-pel units (hence << 3). */
        xd->mb_to_top_edge = -((mb_row * 16) << 3);
        xd->mb_to_bottom_edge = ((pc->mb_rows - 1 - mb_row) * 16) << 3;

        /* recon_above points at the row of pixels directly above the
         * current mb row; recon_left at the column directly to its left. */
        xd->recon_above[0] = dst_buffer[0] + recon_yoffset;
        xd->recon_above[1] = dst_buffer[1] + recon_uvoffset;
        xd->recon_above[2] = dst_buffer[2] + recon_uvoffset;

        xd->recon_left[0] = xd->recon_above[0] - 1;
        xd->recon_left[1] = xd->recon_above[1] - 1;
        xd->recon_left[2] = xd->recon_above[2] - 1;

        /* Step back one scanline so recon_above is the row above. */
        xd->recon_above[0] -= xd->dst.y_stride;
        xd->recon_above[1] -= xd->dst.uv_stride;
        xd->recon_above[2] -= xd->dst.uv_stride;

        /* TODO: move to outside row loop */
        xd->recon_left_stride[0] = xd->dst.y_stride;
        xd->recon_left_stride[1] = xd->dst.uv_stride;

        /* Fill the left-edge prediction column for this row. */
        setup_intra_recon_left(xd->recon_left[0], xd->recon_left[1],
                               xd->recon_left[2], xd->dst.y_stride,
                               xd->dst.uv_stride);

        for (mb_col = 0; mb_col < pc->mb_cols; mb_col++)
        {
            /* Distance of Mb to the various image edges.
             * These are specified to 8th pel as they are always compared to values
             * that are in 1/8th pel units
             */
            xd->mb_to_left_edge = -((mb_col * 16) << 3);
            xd->mb_to_right_edge = ((pc->mb_cols - 1 - mb_col) * 16) << 3;

#if CONFIG_ERROR_CONCEALMENT
            {
                int corrupt_residual =
                    (!pbi->independent_partitions &&
                     pbi->frame_corrupt_residual) ||
                    vp8dx_bool_error(xd->current_bc);

                if (pbi->ec_active &&
                    xd->mode_info_context->mbmi.ref_frame == INTRA_FRAME &&
                    corrupt_residual)
                {
                    /* We have an intra block with corrupt coefficients, better to
                     * conceal with an inter block.  Interpolate MVs from neighboring
                     * MBs.
                     *
                     * Note that for the first mb with corrupt residual in a frame,
                     * we might not discover that before decoding the residual. That
                     * happens after this check, and therefore no inter concealment
                     * will be done.
                     */
                    vp8_interpolate_motion(xd, mb_row, mb_col,
                                           pc->mb_rows, pc->mb_cols,
                                           pc->mode_info_stride);
                }
            }
#endif

            xd->dst.y_buffer = dst_buffer[0] + recon_yoffset;
            xd->dst.u_buffer = dst_buffer[1] + recon_uvoffset;
            xd->dst.v_buffer = dst_buffer[2] + recon_uvoffset;

            if (xd->mode_info_context->mbmi.ref_frame >= LAST_FRAME)
            {
                MV_REFERENCE_FRAME ref = xd->mode_info_context->mbmi.ref_frame;

                xd->pre.y_buffer = ref_buffer[ref][0] + recon_yoffset;
                xd->pre.u_buffer = ref_buffer[ref][1] + recon_uvoffset;
                xd->pre.v_buffer = ref_buffer[ref][2] + recon_uvoffset;
            }
            else
            {
                // ref_frame is INTRA_FRAME, pre buffer should not be used.
                xd->pre.y_buffer = 0;
                xd->pre.u_buffer = 0;
                xd->pre.v_buffer = 0;
            }

            /* propagate errors from reference frames */
            xd->corrupted |= ref_fb_corrupted[xd->mode_info_context->mbmi.ref_frame];

            decode_macroblock(pbi, xd, mb_idx);
            mb_idx++;
            xd->left_available = 1;

            /* check if the boolean decoder has suffered an error */
            xd->corrupted |= vp8dx_bool_error(xd->current_bc);

            /* Advance all per-mb cursors: 16 px luma, 8 px chroma. */
            xd->recon_above[0] += 16;
            xd->recon_above[1] += 8;
            xd->recon_above[2] += 8;
            xd->recon_left[0] += 16;
            xd->recon_left[1] += 8;
            xd->recon_left[2] += 8;

            recon_yoffset += 16;
            recon_uvoffset += 8;

            ++xd->mode_info_context;  /* next mb */

            xd->above_context++;
        }

        /* adjust to the next row of mbs */
        vp8_extend_mb_row(yv12_fb_new, xd->dst.y_buffer + 16,
                          xd->dst.u_buffer + 8, xd->dst.v_buffer + 8);

        ++xd->mode_info_context;      /* skip prediction column */
        xd->up_available = 1;

        if(pc->filter_level)
        {
            /* Loop-filter the previous row (the current row may still be
             * read as a prediction source, so filtering must lag). */
            if(mb_row > 0)
            {
                if (pc->filter_type == NORMAL_LOOPFILTER)
                    vp8_loop_filter_row_normal(pc, lf_mic, mb_row-1,
                                               recon_y_stride, recon_uv_stride,
                                               lf_dst[0], lf_dst[1], lf_dst[2]);
                else
                    vp8_loop_filter_row_simple(pc, lf_mic, mb_row-1,
                                               recon_y_stride, recon_uv_stride,
                                               lf_dst[0], lf_dst[1], lf_dst[2]);

                /* Border-extend the row before that (filtering changes the
                 * pixels, so extension must lag the filter by one row). */
                if(mb_row > 1)
                {
                    yv12_extend_frame_left_right_c(yv12_fb_new,
                                                   eb_dst[0],
                                                   eb_dst[1],
                                                   eb_dst[2]);

                    eb_dst[0] += recon_y_stride * 16;
                    eb_dst[1] += recon_uv_stride * 8;
                    eb_dst[2] += recon_uv_stride * 8;
                }

                lf_dst[0] += recon_y_stride * 16;
                lf_dst[1] += recon_uv_stride * 8;
                lf_dst[2] += recon_uv_stride * 8;
                lf_mic += pc->mb_cols;
                lf_mic++;         /* Skip border mb */
            }
        }
        else
        {
            /* No loop filter: border extension lags decode by one row. */
            if(mb_row > 0)
            {
                yv12_extend_frame_left_right_c(yv12_fb_new,
                                               eb_dst[0],
                                               eb_dst[1],
                                               eb_dst[2]);

                eb_dst[0] += recon_y_stride * 16;
                eb_dst[1] += recon_uv_stride * 8;
                eb_dst[2] += recon_uv_stride * 8;
            }
        }
    }

    /* Drain the pipeline: filter the final row, then extend the rows the
     * lagged cursors have not yet reached. */
    if(pc->filter_level)
    {
        if (pc->filter_type == NORMAL_LOOPFILTER)
            vp8_loop_filter_row_normal(pc, lf_mic, mb_row-1, recon_y_stride,
                                       recon_uv_stride, lf_dst[0], lf_dst[1],
                                       lf_dst[2]);
        else
            vp8_loop_filter_row_simple(pc, lf_mic, mb_row-1, recon_y_stride,
                                       recon_uv_stride, lf_dst[0], lf_dst[1],
                                       lf_dst[2]);

        yv12_extend_frame_left_right_c(yv12_fb_new,
                                       eb_dst[0], eb_dst[1], eb_dst[2]);

        eb_dst[0] += recon_y_stride * 16;
        eb_dst[1] += recon_uv_stride * 8;
        eb_dst[2] += recon_uv_stride * 8;
    }

    yv12_extend_frame_left_right_c(yv12_fb_new,
                                   eb_dst[0], eb_dst[1], eb_dst[2]);
    yv12_extend_frame_top_c(yv12_fb_new);
    yv12_extend_frame_bottom_c(yv12_fb_new);
}
/* Decode one row of macroblocks of the current frame.
 *
 * pbi    - decoder instance (bool decoder, error-concealment flags)
 * pc     - common decoder state (frame buffers, mb grid, contexts)
 * mb_row - index of the macroblock row to decode
 * xd     - macroblock descriptor, advanced in place across the row
 *
 * For each macroblock the destination and reference plane pointers are
 * wired up before decode_macroblock() is called; afterwards the row's
 * right-hand border is extended with vp8_extend_mb_row().
 */
static void decode_mb_row(VP8D_COMP *pbi, VP8_COMMON *pc, int mb_row, MACROBLOCKD *xd)
{
    int recon_yoffset, recon_uvoffset;
    int mb_col;
    int ref_fb_idx = pc->lst_fb_idx;
    int dst_fb_idx = pc->new_fb_idx;
    /* Strides read from the last-frame buffer; presumably all frame
     * buffers share one allocation geometry -- TODO confirm. */
    int recon_y_stride = pc->yv12_fb[ref_fb_idx].y_stride;
    int recon_uv_stride = pc->yv12_fb[ref_fb_idx].uv_stride;

    vpx_memset(&pc->left_context, 0, sizeof(pc->left_context));
    recon_yoffset = mb_row * recon_y_stride * 16;   /* 16x16 luma mbs  */
    recon_uvoffset = mb_row * recon_uv_stride * 8;  /* 8x8 chroma mbs  */

    /* reset above block coeffs */
    xd->above_context = pc->above_context;
    xd->up_available = (mb_row != 0);

    /* Edge distances are in 1/8th-pel units.  Negate AFTER shifting:
     * left-shifting a negative value is undefined behavior in C (the
     * original negated first), and this matches mb_to_left_edge below. */
    xd->mb_to_top_edge = -((mb_row * 16) << 3);
    xd->mb_to_bottom_edge = ((pc->mb_rows - 1 - mb_row) * 16) << 3;

    for (mb_col = 0; mb_col < pc->mb_cols; mb_col++)
    {
        /* Distance of Mb to the various image edges.
         * These are specified to 8th pel as they are always compared to values
         * that are in 1/8th pel units
         */
        xd->mb_to_left_edge = -((mb_col * 16) << 3);
        xd->mb_to_right_edge = ((pc->mb_cols - 1 - mb_col) * 16) << 3;

#if CONFIG_ERROR_CONCEALMENT
        {
            int corrupt_residual = (!pbi->independent_partitions &&
                                    pbi->frame_corrupt_residual) ||
                                   vp8dx_bool_error(xd->current_bc);

            if (pbi->ec_active &&
                xd->mode_info_context->mbmi.ref_frame == INTRA_FRAME &&
                corrupt_residual)
            {
                /* We have an intra block with corrupt coefficients, better to
                 * conceal with an inter block. Interpolate MVs from neighboring
                 * MBs.
                 *
                 * Note that for the first mb with corrupt residual in a frame,
                 * we might not discover that before decoding the residual. That
                 * happens after this check, and therefore no inter concealment
                 * will be done.
                 */
                vp8_interpolate_motion(xd, mb_row, mb_col,
                                       pc->mb_rows, pc->mb_cols,
                                       pc->mode_info_stride);
            }
        }
#endif

        xd->dst.y_buffer = pc->yv12_fb[dst_fb_idx].y_buffer + recon_yoffset;
        xd->dst.u_buffer = pc->yv12_fb[dst_fb_idx].u_buffer + recon_uvoffset;
        xd->dst.v_buffer = pc->yv12_fb[dst_fb_idx].v_buffer + recon_uvoffset;

        xd->left_available = (mb_col != 0);

        /* Select the appropriate reference frame for this MB */
        if (xd->mode_info_context->mbmi.ref_frame == LAST_FRAME)
            ref_fb_idx = pc->lst_fb_idx;
        else if (xd->mode_info_context->mbmi.ref_frame == GOLDEN_FRAME)
            ref_fb_idx = pc->gld_fb_idx;
        else
            ref_fb_idx = pc->alt_fb_idx;

        xd->pre.y_buffer = pc->yv12_fb[ref_fb_idx].y_buffer + recon_yoffset;
        xd->pre.u_buffer = pc->yv12_fb[ref_fb_idx].u_buffer + recon_uvoffset;
        xd->pre.v_buffer = pc->yv12_fb[ref_fb_idx].v_buffer + recon_uvoffset;

        if (xd->mode_info_context->mbmi.ref_frame != INTRA_FRAME)
        {
            /* propagate errors from reference frames */
            xd->corrupted |= pc->yv12_fb[ref_fb_idx].corrupted;
        }

        decode_macroblock(pbi, xd, mb_row * pc->mb_cols + mb_col);

        /* check if the boolean decoder has suffered an error */
        xd->corrupted |= vp8dx_bool_error(xd->current_bc);

        recon_yoffset += 16;
        recon_uvoffset += 8;

        ++xd->mode_info_context;  /* next mb */

        xd->above_context++;
    }

    /* adjust to the next row of mbs */
    vp8_extend_mb_row(&pc->yv12_fb[dst_fb_idx],
                      xd->dst.y_buffer + 16,
                      xd->dst.u_buffer + 8,
                      xd->dst.v_buffer + 8);

    ++xd->mode_info_context;      /* skip prediction column */
}
/* Decode one Escape 124 frame.
 *
 * avctx     - codec context (also used as the logging context)
 * data      - output AVFrame
 * got_frame - set to 1 when a picture is returned
 * avpkt     - input packet
 *
 * Returns the frame-size field parsed from the bitstream on success, or
 * a negative AVERROR code on failure.
 *
 * Fixes relative to the previous revision:
 *  - a codebook size of 0 (for codebook 2) is rejected; previously
 *    av_log2(cb_size - 1) was evaluated on an unsigned wrap-around,
 *    producing a bogus 32-bit codebook depth.
 *  - logging goes through avctx instead of a NULL context.
 *  - bare "return -1" replaced with AVERROR_INVALIDDATA for consistency
 *    with the other error paths.
 */
static int escape124_decode_frame(AVCodecContext *avctx,
                                  void *data, int *got_frame,
                                  AVPacket *avpkt)
{
    const uint8_t *buf = avpkt->data;
    int buf_size       = avpkt->size;
    Escape124Context *s = avctx->priv_data;
    AVFrame *frame = data;

    GetBitContext gb;
    unsigned frame_flags, frame_size;
    unsigned i;

    /* skip is unsigned but initialized/compared with -1: the usual
     * arithmetic conversions make "skip == -1" behave as intended. */
    unsigned superblock_index, cb_index = 1,
             superblock_col_index = 0,
             superblocks_per_row = avctx->width / 8, skip = -1;

    uint16_t *old_frame_data, *new_frame_data;
    unsigned old_stride, new_stride;

    int ret;

    init_get_bits(&gb, buf, buf_size * 8);

    // This call also guards the potential depth reads for the
    // codebook unpacking.
    if (!can_safely_read(&gb, 64))
        return AVERROR_INVALIDDATA;

    frame_flags = get_bits_long(&gb, 32);
    frame_size  = get_bits_long(&gb, 32);

    // Leave last frame unchanged
    // FIXME: Is this necessary?  I haven't seen it in any real samples
    if (!(frame_flags & 0x114) || !(frame_flags & 0x7800000)) {
        if (!s->frame->data[0])
            return AVERROR_INVALIDDATA;

        av_log(avctx, AV_LOG_DEBUG, "Skipping frame\n");

        *got_frame = 1;
        if ((ret = av_frame_ref(frame, s->frame)) < 0)
            return ret;

        return frame_size;
    }

    for (i = 0; i < 3; i++) {
        if (frame_flags & (1 << (17 + i))) {
            unsigned cb_depth, cb_size;
            if (i == 2) {
                // This codebook can be cut off at places other than
                // powers of 2, leaving some of the entries undefined.
                cb_size = get_bits_long(&gb, 20);
                if (!cb_size) {
                    /* cb_size - 1 would wrap to UINT_MAX below. */
                    av_log(avctx, AV_LOG_ERROR, "Invalid codebook size 0.\n");
                    return AVERROR_INVALIDDATA;
                }
                cb_depth = av_log2(cb_size - 1) + 1;
            } else {
                cb_depth = get_bits(&gb, 4);
                if (i == 0) {
                    // This is the most basic codebook: pow(2,depth) entries
                    // for a depth-length key
                    cb_size = 1 << cb_depth;
                } else {
                    // This codebook varies per superblock
                    // FIXME: I don't think this handles integer overflow
                    // properly
                    cb_size = s->num_superblocks << cb_depth;
                }
            }
            av_free(s->codebooks[i].blocks);
            s->codebooks[i] = unpack_codebook(&gb, cb_depth, cb_size);
            if (!s->codebooks[i].blocks)
                return AVERROR_INVALIDDATA;
        }
    }

    if ((ret = ff_get_buffer(avctx, frame, AV_GET_BUFFER_FLAG_REF)) < 0) {
        av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
        return ret;
    }

    new_frame_data = (uint16_t *)frame->data[0];
    new_stride     = frame->linesize[0] / 2;
    old_frame_data = (uint16_t *)s->frame->data[0];
    old_stride     = s->frame->linesize[0] / 2;

    for (superblock_index = 0; superblock_index < s->num_superblocks;
         superblock_index++) {
        MacroBlock mb;
        SuperBlock sb;
        unsigned multi_mask = 0;

        if (skip == -1) {
            // Note that this call will make us skip the rest of the blocks
            // if the frame prematurely ends
            skip = decode_skip_count(&gb);
        }

        if (skip) {
            copy_superblock(new_frame_data, new_stride,
                            old_frame_data, old_stride);
        } else {
            copy_superblock(sb.pixels, 8,
                            old_frame_data, old_stride);

            while (can_safely_read(&gb, 1) && !get_bits1(&gb)) {
                unsigned mask;
                mb = decode_macroblock(s, &gb, &cb_index, superblock_index);
                mask = get_bits(&gb, 16);
                multi_mask |= mask;
                for (i = 0; i < 16; i++) {
                    if (mask & mask_matrix[i]) {
                        insert_mb_into_sb(&sb, mb, i);
                    }
                }
            }

            if (can_safely_read(&gb, 1) && !get_bits1(&gb)) {
                unsigned inv_mask = get_bits(&gb, 4);
                for (i = 0; i < 4; i++) {
                    if (inv_mask & (1 << i)) {
                        multi_mask ^= 0xF << i * 4;
                    } else {
                        multi_mask ^= get_bits(&gb, 4) << i * 4;
                    }
                }

                for (i = 0; i < 16; i++) {
                    if (multi_mask & mask_matrix[i]) {
                        if (!can_safely_read(&gb, 1))
                            break;
                        mb = decode_macroblock(s, &gb, &cb_index,
                                               superblock_index);
                        insert_mb_into_sb(&sb, mb, i);
                    }
                }
            } else if (frame_flags & (1 << 16)) {
                while (can_safely_read(&gb, 1) && !get_bits1(&gb)) {
                    mb = decode_macroblock(s, &gb, &cb_index,
                                           superblock_index);
                    insert_mb_into_sb(&sb, mb, get_bits(&gb, 4));
                }
            }

            copy_superblock(new_frame_data, new_stride, sb.pixels, 8);
        }

        superblock_col_index++;
        new_frame_data += 8;
        if (old_frame_data)
            old_frame_data += 8;
        if (superblock_col_index == superblocks_per_row) {
            new_frame_data += new_stride * 8 - superblocks_per_row * 8;
            if (old_frame_data)
                old_frame_data += old_stride * 8 - superblocks_per_row * 8;
            superblock_col_index = 0;
        }
        skip--;
    }

    av_log(avctx, AV_LOG_DEBUG,
           "Escape sizes: %i, %i, %i\n",
           frame_size, buf_size, get_bits_count(&gb) / 8);

    av_frame_unref(s->frame);
    if ((ret = av_frame_ref(s->frame, frame)) < 0)
        return ret;

    *got_frame = 1;

    return frame_size;
}
/* Decode every macroblock row of the current frame (variant without the
 * in-loop filter/extend pipeline; loop filtering happens elsewhere).
 *
 * pbi - decoder instance; supplies the bool decoders (one per token
 *       partition), the yv12 frame-buffer pool, and the shared
 *       MACROBLOCKD working context.
 *
 * Fixes relative to the previous revision:
 *  - mb_to_top_edge negated AFTER the shift; left-shifting a negative
 *    value is undefined behavior in C (matches mb_to_left_edge here and
 *    the other decode_mb_rows variant in this file).
 *  - xd->pre is only derived from ref_buffer[] for inter macroblocks;
 *    ref_buffer[INTRA_FRAME] is never initialized, so the old
 *    unconditional assignment did pointer arithmetic on indeterminate
 *    pointers for intra MBs.  For intra, pre is nulled, mirroring the
 *    other decode_mb_rows variant.
 */
static void decode_mb_rows(VP8D_COMP *pbi)
{
    VP8_COMMON *const pc = & pbi->common;
    MACROBLOCKD *const xd = & pbi->mb;

    int ibc = 0;                                 /* round-robin partition index */
    int num_part = 1 << pc->multi_token_partition;

    int recon_yoffset, recon_uvoffset;
    int mb_row, mb_col;
    int mb_idx = 0;
    int dst_fb_idx = pc->new_fb_idx;

    int recon_y_stride = pc->yv12_fb[dst_fb_idx].y_stride;
    int recon_uv_stride = pc->yv12_fb[dst_fb_idx].uv_stride;

    /* Per-reference-frame Y/U/V base pointers, indexed by ref_frame.
     * Slot INTRA_FRAME is deliberately left uninitialized and must never
     * be read -- see the ref_frame guard in the mb loop. */
    unsigned char *ref_buffer[MAX_REF_FRAMES][3];
    unsigned char *dst_buffer[3];
    int i;
    int ref_fb_index[MAX_REF_FRAMES];
    int ref_fb_corrupted[MAX_REF_FRAMES];

    ref_fb_corrupted[INTRA_FRAME] = 0;

    ref_fb_index[LAST_FRAME]   = pc->lst_fb_idx;
    ref_fb_index[GOLDEN_FRAME] = pc->gld_fb_idx;
    ref_fb_index[ALTREF_FRAME] = pc->alt_fb_idx;

    for(i = 1; i < MAX_REF_FRAMES; i++)
    {
        ref_buffer[i][0] = pc->yv12_fb[ref_fb_index[i]].y_buffer;
        ref_buffer[i][1] = pc->yv12_fb[ref_fb_index[i]].u_buffer;
        ref_buffer[i][2] = pc->yv12_fb[ref_fb_index[i]].v_buffer;

        ref_fb_corrupted[i] = pc->yv12_fb[ref_fb_index[i]].corrupted;
    }

    dst_buffer[0] = pc->yv12_fb[dst_fb_idx].y_buffer;
    dst_buffer[1] = pc->yv12_fb[dst_fb_idx].u_buffer;
    dst_buffer[2] = pc->yv12_fb[dst_fb_idx].v_buffer;

    xd->up_available = 0;

    /* Decode the individual macro block rows. */
    for (mb_row = 0; mb_row < pc->mb_rows; mb_row++)
    {
        /* With multiple token partitions, rows are assigned to bool
         * decoders round-robin. */
        if (num_part > 1)
        {
            xd->current_bc = & pbi->mbc[ibc];
            ibc++;

            if (ibc == num_part)
                ibc = 0;
        }

        recon_yoffset = mb_row * recon_y_stride * 16;   /* 16x16 luma   */
        recon_uvoffset = mb_row * recon_uv_stride * 8;  /* 8x8 chroma   */

        /* reset contexts */
        xd->above_context = pc->above_context;
        vpx_memset(xd->left_context, 0, sizeof(ENTROPY_CONTEXT_PLANES));

        xd->left_available = 0;

        /* Edge distances in 1/8th-pel; negate after shifting to avoid
         * left-shifting a negative value (UB). */
        xd->mb_to_top_edge = -((mb_row * 16) << 3);
        xd->mb_to_bottom_edge = ((pc->mb_rows - 1 - mb_row) * 16) << 3;

        xd->recon_above[0] = dst_buffer[0] + recon_yoffset;
        xd->recon_above[1] = dst_buffer[1] + recon_uvoffset;
        xd->recon_above[2] = dst_buffer[2] + recon_uvoffset;

        xd->recon_left[0] = xd->recon_above[0] - 1;
        xd->recon_left[1] = xd->recon_above[1] - 1;
        xd->recon_left[2] = xd->recon_above[2] - 1;

        /* Step back one scanline so recon_above is the row above. */
        xd->recon_above[0] -= xd->dst.y_stride;
        xd->recon_above[1] -= xd->dst.uv_stride;
        xd->recon_above[2] -= xd->dst.uv_stride;

        //TODO: move to outside row loop
        xd->recon_left_stride[0] = xd->dst.y_stride;
        xd->recon_left_stride[1] = xd->dst.uv_stride;

        for (mb_col = 0; mb_col < pc->mb_cols; mb_col++)
        {
            /* Distance of Mb to the various image edges.
             * These are specified to 8th pel as they are always compared to values
             * that are in 1/8th pel units
             */
            xd->mb_to_left_edge = -((mb_col * 16) << 3);
            xd->mb_to_right_edge = ((pc->mb_cols - 1 - mb_col) * 16) << 3;

#if CONFIG_ERROR_CONCEALMENT
            {
                int corrupt_residual = (!pbi->independent_partitions &&
                                        pbi->frame_corrupt_residual) ||
                                       vp8dx_bool_error(xd->current_bc);

                if (pbi->ec_active &&
                    xd->mode_info_context->mbmi.ref_frame == INTRA_FRAME &&
                    corrupt_residual)
                {
                    /* We have an intra block with corrupt coefficients, better to
                     * conceal with an inter block. Interpolate MVs from neighboring
                     * MBs.
                     *
                     * Note that for the first mb with corrupt residual in a frame,
                     * we might not discover that before decoding the residual. That
                     * happens after this check, and therefore no inter concealment
                     * will be done.
                     */
                    vp8_interpolate_motion(xd, mb_row, mb_col,
                                           pc->mb_rows, pc->mb_cols,
                                           pc->mode_info_stride);
                }
            }
#endif

            xd->dst.y_buffer = dst_buffer[0] + recon_yoffset;
            xd->dst.u_buffer = dst_buffer[1] + recon_uvoffset;
            xd->dst.v_buffer = dst_buffer[2] + recon_uvoffset;

            if (xd->mode_info_context->mbmi.ref_frame >= LAST_FRAME)
            {
                MV_REFERENCE_FRAME ref = xd->mode_info_context->mbmi.ref_frame;

                xd->pre.y_buffer = ref_buffer[ref][0] + recon_yoffset;
                xd->pre.u_buffer = ref_buffer[ref][1] + recon_uvoffset;
                xd->pre.v_buffer = ref_buffer[ref][2] + recon_uvoffset;
            }
            else
            {
                /* ref_frame is INTRA_FRAME; pre must not be used. */
                xd->pre.y_buffer = 0;
                xd->pre.u_buffer = 0;
                xd->pre.v_buffer = 0;
            }

            /* propagate errors from reference frames */
            xd->corrupted |= ref_fb_corrupted[xd->mode_info_context->mbmi.ref_frame];

            decode_macroblock(pbi, xd, mb_idx);
            mb_idx++;
            xd->left_available = 1;

            /* check if the boolean decoder has suffered an error */
            xd->corrupted |= vp8dx_bool_error(xd->current_bc);

            /* Advance all per-mb cursors: 16 px luma, 8 px chroma. */
            xd->recon_above[0] += 16;
            xd->recon_above[1] += 8;
            xd->recon_above[2] += 8;
            xd->recon_left[0] += 16;
            xd->recon_left[1] += 8;
            xd->recon_left[2] += 8;

            recon_yoffset += 16;
            recon_uvoffset += 8;

            ++xd->mode_info_context;  /* next mb */

            xd->above_context++;
        }

        /* adjust to the next row of mbs */
        vp8_extend_mb_row(&pc->yv12_fb[dst_fb_idx],
                          xd->dst.y_buffer + 16,
                          xd->dst.u_buffer + 8,
                          xd->dst.v_buffer + 8);

        ++xd->mode_info_context;      /* skip prediction column */
        xd->up_available = 1;
    }
}