Example no. 1
void vp9_free_ref_frame_buffers(VP9_COMMON *cm) {
  BufferPool *const pool = cm->buffer_pool;
  int i;

  for (i = 0; i < FRAME_BUFFERS; ++i) {
    if (pool->frame_bufs[i].ref_count > 0 &&
        pool->frame_bufs[i].raw_frame_buffer.data != NULL) {
      pool->release_fb_cb(pool->cb_priv, &pool->frame_bufs[i].raw_frame_buffer);
      pool->frame_bufs[i].ref_count = 0;
    }
    vpx_free(pool->frame_bufs[i].mvs);
    pool->frame_bufs[i].mvs = NULL;
    vp9_free_frame_buffer(&pool->frame_bufs[i].buf);
  }

#if CONFIG_VP9_POSTPROC
  vp9_free_frame_buffer(&cm->post_proc_buffer);
  vp9_free_frame_buffer(&cm->post_proc_buffer_int);
#endif
}
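The release_fb_cb hook invoked above is application-supplied and follows libvpx's vpx_release_frame_buffer_cb_fn_t contract from vpx/vpx_frame_buffer.h (first argument is the cb_priv pointer, second the buffer descriptor). A minimal sketch of such a callback, assuming plain heap-backed buffers and a hypothetical name, could look like this:

#include <stdlib.h>
#include "vpx/vpx_frame_buffer.h"

// Hypothetical release callback: frees the storage handed out by the matching
// "get" callback and clears the descriptor, so raw_frame_buffer.data is NULL
// again the next time vp9_free_ref_frame_buffers() inspects it.
static int example_release_frame_buffer(void *priv,
                                        vpx_codec_frame_buffer_t *fb) {
  (void)priv;  // this sketch keeps no per-application allocator state
  free(fb->data);
  fb->data = NULL;
  fb->size = 0;
  return 0;
}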
Example no. 2
void vp9_remove_decompressor(VP9D_PTR ptr) {
  int i;
  VP9D_COMP *const pbi = (VP9D_COMP *)ptr;

  if (!pbi)
    return;

  vp9_remove_common(&pbi->common);
  vp9_worker_end(&pbi->lf_worker);
  vpx_free(pbi->lf_worker.data1);
  for (i = 0; i < pbi->num_tile_workers; ++i) {
    VP9Worker *const worker = &pbi->tile_workers[i];
    vp9_worker_end(worker);
    vpx_free(worker->data1);
    vpx_free(worker->data2);
  }
  vpx_free(pbi->tile_workers);
  vpx_free(pbi->mi_streams);
  vpx_free(pbi->above_context[0]);
  vpx_free(pbi->above_seg_context);
  vpx_free(pbi);
}
Example no. 3
void vp10_decoder_remove(VP10Decoder *pbi) {
  int i;

  vpx_get_worker_interface()->end(&pbi->lf_worker);
  vpx_free(pbi->lf_worker.data1);
  vpx_free(pbi->tile_data);
  for (i = 0; i < pbi->num_tile_workers; ++i) {
    VPxWorker *const worker = &pbi->tile_workers[i];
    vpx_get_worker_interface()->end(worker);
  }
  vpx_free(pbi->tile_worker_data);
  vpx_free(pbi->tile_worker_info);
  vpx_free(pbi->tile_workers);

  if (pbi->num_tile_workers > 0) {
    vp10_loop_filter_dealloc(&pbi->lf_row_sync);
  }

  vpx_free(pbi);
}
Example no. 4
static void free_mode_context(PICK_MODE_CONTEXT *ctx) {
  int i, k;
  vpx_free(ctx->zcoeff_blk);
  ctx->zcoeff_blk = 0;
  for (i = 0; i < MAX_MB_PLANE; ++i) {
    for (k = 0; k < 3; ++k) {
      vpx_free(ctx->coeff[i][k]);
      ctx->coeff[i][k] = 0;
      vpx_free(ctx->qcoeff[i][k]);
      ctx->qcoeff[i][k] = 0;
      vpx_free(ctx->dqcoeff[i][k]);
      ctx->dqcoeff[i][k] = 0;
      vpx_free(ctx->eobs[i][k]);
      ctx->eobs[i][k] = 0;
    }
  }

  for (i = 0; i < 2; ++i) {
    vpx_free(ctx->color_index_map[i]);
    ctx->color_index_map[i] = 0;
  }
}
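Every buffer freed here is also reset to NULL (written as 0 in this file), which keeps the function safe to call on a context whose allocation only partially succeeded. For orientation, a simplified sketch of the matching allocation side follows; the function name and the num_blk sizing are hypothetical, and the real encoder derives these sizes from the partition being searched and reports failures through CHECK_MEM_ERROR.

// Sketch only: allocate the per-plane buffers that free_mode_context()
// releases, assuming ctx itself came from vpx_calloc() so unset pointers
// are already NULL.
static int example_alloc_mode_context(PICK_MODE_CONTEXT *ctx, int num_blk) {
  int i, k;
  for (i = 0; i < MAX_MB_PLANE; ++i) {
    for (k = 0; k < 3; ++k) {
      ctx->coeff[i][k] =
          vpx_memalign(32, num_blk * 16 * sizeof(*ctx->coeff[i][k]));
      ctx->qcoeff[i][k] =
          vpx_memalign(32, num_blk * 16 * sizeof(*ctx->qcoeff[i][k]));
      ctx->dqcoeff[i][k] =
          vpx_memalign(32, num_blk * 16 * sizeof(*ctx->dqcoeff[i][k]));
      ctx->eobs[i][k] =
          vpx_memalign(32, num_blk * sizeof(*ctx->eobs[i][k]));
      if (!ctx->coeff[i][k] || !ctx->qcoeff[i][k] ||
          !ctx->dqcoeff[i][k] || !ctx->eobs[i][k])
        return -1;  // partially filled; free_mode_context() is still safe
    }
  }
  return 0;
}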
Example no. 5
void vp9_decoder_remove(VP9Decoder *pbi) {
  VP9_COMMON *const cm = &pbi->common;
  int i;

  vp9_get_worker_interface()->end(&pbi->lf_worker);
  vpx_free(pbi->lf_worker.data1);
  vpx_free(pbi->tile_data);
  for (i = 0; i < pbi->num_tile_workers; ++i) {
    VP9Worker *const worker = &pbi->tile_workers[i];
    vp9_get_worker_interface()->end(worker);
  }
  vpx_free(pbi->tile_worker_data);
  vpx_free(pbi->tile_worker_info);
  vpx_free(pbi->tile_workers);

  if (pbi->num_tile_workers > 0) {
    vp9_loop_filter_dealloc(&pbi->lf_row_sync);
  }

  vp9_remove_common(cm);
  vpx_free(pbi);
}
Example no. 6
void vp9_decoder_remove(VP9Decoder *pbi) {
  VP9_COMMON *const cm = &pbi->common;
  int i;

  vp9_get_worker_interface()->end(&pbi->lf_worker);
  vpx_free(pbi->lf_worker.data1);
  vpx_free(pbi->tile_data);
  for (i = 0; i < pbi->num_tile_workers; ++i) {
    VP9Worker *const worker = &pbi->tile_workers[i];
    vp9_get_worker_interface()->end(worker);
    vpx_free(worker->data1);
    vpx_free(worker->data2);
  }
  vpx_free(pbi->tile_workers);

  if (pbi->num_tile_workers) {
    const int sb_rows =
        mi_cols_aligned_to_sb(cm->mi_rows) >> MI_BLOCK_SIZE_LOG2;
    vp9_loop_filter_dealloc(&pbi->lf_row_sync, sb_rows);
  }

  vp9_remove_common(cm);
  vpx_free(pbi);
}
Example no. 7
int vp8cx_create_encoder_threads(VP8_COMP *cpi)
{
    const VP8_COMMON * cm = &cpi->common;

    cpi->b_multi_threaded = 0;
    cpi->encoding_thread_count = 0;
    cpi->b_lpf_running = 0;

    if (cm->processor_core_count > 1 && cpi->oxcf.multi_threaded > 1)
    {
        int ithread;
        int th_count = cpi->oxcf.multi_threaded - 1;
        int rc = 0;

        /* don't allocate more threads than cores available */
        if (cpi->oxcf.multi_threaded > cm->processor_core_count)
            th_count = cm->processor_core_count - 1;

        /* we have th_count + 1 (main) threads processing one row each */
        /* no point in having more threads than the sync range allows */
        if(th_count > ((cm->mb_cols / cpi->mt_sync_range) - 1))
        {
            th_count = (cm->mb_cols / cpi->mt_sync_range) - 1;
        }

        if(th_count == 0)
            return 0;

        CHECK_MEM_ERROR(cpi->h_encoding_thread,
                        vpx_malloc(sizeof(pthread_t) * th_count));
        CHECK_MEM_ERROR(cpi->h_event_start_encoding,
                        vpx_malloc(sizeof(sem_t) * th_count));
        CHECK_MEM_ERROR(cpi->mb_row_ei,
                        vpx_memalign(32, sizeof(MB_ROW_COMP) * th_count));
        vpx_memset(cpi->mb_row_ei, 0, sizeof(MB_ROW_COMP) * th_count);
        CHECK_MEM_ERROR(cpi->en_thread_data,
                        vpx_malloc(sizeof(ENCODETHREAD_DATA) * th_count));

        sem_init(&cpi->h_event_end_encoding, 0, 0);

        cpi->b_multi_threaded = 1;
        cpi->encoding_thread_count = th_count;

        /*
        printf("[VP8:] multi_threaded encoding is enabled with %d threads\n\n",
               (cpi->encoding_thread_count +1));
        */

        for (ithread = 0; ithread < th_count; ithread++)
        {
            ENCODETHREAD_DATA *ethd = &cpi->en_thread_data[ithread];

            /* Setup block ptrs and offsets */
            vp8_setup_block_ptrs(&cpi->mb_row_ei[ithread].mb);
            vp8_setup_block_dptrs(&cpi->mb_row_ei[ithread].mb.e_mbd);

            sem_init(&cpi->h_event_start_encoding[ithread], 0, 0);

            ethd->ithread = ithread;
            ethd->ptr1 = (void *)cpi;
            ethd->ptr2 = (void *)&cpi->mb_row_ei[ithread];

            rc = pthread_create(&cpi->h_encoding_thread[ithread], 0,
                                thread_encoding_proc, ethd);
            if(rc)
                break;
        }

        if(rc)
        {
            /* shutdown other threads */
            cpi->b_multi_threaded = 0;
            for(--ithread; ithread >= 0; ithread--)
            {
                pthread_join(cpi->h_encoding_thread[ithread], 0);
                sem_destroy(&cpi->h_event_start_encoding[ithread]);
            }
            sem_destroy(&cpi->h_event_end_encoding);

            /* free thread related resources */
            vpx_free(cpi->h_event_start_encoding);
            vpx_free(cpi->h_encoding_thread);
            vpx_free(cpi->mb_row_ei);
            vpx_free(cpi->en_thread_data);

            return -1;
        }


        {
            LPFTHREAD_DATA * lpfthd = &cpi->lpf_thread_data;

            sem_init(&cpi->h_event_start_lpf, 0, 0);
            sem_init(&cpi->h_event_end_lpf, 0, 0);

            lpfthd->ptr1 = (void *)cpi;
            rc = pthread_create(&cpi->h_filter_thread, 0, thread_loopfilter,
                                lpfthd);

            if(rc)
            {
                /* shutdown other threads */
                cpi->b_multi_threaded = 0;
                for(--ithread; ithread >= 0; ithread--)
                {
                    sem_post(&cpi->h_event_start_encoding[ithread]);
                    pthread_join(cpi->h_encoding_thread[ithread], 0);
                    sem_destroy(&cpi->h_event_start_encoding[ithread]);
                }
                sem_destroy(&cpi->h_event_end_encoding);
                sem_destroy(&cpi->h_event_end_lpf);
                sem_destroy(&cpi->h_event_start_lpf);

                /* free thread related resources */
                vpx_free(cpi->h_event_start_encoding);
                vpx_free(cpi->h_encoding_thread);
                vpx_free(cpi->mb_row_ei);
                vpx_free(cpi->en_thread_data);

                return -2;
            }
        }
    }
    return 0;
}
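The error-handling paths above already show the shutdown order a full teardown needs: clear b_multi_threaded so workers exit their loops, post each start semaphore so a blocked worker wakes up and sees the flag, join the thread, destroy its semaphore, and only then free the arrays. The encoder's actual teardown (vp8cx_remove_encoder_threads) follows the same pattern; the sketch below is a simplified illustration, not its verbatim body.

// Sketch only: shut down the encoding workers and the loop-filter thread
// created by vp8cx_create_encoder_threads(), mirroring its error paths.
static void example_shutdown_encoder_threads(VP8_COMP *cpi)
{
    if (cpi->b_multi_threaded)
    {
        int i;

        cpi->b_multi_threaded = 0;  /* workers check this flag and exit */

        for (i = 0; i < cpi->encoding_thread_count; i++)
        {
            sem_post(&cpi->h_event_start_encoding[i]);  /* wake the worker */
            pthread_join(cpi->h_encoding_thread[i], 0);
            sem_destroy(&cpi->h_event_start_encoding[i]);
        }
        sem_destroy(&cpi->h_event_end_encoding);

        /* wake, join and clean up the loop-filter thread the same way */
        sem_post(&cpi->h_event_start_lpf);
        pthread_join(cpi->h_filter_thread, 0);
        sem_destroy(&cpi->h_event_start_lpf);
        sem_destroy(&cpi->h_event_end_lpf);

        /* free thread related resources */
        vpx_free(cpi->h_event_start_encoding);
        vpx_free(cpi->h_encoding_thread);
        vpx_free(cpi->mb_row_ei);
        vpx_free(cpi->en_thread_data);
    }
}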
Example no. 8
static void vp10_dec_free_mi(VP10_COMMON *cm) {
  vpx_free(cm->mip);
  cm->mip = NULL;
  vpx_free(cm->mi_grid_base);
  cm->mi_grid_base = NULL;
}
Example no. 9
// void separate_arf_mbs_byzz
static void separate_arf_mbs(VP9_COMP *cpi) {
  VP9_COMMON *const cm = &cpi->common;
  int mb_col, mb_row, offset, i;
  int mi_row, mi_col;
  int ncnt[4] = { 0 };
  int n_frames = cpi->mbgraph_n_frames;

  int *arf_not_zz;

  CHECK_MEM_ERROR(cm, arf_not_zz,
                  vpx_calloc(cm->mb_rows * cm->mb_cols * sizeof(*arf_not_zz),
                             1));

  // We are not interested in results beyond the alt ref itself.
  if (n_frames > cpi->rc.frames_till_gf_update_due)
    n_frames = cpi->rc.frames_till_gf_update_due;

  // defer cost to reference frames
  for (i = n_frames - 1; i >= 0; i--) {
    MBGRAPH_FRAME_STATS *frame_stats = &cpi->mbgraph_stats[i];

    for (offset = 0, mb_row = 0; mb_row < cm->mb_rows;
         offset += cm->mb_cols, mb_row++) {
      for (mb_col = 0; mb_col < cm->mb_cols; mb_col++) {
        MBGRAPH_MB_STATS *mb_stats = &frame_stats->mb_stats[offset + mb_col];

        int altref_err = mb_stats->ref[ALTREF_FRAME].err;
        int intra_err  = mb_stats->ref[INTRA_FRAME ].err;
        int golden_err = mb_stats->ref[GOLDEN_FRAME].err;

        // Test for altref vs intra and gf and that its mv was 0,0.
        if (altref_err > 1000 ||
            altref_err > intra_err ||
            altref_err > golden_err) {
          arf_not_zz[offset + mb_col]++;
        }
      }
    }
  }

  // arf_not_zz is indexed by MB, but this loop is indexed by MI to avoid
  // out-of-bounds accesses into segmentation_map
  for (mi_row = 0; mi_row < cm->mi_rows; mi_row++) {
    for (mi_col = 0; mi_col < cm->mi_cols; mi_col++) {
      // If any of the blocks in the sequence failed then the MB
      // goes in segment 0
      if (arf_not_zz[mi_row / 2 * cm->mb_cols + mi_col / 2]) {
        ncnt[0]++;
        cpi->segmentation_map[mi_row * cm->mi_cols + mi_col] = 0;
      } else {
        cpi->segmentation_map[mi_row * cm->mi_cols + mi_col] = 1;
        ncnt[1]++;
      }
    }
  }

  // Only bother with segmentation if over 10% of the MBs are in the static
  // segment
  // if ( ncnt[1] && (ncnt[0] / ncnt[1] < 10) )
  if (1) {
    // Note % of blocks that are marked as static
    if (cm->MBs)
      cpi->static_mb_pct = (ncnt[1] * 100) / (cm->mi_rows * cm->mi_cols);

    // This error case should not be reachable as this function should
    // never be called with the common data structure uninitialized.
    else
      cpi->static_mb_pct = 0;

    vp9_enable_segmentation(&cm->seg);
  } else {
    cpi->static_mb_pct = 0;
    vp9_disable_segmentation(&cm->seg);
  }

  // Free locally allocated storage
  vpx_free(arf_not_zz);
}
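The MI-indexed loop above folds each 8x8 mode-info position back onto its 16x16 macroblock by halving both coordinates, since one MB covers a 2x2 group of MIs. As a small illustration of that mapping (the helper name is hypothetical):

// Sketch: row-major index of the macroblock that contains mode-info unit
// (mi_row, mi_col), in an array that is mb_cols entries wide.
static int example_mi_to_mb_index(int mi_row, int mi_col, int mb_cols) {
  return (mi_row >> 1) * mb_cols + (mi_col >> 1);
}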
Example no. 10
static void vp9_dec_free_mi(VP9_COMMON *cm) {
  vpx_free(cm->mip);
  cm->mip = NULL;
}
Example no. 11
int vpx_realloc_frame_buffer(YV12_BUFFER_CONFIG *ybf,
                             int width, int height,
                             int ss_x, int ss_y,
#if CONFIG_VP9_HIGHBITDEPTH
                             int use_highbitdepth,
#endif
                             int border,
                             int byte_alignment,
                             vpx_codec_frame_buffer_t *fb,
                             vpx_get_frame_buffer_cb_fn_t cb,
                             void *cb_priv) {
  if (ybf) {
    const int vp9_byte_align = (byte_alignment == 0) ? 1 : byte_alignment;
    const int aligned_width = (width + 7) & ~7;
    const int aligned_height = (height + 7) & ~7;
    const int y_stride = ((aligned_width + 2 * border) + 31) & ~31;
    const uint64_t yplane_size = (aligned_height + 2 * border) *
                                 (uint64_t)y_stride + byte_alignment;
    const int uv_width = aligned_width >> ss_x;
    const int uv_height = aligned_height >> ss_y;
    const int uv_stride = y_stride >> ss_x;
    const int uv_border_w = border >> ss_x;
    const int uv_border_h = border >> ss_y;
    const uint64_t uvplane_size = (uv_height + 2 * uv_border_h) *
                                  (uint64_t)uv_stride + byte_alignment;

#if CONFIG_ALPHA
    const int alpha_width = aligned_width;
    const int alpha_height = aligned_height;
    const int alpha_stride = y_stride;
    const int alpha_border_w = border;
    const int alpha_border_h = border;
    const uint64_t alpha_plane_size = (alpha_height + 2 * alpha_border_h) *
                                      (uint64_t)alpha_stride + byte_alignment;
#if CONFIG_VP9_HIGHBITDEPTH
    const uint64_t frame_size = (1 + use_highbitdepth) *
        (yplane_size + 2 * uvplane_size + alpha_plane_size);
#else
    const uint64_t frame_size = yplane_size + 2 * uvplane_size +
                                alpha_plane_size;
#endif  // CONFIG_VP9_HIGHBITDEPTH
#else
#if CONFIG_VP9_HIGHBITDEPTH
    const uint64_t frame_size =
        (1 + use_highbitdepth) * (yplane_size + 2 * uvplane_size);
#else
    const uint64_t frame_size = yplane_size + 2 * uvplane_size;
#endif  // CONFIG_VP9_HIGHBITDEPTH
#endif  // CONFIG_ALPHA

    uint8_t *buf = NULL;

    if (cb != NULL) {
      const int align_addr_extra_size = 31;
      const uint64_t external_frame_size = frame_size + align_addr_extra_size;

      assert(fb != NULL);

      if (external_frame_size != (size_t)external_frame_size)
        return -1;

      // Allocation to hold larger frame, or first allocation.
      if (cb(cb_priv, (size_t)external_frame_size, fb) < 0)
        return -1;

      if (fb->data == NULL || fb->size < external_frame_size)
        return -1;

      ybf->buffer_alloc = (uint8_t *)yv12_align_addr(fb->data, 32);
    } else if (frame_size > (size_t)ybf->buffer_alloc_sz) {
      // Allocation to hold larger frame, or first allocation.
      vpx_free(ybf->buffer_alloc);
      ybf->buffer_alloc = NULL;

      if (frame_size != (size_t)frame_size)
        return -1;

      ybf->buffer_alloc = (uint8_t *)vpx_memalign(32, (size_t)frame_size);
      if (!ybf->buffer_alloc)
        return -1;

      ybf->buffer_alloc_sz = (int)frame_size;

      // This memset is needed to fix a valgrind error caused by the C loop
      // filter accessing uninitialized memory in the frame border. It could
      // be removed if the border were removed entirely.
      memset(ybf->buffer_alloc, 0, ybf->buffer_alloc_sz);
    }

    /* Only support allocating buffers that have a border that's a multiple
     * of 32. The border restriction is required to get 16-byte alignment of
     * the start of the chroma rows without introducing an arbitrary gap
     * between planes, which would break the semantics of things like
     * vpx_img_set_rect(). */
    if (border & 0x1f)
      return -3;

    ybf->y_crop_width = width;
    ybf->y_crop_height = height;
    ybf->y_width  = aligned_width;
    ybf->y_height = aligned_height;
    ybf->y_stride = y_stride;

    ybf->uv_crop_width = (width + ss_x) >> ss_x;
    ybf->uv_crop_height = (height + ss_y) >> ss_y;
    ybf->uv_width = uv_width;
    ybf->uv_height = uv_height;
    ybf->uv_stride = uv_stride;

    ybf->border = border;
    ybf->frame_size = (int)frame_size;
    ybf->subsampling_x = ss_x;
    ybf->subsampling_y = ss_y;

    buf = ybf->buffer_alloc;
#if CONFIG_VP9_HIGHBITDEPTH
    if (use_highbitdepth) {
      // Store uint16 addresses when using 16bit framebuffers
      buf = CONVERT_TO_BYTEPTR(ybf->buffer_alloc);
      ybf->flags = YV12_FLAG_HIGHBITDEPTH;
    } else {
      ybf->flags = 0;
    }
#endif  // CONFIG_VP9_HIGHBITDEPTH

    ybf->y_buffer = (uint8_t *)yv12_align_addr(
        buf + (border * y_stride) + border, vp9_byte_align);
    ybf->u_buffer = (uint8_t *)yv12_align_addr(
        buf + yplane_size + (uv_border_h * uv_stride) + uv_border_w,
        vp9_byte_align);
    ybf->v_buffer = (uint8_t *)yv12_align_addr(
        buf + yplane_size + uvplane_size + (uv_border_h * uv_stride) +
        uv_border_w, vp9_byte_align);

#if CONFIG_ALPHA
    ybf->alpha_width = alpha_width;
    ybf->alpha_height = alpha_height;
    ybf->alpha_stride = alpha_stride;
    ybf->alpha_buffer = (uint8_t *)yv12_align_addr(
        buf + yplane_size + 2 * uvplane_size +
        (alpha_border_h * alpha_stride) + alpha_border_w, vp9_byte_align);
#endif
    ybf->corrupted = 0; /* assume not corrupted by errors */
    return 0;
  }
  return -2;
}
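The external-allocation branch above passes frame_size plus 31 bytes of alignment slack to cb, which must have the vpx_get_frame_buffer_cb_fn_t signature from vpx/vpx_frame_buffer.h and fill in both fb->data and fb->size (the function rejects the result otherwise). A minimal sketch of such a callback, with a hypothetical name and plain heap allocation, could be:

#include <stdlib.h>
#include "vpx/vpx_frame_buffer.h"

// Hypothetical "get" callback: hand vpx_realloc_frame_buffer() at least
// min_size bytes and record them in the descriptor. Zero-filling mirrors
// the memset the internal path performs to keep the frame border defined.
static int example_get_frame_buffer(void *priv, size_t min_size,
                                    vpx_codec_frame_buffer_t *fb) {
  (void)priv;  // no allocator state in this sketch
  fb->data = (uint8_t *)calloc(min_size, 1);
  if (fb->data == NULL) return -1;
  fb->size = min_size;
  return 0;
}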
Example no. 12
void vp9_cyclic_refresh_free(CYCLIC_REFRESH *cr) {
  vpx_free(cr->map);
  vpx_free(cr->last_coded_q_map);
  vpx_free(cr);
}
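Because vpx_free() accepts NULL, this function can release a CYCLIC_REFRESH whose maps were only partially allocated, which lets the allocator fail cleanly. A simplified sketch of that allocation side follows; it is not the verbatim upstream vp9_cyclic_refresh_alloc(), just the same shape under a hypothetical name.

// Sketch only: allocate a CYCLIC_REFRESH with its two per-MI maps sized for
// an mi_rows x mi_cols frame, releasing everything on any failure.
static CYCLIC_REFRESH *example_cyclic_refresh_alloc(int mi_rows, int mi_cols) {
  CYCLIC_REFRESH *const cr = vpx_calloc(1, sizeof(*cr));
  if (cr == NULL) return NULL;

  cr->map = vpx_calloc(mi_rows * mi_cols, sizeof(*cr->map));
  cr->last_coded_q_map =
      vpx_malloc(mi_rows * mi_cols * sizeof(*cr->last_coded_q_map));
  if (cr->map == NULL || cr->last_coded_q_map == NULL) {
    vp9_cyclic_refresh_free(cr);  // frees whichever maps exist, then cr
    return NULL;
  }
  return cr;
}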
Example no. 13
File: dering.c Project: jmvalin/aom
void vp10_dering_frame(YV12_BUFFER_CONFIG *frame, VP10_COMMON *cm,
                       MACROBLOCKD *xd, int global_level) {
    int r, c;
    int sbr, sbc;
    int nhsb, nvsb;
    od_dering_in *src[3];
    unsigned char *bskip;
    int dir[OD_DERING_NBLOCKS][OD_DERING_NBLOCKS] = {{0}};
    int stride;
    int bsize[3];
    int dec[3];
    int pli;
    int coeff_shift = VPXMAX(cm->bit_depth - 8, 0);
    nvsb = (cm->mi_rows + MI_BLOCK_SIZE - 1)/MI_BLOCK_SIZE;
    nhsb = (cm->mi_cols + MI_BLOCK_SIZE - 1)/MI_BLOCK_SIZE;
    bskip = vpx_malloc(sizeof(*bskip)*cm->mi_rows*cm->mi_cols);
    vp10_setup_dst_planes(xd->plane, frame, 0, 0);
    for (pli = 0; pli < 3; pli++) {
        dec[pli] = xd->plane[pli].subsampling_x;
        bsize[pli] = 8 >> dec[pli];
    }
    stride = bsize[0]*cm->mi_cols;
    for (pli = 0; pli < 3; pli++) {
        src[pli] = vpx_malloc(sizeof(*src)*cm->mi_rows*cm->mi_cols*64);
        for (r = 0; r < bsize[pli]*cm->mi_rows; ++r) {
            for (c = 0; c < bsize[pli]*cm->mi_cols; ++c) {
#if CONFIG_VPX_HIGHBITDEPTH
                if (cm->use_highbitdepth) {
                    src[pli][r * stride + c] =
                        CONVERT_TO_SHORTPTR(xd->plane[pli].dst.buf)
                        [r * xd->plane[pli].dst.stride + c];
                } else {
#endif
                    src[pli][r * stride + c] =
                        xd->plane[pli].dst.buf[r * xd->plane[pli].dst.stride + c];
#if CONFIG_VPX_HIGHBITDEPTH
                }
#endif
            }
        }
    }
    for (r = 0; r < cm->mi_rows; ++r) {
        for (c = 0; c < cm->mi_cols; ++c) {
            const MB_MODE_INFO *mbmi =
                &cm->mi_grid_visible[r * cm->mi_stride + c]->mbmi;
            bskip[r * cm->mi_cols + c] = mbmi->skip;
        }
    }
    for (sbr = 0; sbr < nvsb; sbr++) {
        for (sbc = 0; sbc < nhsb; sbc++) {
            int level;
            int nhb, nvb;
            nhb = VPXMIN(MI_BLOCK_SIZE, cm->mi_cols - MI_BLOCK_SIZE*sbc);
            nvb = VPXMIN(MI_BLOCK_SIZE, cm->mi_rows - MI_BLOCK_SIZE*sbr);
            for (pli = 0; pli < 3; pli++) {
                int16_t dst[MI_BLOCK_SIZE*MI_BLOCK_SIZE*8*8];
                int threshold;
#if DERING_REFINEMENT
                level = compute_level_from_index(
                            global_level,
                            cm->mi_grid_visible[MI_BLOCK_SIZE*sbr*cm->mi_stride +
                                                MI_BLOCK_SIZE*sbc]->mbmi.dering_gain);
#else
                level = global_level;
#endif
                /* FIXME: This is a temporary hack that uses more conservative
                   deringing for chroma. */
                if (pli) level = (level*5 + 4) >> 3;
                if (sb_all_skip(cm, sbr*MI_BLOCK_SIZE, sbc*MI_BLOCK_SIZE)) level = 0;
                threshold = level << coeff_shift;
                od_dering(
                    &OD_DERING_VTBL_C,
                    dst,
                    MI_BLOCK_SIZE*bsize[pli],
                    &src[pli][sbr*stride*bsize[pli]*MI_BLOCK_SIZE +
                              sbc*bsize[pli]*MI_BLOCK_SIZE],
                    stride, nhb, nvb, sbc, sbr, nhsb, nvsb, dec[pli], dir, pli,
                    &bskip[MI_BLOCK_SIZE*sbr*cm->mi_cols + MI_BLOCK_SIZE*sbc],
                    cm->mi_cols, threshold, OD_DERING_NO_CHECK_OVERLAP, coeff_shift);
                for (r = 0; r < bsize[pli]*nvb; ++r) {
                    for (c = 0; c < bsize[pli]*nhb; ++c) {
#if CONFIG_VPX_HIGHBITDEPTH
                        if (cm->use_highbitdepth) {
                            CONVERT_TO_SHORTPTR(xd->plane[pli].dst.buf)
                            [xd->plane[pli].dst.stride*(bsize[pli]*MI_BLOCK_SIZE*sbr + r)
                             + sbc*bsize[pli]*MI_BLOCK_SIZE + c] =
                                 dst[r * MI_BLOCK_SIZE * bsize[pli] + c];
                        } else {
#endif
                            xd->plane[pli].dst.buf[xd->plane[pli].dst.stride*
                                                   (bsize[pli]*MI_BLOCK_SIZE*sbr + r) +
                                                   sbc*bsize[pli]*MI_BLOCK_SIZE + c] =
                                                       dst[r * MI_BLOCK_SIZE * bsize[pli] + c];
#if CONFIG_VPX_HIGHBITDEPTH
                        }
#endif
                    }
                }
            }
        }
    }
    for (pli = 0; pli < 3; pli++) {
        vpx_free(src[pli]);
    }
    vpx_free(bskip);
}
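sb_all_skip() above short-circuits deringing (level forced to 0) for superblocks in which every mode-info unit is a skip block. Its exact upstream body is not shown here; a sketch consistent with the mi_grid_visible access pattern used for bskip, under that assumption, would be:

// Sketch: return 1 when every mode-info unit inside the superblock whose
// top-left MI position is (mi_row, mi_col) has the skip flag set.
static int example_sb_all_skip(const VP10_COMMON *cm, int mi_row, int mi_col) {
    int r, c;
    const int maxr = VPXMIN(MI_BLOCK_SIZE, cm->mi_rows - mi_row);
    const int maxc = VPXMIN(MI_BLOCK_SIZE, cm->mi_cols - mi_col);
    for (r = 0; r < maxr; r++) {
        for (c = 0; c < maxc; c++) {
            if (!cm->mi_grid_visible[(mi_row + r)*cm->mi_stride + mi_col + c]
                     ->mbmi.skip)
                return 0;
        }
    }
    return 1;
}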
Example no. 14
int vp9_realloc_frame_buffer(YV12_BUFFER_CONFIG *ybf,
                             int width, int height,
                             int ss_x, int ss_y, int border,
                             vpx_codec_frame_buffer_t *fb,
                             vpx_get_frame_buffer_cb_fn_t cb,
                             void *cb_priv) {
  if (ybf) {
    const int aligned_width = (width + 7) & ~7;
    const int aligned_height = (height + 7) & ~7;
    const int y_stride = ((aligned_width + 2 * border) + 31) & ~31;
    const int yplane_size = (aligned_height + 2 * border) * y_stride;
    const int uv_width = aligned_width >> ss_x;
    const int uv_height = aligned_height >> ss_y;
    const int uv_stride = y_stride >> ss_x;
    const int uv_border_w = border >> ss_x;
    const int uv_border_h = border >> ss_y;
    const int uvplane_size = (uv_height + 2 * uv_border_h) * uv_stride;
#if CONFIG_ALPHA
    const int alpha_width = aligned_width;
    const int alpha_height = aligned_height;
    const int alpha_stride = y_stride;
    const int alpha_border_w = border;
    const int alpha_border_h = border;
    const int alpha_plane_size = (alpha_height + 2 * alpha_border_h) *
                                 alpha_stride;
    const int frame_size = yplane_size + 2 * uvplane_size +
                           alpha_plane_size;
#else
    const int frame_size = yplane_size + 2 * uvplane_size;
#endif
    if (cb != NULL) {
      const int align_addr_extra_size = 31;
      const size_t external_frame_size = frame_size + align_addr_extra_size;

      assert(fb != NULL);

      // Allocation to hold larger frame, or first allocation.
      if (cb(cb_priv, external_frame_size, fb) < 0)
        return -1;

      if (fb->data == NULL || fb->size < external_frame_size)
        return -1;

      // This memset is needed to fix a valgrind error caused by the C loop
      // filter accessing uninitialized memory in the frame border. It could
      // be removed if the border were removed entirely.
      vpx_memset(fb->data, 0, fb->size);

      ybf->buffer_alloc = (uint8_t *)yv12_align_addr(fb->data, 32);
    } else if (frame_size > ybf->buffer_alloc_sz) {
      // Allocation to hold larger frame, or first allocation.
      if (ybf->buffer_alloc)
        vpx_free(ybf->buffer_alloc);
      ybf->buffer_alloc = (uint8_t *)vpx_memalign(32, frame_size);
      if (!ybf->buffer_alloc)
        return -1;

      ybf->buffer_alloc_sz = frame_size;

      // This memset is needed to fix a valgrind error caused by the C loop
      // filter accessing uninitialized memory in the frame border. It could
      // be removed if the border were removed entirely.
      vpx_memset(ybf->buffer_alloc, 0, ybf->buffer_alloc_sz);
    }

    /* Only support allocating buffers that have a border that's a multiple
     * of 32. The border restriction is required to get 16-byte alignment of
     * the start of the chroma rows without introducing an arbitrary gap
     * between planes, which would break the semantics of things like
     * vpx_img_set_rect(). */
    if (border & 0x1f)
      return -3;

    ybf->y_crop_width = width;
    ybf->y_crop_height = height;
    ybf->y_width  = aligned_width;
    ybf->y_height = aligned_height;
    ybf->y_stride = y_stride;

    ybf->uv_crop_width = (width + ss_x) >> ss_x;
    ybf->uv_crop_height = (height + ss_y) >> ss_y;
    ybf->uv_width = uv_width;
    ybf->uv_height = uv_height;
    ybf->uv_stride = uv_stride;

    ybf->border = border;
    ybf->frame_size = frame_size;

    ybf->y_buffer = ybf->buffer_alloc + (border * y_stride) + border;
    ybf->u_buffer = ybf->buffer_alloc + yplane_size +
                    (uv_border_h * uv_stride) + uv_border_w;
    ybf->v_buffer = ybf->buffer_alloc + yplane_size + uvplane_size +
                    (uv_border_h * uv_stride) + uv_border_w;

#if CONFIG_ALPHA
    ybf->alpha_width = alpha_width;
    ybf->alpha_height = alpha_height;
    ybf->alpha_stride = alpha_stride;
    ybf->alpha_buffer = ybf->buffer_alloc + yplane_size + 2 * uvplane_size +
                        (alpha_border_h * alpha_stride) + alpha_border_w;
#endif
    ybf->corrupted = 0; /* assume not corrupted by errors */
    return 0;
  }
  return -2;
}
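When no external frame-buffer callbacks are registered, callers pass NULL for fb, cb and cb_priv and the function takes the internal vpx_memalign() path above. A hedged usage sketch (the 1280x720 4:2:0 geometry, 32-pixel border, and function name are illustrative, not taken from a specific call site):

// Sketch: (re)allocate storage for a 1280x720 4:2:0 frame with a 32-pixel
// border using the internal allocator; returns 0 on success, negative on
// failure (including a border that is not a multiple of 32).
static int example_alloc_720p_frame(YV12_BUFFER_CONFIG *buf) {
  return vp9_realloc_frame_buffer(buf, 1280, 720, /*ss_x=*/1, /*ss_y=*/1,
                                  /*border=*/32, NULL, NULL, NULL);
}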