Code Example #1
File: pthread.c  Project: AlexanderGarmash/ffmpeg-ndk
static void frame_thread_free(AVCodecContext *avctx, int thread_count)
{
    FrameThreadContext *fctx = avctx->thread_opaque;
    AVCodec *codec = avctx->codec;
    int i;

    park_frame_worker_threads(fctx, thread_count);

    if (fctx->prev_thread && fctx->prev_thread != fctx->threads)
        update_context_from_thread(fctx->threads->avctx, fctx->prev_thread->avctx, 0);

    fctx->die = 1;

    for (i = 0; i < thread_count; i++) {
        PerThreadContext *p = &fctx->threads[i];

        pthread_mutex_lock(&p->mutex);
        pthread_cond_signal(&p->input_cond);
        pthread_mutex_unlock(&p->mutex);

        if (p->thread_init)
            pthread_join(p->thread, NULL);
        p->thread_init=0;

        if (codec->close)
            codec->close(p->avctx);

        avctx->codec = NULL;

        release_delayed_buffers(p);
    }

    for (i = 0; i < thread_count; i++) {
        PerThreadContext *p = &fctx->threads[i];

        avcodec_default_free_buffers(p->avctx);

        pthread_mutex_destroy(&p->mutex);
        pthread_mutex_destroy(&p->progress_mutex);
        pthread_cond_destroy(&p->input_cond);
        pthread_cond_destroy(&p->progress_cond);
        pthread_cond_destroy(&p->output_cond);
        av_freep(&p->avpkt.data);

        if (i) {
            av_freep(&p->avctx->priv_data);
            av_freep(&p->avctx->internal);
            av_freep(&p->avctx->slice_offset);
        }

        av_freep(&p->avctx);
    }

    av_freep(&fctx->threads);
    pthread_mutex_destroy(&fctx->buffer_mutex);
    av_freep(&avctx->thread_opaque);
}
Code Example #2
File: vda_h264_dec.c  Project: 15806905685/FFmpeg
static int vdadec_decode(AVCodecContext *avctx,
        void *data, int *got_frame, AVPacket *avpkt)
{
    VDADecoderContext *ctx = avctx->priv_data;
    AVFrame *pic = data;
    int ret;

    set_context(avctx);
    ret = ff_h264_decoder.decode(avctx, data, got_frame, avpkt);
    restore_context(avctx);
    if (*got_frame) {
        AVBufferRef *buffer = pic->buf[0];
        VDABufferContext *context = av_buffer_get_opaque(buffer);
        CVPixelBufferRef cv_buffer = (CVPixelBufferRef)pic->data[3];

        CVPixelBufferRetain(cv_buffer);
        CVPixelBufferLockBaseAddress(cv_buffer, 0);
        context->cv_buffer = cv_buffer;
        pic->format = ctx->pix_fmt;
        if (CVPixelBufferIsPlanar(cv_buffer)) {
            int i, count = CVPixelBufferGetPlaneCount(cv_buffer);
            av_assert0(count < 4);
            for (i = 0; i < count; i++) {
                pic->data[i] = CVPixelBufferGetBaseAddressOfPlane(cv_buffer, i);
                pic->linesize[i] = CVPixelBufferGetBytesPerRowOfPlane(cv_buffer, i);
            }
        } else {
            pic->data[0] = CVPixelBufferGetBaseAddress(cv_buffer);
            pic->linesize[0] = CVPixelBufferGetBytesPerRow(cv_buffer);
        }
    }
    avctx->pix_fmt = ctx->pix_fmt;

    return ret;
}
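The decode path above retains the CVPixelBufferRef and locks its base address before exposing the planes through pic->data, so whoever consumes the frame eventually has to undo both steps. Below is a minimal cleanup sketch, assuming the caller still has the same CVPixelBufferRef that the snippet stored in pic->data[3] / context->cv_buffer; it uses only standard CoreVideo calls, and the helper name is made up for illustration.

#include <CoreVideo/CoreVideo.h>

/* Illustrative only: undo the retain/lock performed in the decode path above. */
static void release_decoded_cv_buffer(CVPixelBufferRef cv_buffer)
{
    if (!cv_buffer)
        return;
    /* Matches the CVPixelBufferLockBaseAddress(cv_buffer, 0) done after decoding. */
    CVPixelBufferUnlockBaseAddress(cv_buffer, 0);
    /* Matches the CVPixelBufferRetain() taken when the frame was returned. */
    CVPixelBufferRelease(cv_buffer);
}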
Code Example #3
File: pthread.c  Project: forbe/FFmpegEncoding
/**
 * Codec worker thread.
 *
 * Automatically calls ff_thread_finish_setup() if the codec does
 * not provide an update_thread_context method, or if the codec returns
 * before calling it.
 */
static attribute_align_arg void *frame_worker_thread(void *arg)
{
    PerThreadContext *p = arg;
    FrameThreadContext *fctx = p->parent;
    AVCodecContext *avctx = p->avctx;
    AVCodec *codec = avctx->codec;

    while (1) {
        int i;
        if (p->state == STATE_INPUT_READY && !fctx->die) {
            pthread_mutex_lock(&p->mutex);
            while (p->state == STATE_INPUT_READY && !fctx->die)
                pthread_cond_wait(&p->input_cond, &p->mutex);
            pthread_mutex_unlock(&p->mutex);
        }

        if (fctx->die) break;

        if (!codec->update_thread_context && (avctx->thread_safe_callbacks || avctx->get_buffer == avcodec_default_get_buffer))
            ff_thread_finish_setup(avctx);

        pthread_mutex_lock(&p->mutex);
        avcodec_get_frame_defaults(&p->frame);
        p->got_frame = 0;
        p->result = codec->decode(avctx, &p->frame, &p->got_frame, &p->avpkt);

        if (p->state == STATE_SETTING_UP) ff_thread_finish_setup(avctx);

        p->state = STATE_INPUT_READY;

        pthread_mutex_lock(&p->progress_mutex);
        for (i = 0; i < MAX_BUFFERS; i++)
            if (p->progress_used[i] && (p->got_frame || p->result<0 || avctx->codec_id != CODEC_ID_H264)) {
                p->progress[i][0] = INT_MAX;
                p->progress[i][1] = INT_MAX;
            }
        pthread_cond_broadcast(&p->progress_cond);
        pthread_cond_signal(&p->output_cond);
        pthread_mutex_unlock(&p->progress_mutex);

        pthread_mutex_unlock(&p->mutex);
    }

    return NULL;
}
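The worker loop above parks on input_cond for as long as p->state stays at STATE_INPUT_READY, so the submitting thread has to change the state and signal the condition under the same mutex. The following is a hedged sketch of that handoff, reusing the PerThreadContext fields shown in these snippets; it is illustrative only, not FFmpeg's actual submit_packet()/ff_thread_decode_frame(), and a real implementation would deep-copy the packet data rather than assign the struct.

#include <pthread.h>

/* Illustrative only: wake a parked frame worker with a new packet. */
static void submit_packet_sketch(PerThreadContext *p, const AVPacket *pkt)
{
    pthread_mutex_lock(&p->mutex);
    p->avpkt = *pkt;               /* real code duplicates pkt->data here      */
    p->state = STATE_SETTING_UP;   /* leaving STATE_INPUT_READY terminates the */
                                   /* worker's pthread_cond_wait() loop        */
    pthread_cond_signal(&p->input_cond);
    pthread_mutex_unlock(&p->mutex);
}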
Code Example #4
File: ffmpeg_movie.cpp  Project: arpu/adscanner
void CFFMPEGLoader::SaveFrame(int iFrame, const char *add) {
    if(pFrame->linesize[0]==0) return;
    FILE *pFile;
    char szFilename[128];
    int  y;


    UINT numBytes=avpicture_get_size(PIX_FMT_RGB24, pVCodecCon->width,
                                     pVCodecCon->height)+100;
    uint8_t *buffer2=(uint8_t *)av_malloc(numBytes*sizeof(uint8_t));

    AVCodec* bmpCodec = avcodec_find_encoder(CODEC_ID_BMP);

    AVCodecContext* bmpCodecContext = avcodec_alloc_context();
    avcodec_open(bmpCodecContext, bmpCodec);

    bmpCodecContext->height = pVCodecCon->height;
    bmpCodecContext->width = pVCodecCon->width;


    int encoded = bmpCodec->encode(bmpCodecContext, buffer2, numBytes,
                                   pFrame);
    avcodec_close(bmpCodecContext);

    // Open file
    sprintf(szFilename, "fr00000.bmp", add);
    UINT mul=10000,pos=2;
    while(mul>0) {
        szFilename[pos++]=iFrame/mul+'0';
        iFrame%=mul;
        mul/=10;
    }
    string s=add;
    s+=szFilename;
    pFile=fopen(s.c_str(), "wb");
    if(pFile==NULL)
        return;

    fwrite(buffer2, 1, encoded,pFile);

    // Close file
    fclose(pFile);
    av_free(buffer2);
}
Code Example #5
File: vda_h264_dec.c  Project: Vadiza/sage-3.5b
static av_cold int vdadec_close(AVCodecContext *avctx)
{
  VDADecoderContext *ctx = avctx->priv_data;
  /* release buffers and decoder */
  ff_vda_destroy_decoder(&ctx->vda_ctx);
  /* close H.264 decoder */
  if (ctx->h264_initialized)
    ff_h264_decoder.close(avctx);
  return 0;
}
Code Example #6
File: pthread.c  Project: adesurya/gst-mobile
/**
 * Codec worker thread.
 *
 * Automatically calls ff_thread_finish_setup() if the codec does
 * not provide an update_thread_context method, or if the codec returns
 * before calling it.
 */
static attribute_align_arg void *frame_worker_thread(void *arg)
{
    PerThreadContext *p = arg;
    FrameThreadContext *fctx = p->parent;
    AVCodecContext *avctx = p->avctx;
    AVCodec *codec = avctx->codec;

    while (1) {
        if (p->state == STATE_INPUT_READY && !fctx->die) {
            pthread_mutex_lock(&p->mutex);
            while (p->state == STATE_INPUT_READY && !fctx->die)
                pthread_cond_wait(&p->input_cond, &p->mutex);
            pthread_mutex_unlock(&p->mutex);
        }

        if (fctx->die) break;

        if (!codec->update_thread_context && avctx->thread_safe_callbacks)
            ff_thread_finish_setup(avctx);

        pthread_mutex_lock(&p->mutex);
        avcodec_get_frame_defaults(&p->frame);
        p->got_frame = 0;
        p->result = codec->decode(avctx, &p->frame, &p->got_frame, &p->avpkt);

        if (p->state == STATE_SETTING_UP) ff_thread_finish_setup(avctx);

        p->state = STATE_INPUT_READY;

        pthread_mutex_lock(&p->progress_mutex);
        pthread_cond_signal(&p->output_cond);
        pthread_mutex_unlock(&p->progress_mutex);

        pthread_mutex_unlock(&p->mutex);
    }

    return NULL;
}
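This variant only signals output_cond after decoding; ff_thread_finish_setup() itself (not shown here) is expected to broadcast progress_cond once it marks setup as finished, which is what makes a wait like the one sketched below terminate. The counterpart on the submitting side is to block until the worker has left STATE_SETTING_UP before touching shared state again. Again this reuses the PerThreadContext fields from the snippets and is not FFmpeg's actual code.

/* Illustrative only: block until the worker has finished its setup phase. */
static void wait_for_setup_sketch(PerThreadContext *p)
{
    pthread_mutex_lock(&p->progress_mutex);
    while (p->state == STATE_SETTING_UP)
        pthread_cond_wait(&p->progress_cond, &p->progress_mutex);
    pthread_mutex_unlock(&p->progress_mutex);
}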
Code Example #7
File: pthread.c  Project: AlexanderGarmash/ffmpeg-ndk
static int frame_thread_init(AVCodecContext *avctx)
{
    int thread_count = avctx->thread_count;
    AVCodec *codec = avctx->codec;
    AVCodecContext *src = avctx;
    FrameThreadContext *fctx;
    int i, err = 0;

    if (!thread_count) {
        int nb_cpus = get_logical_cpus(avctx);
        if ((avctx->debug & (FF_DEBUG_VIS_QP | FF_DEBUG_VIS_MB_TYPE)) || avctx->debug_mv)
            nb_cpus = 1;
        // use number of cores + 1 as thread count if there is more than one
        if (nb_cpus > 1)
            thread_count = avctx->thread_count = FFMIN(nb_cpus + 1, MAX_AUTO_THREADS);
        else
            thread_count = avctx->thread_count = 1;
    }

    if (thread_count <= 1) {
        avctx->active_thread_type = 0;
        return 0;
    }

    avctx->thread_opaque = fctx = av_mallocz(sizeof(FrameThreadContext));

    fctx->threads = av_mallocz(sizeof(PerThreadContext) * thread_count);
    pthread_mutex_init(&fctx->buffer_mutex, NULL);
    fctx->delaying = 1;

    for (i = 0; i < thread_count; i++) {
        AVCodecContext *copy = av_malloc(sizeof(AVCodecContext));
        PerThreadContext *p  = &fctx->threads[i];

        pthread_mutex_init(&p->mutex, NULL);
        pthread_mutex_init(&p->progress_mutex, NULL);
        pthread_cond_init(&p->input_cond, NULL);
        pthread_cond_init(&p->progress_cond, NULL);
        pthread_cond_init(&p->output_cond, NULL);

        p->parent = fctx;
        p->avctx  = copy;

        if (!copy) {
            err = AVERROR(ENOMEM);
            goto error;
        }

        *copy = *src;
        copy->thread_opaque = p;
        copy->pkt = &p->avpkt;

        if (!i) {
            src = copy;

            if (codec->init)
                err = codec->init(copy);

            update_context_from_thread(avctx, copy, 1);
        } else {
            copy->priv_data = av_malloc(codec->priv_data_size);
            if (!copy->priv_data) {
                err = AVERROR(ENOMEM);
                goto error;
            }
            memcpy(copy->priv_data, src->priv_data, codec->priv_data_size);
            copy->internal = av_malloc(sizeof(AVCodecInternal));
            if (!copy->internal) {
                err = AVERROR(ENOMEM);
                goto error;
            }
            *copy->internal = *src->internal;
            copy->internal->is_copy = 1;

            if (codec->init_thread_copy)
                err = codec->init_thread_copy(copy);
        }

        if (err) goto error;

        err = AVERROR(pthread_create(&p->thread, NULL, frame_worker_thread, p));
        p->thread_init= !err;
        if(!p->thread_init)
            goto error;
    }

    return 0;

error:
    frame_thread_free(avctx, i+1);

    return err;
}
Code Example #8
File: vda_h264_dec.c  Project: 15806905685/FFmpeg
static void vdadec_flush(AVCodecContext *avctx)
{
    set_context(avctx);
    ff_h264_decoder.flush(avctx);
    restore_context(avctx);
}
Code Example #9
File: vda_h264_dec.c  Project: 15806905685/FFmpeg
static av_cold int vdadec_init(AVCodecContext *avctx)
{
    VDADecoderContext *ctx = avctx->priv_data;
    struct vda_context *vda_ctx = &ctx->vda_ctx;
    OSStatus status;
    int ret, i;

    ctx->h264_initialized = 0;

    /* init pix_fmts of codec */
    if (!ff_h264_vda_decoder.pix_fmts) {
        if (kCFCoreFoundationVersionNumber < kCFCoreFoundationVersionNumber10_7)
            ff_h264_vda_decoder.pix_fmts = vda_pixfmts_prior_10_7;
        else
            ff_h264_vda_decoder.pix_fmts = vda_pixfmts;
    }

    /* init vda */
    memset(vda_ctx, 0, sizeof(struct vda_context));
    vda_ctx->width = avctx->width;
    vda_ctx->height = avctx->height;
    vda_ctx->format = 'avc1';
    vda_ctx->use_sync_decoding = 1;
    vda_ctx->use_ref_buffer = 1;
    ctx->pix_fmt = avctx->get_format(avctx, avctx->codec->pix_fmts);
    switch (ctx->pix_fmt) {
    case AV_PIX_FMT_UYVY422:
        vda_ctx->cv_pix_fmt_type = '2vuy';
        break;
    case AV_PIX_FMT_YUYV422:
        vda_ctx->cv_pix_fmt_type = 'yuvs';
        break;
    case AV_PIX_FMT_NV12:
        vda_ctx->cv_pix_fmt_type = '420v';
        break;
    case AV_PIX_FMT_YUV420P:
        vda_ctx->cv_pix_fmt_type = 'y420';
        break;
    default:
        av_log(avctx, AV_LOG_ERROR, "Unsupported pixel format: %d\n", avctx->pix_fmt);
        goto failed;
    }
    status = ff_vda_create_decoder(vda_ctx,
                                   avctx->extradata, avctx->extradata_size);
    if (status != kVDADecoderNoErr) {
        av_log(avctx, AV_LOG_ERROR,
                "Failed to init VDA decoder: %d.\n", status);
        goto failed;
    }

    /* init H.264 decoder */
    set_context(avctx);
    ret = ff_h264_decoder.init(avctx);
    restore_context(avctx);
    if (ret < 0) {
        av_log(avctx, AV_LOG_ERROR, "Failed to open H.264 decoder.\n");
        goto failed;
    }
    ctx->h264_initialized = 1;

    for (i = 0; i < MAX_SPS_COUNT; i++) {
        const SPS *sps = (const SPS*)ctx->h264ctx.ps.sps_list[i]->data;
        if (sps && (sps->bit_depth_luma != 8 ||
                sps->chroma_format_idc == 2 ||
                sps->chroma_format_idc == 3)) {
            av_log(avctx, AV_LOG_ERROR, "Format is not supported.\n");
            goto failed;
        }
    }

    return 0;

failed:
    vdadec_close(avctx);
    return -1;
}
Code Example #10
File: ffmpeg_movie.cpp  Project: arpu/adscanner
void CFFMPEGLoader::SaveFrame(const char *fn, int wanted_width, int wanted_height) {
    AVFrame *frameIN=NULL,*frameOUT=NULL;
    if(!pFrameRGB->data[1]) {
        frameOUT=pFrameRGB;
        frameIN=pFrame;
    }
    else {
        frameIN=pFrameRGB;
        frameOUT=pFrame;
    }
    if(frameIN->linesize[0]==0) return;
    FILE *pFile;
    char szFilename[128];
    int  y;

    if(wanted_width==0) wanted_width=pVCodecCon->width;
    if(wanted_height==0) wanted_height=pVCodecCon->height;

    CodecID id=CODEC_ID_BMP;
    int fmt=PIX_FMT_BGR24;

    {
        string str(fn),ending;
        if(str.find_last_of(".")!=-1)
            ending.append(str,str.find_last_of(".")+1,str.length());

        for(int i=0; i<ending.length(); i++)
            ending[i] = tolower(ending[i]);

        if(ending.compare("jpg")==0||ending.compare("jpeg")==0) {
            id=CODEC_ID_JPEGLS;	//don't work
            fmt=PIX_FMT_RGB32;
        }
        else if(ending.compare("gif")==0) {
            id=CODEC_ID_GIF;	//don't work
            fmt=PIX_FMT_RGB32;
        }
        else if(ending.compare("tiff")==0) {
            id=CODEC_ID_TIFF;
            fmt=PIX_FMT_RGB32;
        }
        else if(ending.compare("png")==0) {
            id=CODEC_ID_PNG;
            fmt=PIX_FMT_RGB32;
        }
    }

    UINT numBytes=avpicture_get_size(fmt, wanted_width,
                                     wanted_height);
    uint8_t *buffer2=(uint8_t *)av_malloc(numBytes*sizeof(uint8_t));

    AVCodec* bmpCodec = avcodec_find_encoder(id);
    if(!bmpCodec) {
        av_free(buffer2);
        return;
    }

    AVCodecContext* bmpCodecContext = avcodec_alloc_context();
    int a=avcodec_open(bmpCodecContext, bmpCodec);
    bmpCodecContext->pix_fmt=(PixelFormat)fmt;

    bmpCodecContext->height = wanted_height;
    bmpCodecContext->width = wanted_width;
    {

        uint8_t *buffer;
        // Determine required buffer size and allocate buffer
        numBytes=avpicture_get_size(fmt, wanted_width,
                                    wanted_height)+100;
        pBuffer=buffer=(uint8_t *)av_malloc(numBytes*sizeof(uint8_t));
        avpicture_fill((AVPicture*)frameOUT, buffer, fmt,
                       wanted_width, wanted_height);

        static struct SwsContext *img_convert_ctx;
        img_convert_ctx = sws_getContext(pVCodecCon->width, pVCodecCon->height,
                                         (int)pVCodecCon->pix_fmt,
                                         wanted_width, wanted_height, fmt, SWS_BICUBIC,
                                         NULL, NULL, NULL);

        if(img_convert_ctx == NULL) {
            cout<<"Cannot initialize the conversion context!\n";
            return;
        }

        a=sws_scale(img_convert_ctx, frameIN->data,
                    frameIN->linesize, 0,
                    pVCodecCon->height,
                    frameOUT->data, frameOUT->linesize);
        sws_freeContext(img_convert_ctx);
    }

    int encoded = bmpCodec->encode(bmpCodecContext, buffer2, numBytes,
                                   frameOUT);
    avcodec_close(bmpCodecContext);

    // Open file
    pFile=fopen(fn, "wb");
    if(pFile==NULL)
        return;

    fwrite(buffer2, 1, encoded,pFile);

    // Close file
    fclose(pFile);
    av_free(buffer2);
    av_free(bmpCodecContext);
}
Code Example #11
File: vda_h264_dec.c  Project: Vadiza/sage-3.5b
static void vdadec_flush(AVCodecContext *avctx)
{
  return ff_h264_decoder.flush(avctx);
}
Code Example #12
File: vda_h264_dec.c  Project: Vadiza/sage-3.5b
static av_cold int vdadec_init(AVCodecContext *avctx)
{
  VDADecoderContext *ctx = avctx->priv_data;
  struct vda_context *vda_ctx = &ctx->vda_ctx;
  OSStatus status;
  int ret;

  ctx->h264_initialized = 0;

  /* init pix_fmts of codec */
  if (!ff_h264_vda_decoder.pix_fmts) {
    if (kCFCoreFoundationVersionNumber < kCFCoreFoundationVersionNumber10_7)
      ff_h264_vda_decoder.pix_fmts = vda_pixfmts_prior_10_7;
    else
      ff_h264_vda_decoder.pix_fmts = vda_pixfmts;
  }

  /* init vda */
  memset(vda_ctx, 0, sizeof(struct vda_context));
  vda_ctx->width = avctx->width;
  vda_ctx->height = avctx->height;
  vda_ctx->format = 'avc1';
  vda_ctx->use_sync_decoding = 1;
  vda_ctx->use_ref_buffer = 1;
  ctx->pix_fmt = avctx->get_format(avctx, avctx->codec->pix_fmts);
  switch (ctx->pix_fmt) {
  case AV_PIX_FMT_UYVY422:
    vda_ctx->cv_pix_fmt_type = '2vuy';
    break;
  case AV_PIX_FMT_YUYV422:
    vda_ctx->cv_pix_fmt_type = 'yuvs';
    break;
  case AV_PIX_FMT_NV12:
    vda_ctx->cv_pix_fmt_type = '420v';
    break;
  case AV_PIX_FMT_YUV420P:
    vda_ctx->cv_pix_fmt_type = 'y420';
    break;
  default:
    av_log(avctx, AV_LOG_ERROR, "Unsupported pixel format: %d\n", avctx->pix_fmt);
    goto failed;
  }
  status = ff_vda_create_decoder(vda_ctx,
                                 avctx->extradata, avctx->extradata_size);
  if (status != kVDADecoderNoErr) {
    av_log(avctx, AV_LOG_ERROR,
           "Failed to init VDA decoder: %d.\n", status);
    goto failed;
  }
  avctx->hwaccel_context = vda_ctx;

  /* changes callback functions */
  avctx->get_format = get_format;
  avctx->get_buffer2 = get_buffer2;
#if FF_API_GET_BUFFER
  // force the old get_buffer to be empty
  avctx->get_buffer = NULL;
#endif

  /* init H.264 decoder */
  ret = ff_h264_decoder.init(avctx);
  if (ret < 0) {
    av_log(avctx, AV_LOG_ERROR, "Failed to open H.264 decoder.\n");
    goto failed;
  }
  ctx->h264_initialized = 1;

  return 0;

  failed:
  vdadec_close(avctx);
  return -1;
}