Example #1
/**
 * execute scale using OpenCL
 * get filter args
 * create output buffer
 * create horizontal filter buffer
 * create vertical filter buffer
 * create kernels
 */
int hb_ocl_scale_func( void **data, KernelEnv *kenv )
{
    cl_int status;

    cl_mem in_buf = data[0];
    cl_mem out_buf = data[1];
    int crop_top = (intptr_t)data[2];
    int crop_bottom = (intptr_t)data[3];
    int crop_left = (intptr_t)data[4];
    int crop_right = (intptr_t)data[5];
    cl_int in_frame_w = (intptr_t)data[6];
    cl_int in_frame_h = (intptr_t)data[7];
    cl_int out_frame_w = (intptr_t)data[8];
    cl_int out_frame_h = (intptr_t)data[9];
    hb_oclscale_t  *os = data[10];
    hb_buffer_t *in = data[11];
    hb_buffer_t *out = data[12];

    if (hb_ocl == NULL)
    {
        hb_error("hb_ocl_scale_func: OpenCL support not available");
        return 0;
    }

    if (os->initialized == 0)
    {
        hb_log( "Scaling With OpenCL" );
        if (kenv->isAMD != 0)
            hb_log( "Using Zero Copy");
        // create the frame_scale kernel
        os->m_kernel = hb_ocl->clCreateKernel(kenv->program, "frame_scale", &status);

        os->initialized = 1;
    }

    {
        // Use the new kernel
        cl_event events[5];
        int eventCount = 0;

        if (kenv->isAMD == 0) {
            status = hb_ocl->clEnqueueUnmapMemObject(kenv->command_queue,
                                                     in->cl.buffer, in->data, 0,
                                                     NULL, &events[eventCount++]);
            status = hb_ocl->clEnqueueUnmapMemObject(kenv->command_queue,
                                                     out->cl.buffer, out->data, 0,
                                                     NULL, &events[eventCount++]);
        }

        cl_int srcPlaneOffset0 = in->plane[0].data - in->data;
        cl_int srcPlaneOffset1 = in->plane[1].data - in->data;
        cl_int srcPlaneOffset2 = in->plane[2].data - in->data;
        cl_int srcRowWords0 = in->plane[0].stride;
        cl_int srcRowWords1 = in->plane[1].stride;
        cl_int srcRowWords2 = in->plane[2].stride;
        cl_int dstPlaneOffset0 = out->plane[0].data - out->data;
        cl_int dstPlaneOffset1 = out->plane[1].data - out->data;
        cl_int dstPlaneOffset2 = out->plane[2].data - out->data;
        cl_int dstRowWords0 = out->plane[0].stride;
        cl_int dstRowWords1 = out->plane[1].stride;
        cl_int dstRowWords2 = out->plane[2].stride;

        if (crop_top != 0 || crop_bottom != 0 || crop_left != 0 || crop_right != 0) {
            srcPlaneOffset0 += crop_left + crop_top * srcRowWords0;
            srcPlaneOffset1 += crop_left / 2 + (crop_top / 2) * srcRowWords1;
            srcPlaneOffset2 += crop_left / 2 + (crop_top / 2) * srcRowWords2;
            in_frame_w = in_frame_w - crop_right - crop_left;
            in_frame_h = in_frame_h - crop_bottom - crop_top;
        }

        cl_float xscale = (out_frame_w * 1.0f) / in_frame_w;
        cl_float yscale = (out_frame_h * 1.0f) / in_frame_h;
        setupScaleWeights(xscale, yscale, out_frame_w, out_frame_h, os, kenv);

        HB_OCL_CHECK(hb_ocl->clSetKernelArg, os->m_kernel, 0, sizeof(cl_mem), &out_buf);
        HB_OCL_CHECK(hb_ocl->clSetKernelArg, os->m_kernel, 1, sizeof(cl_mem), &in_buf);
        HB_OCL_CHECK(hb_ocl->clSetKernelArg, os->m_kernel, 2, sizeof(cl_float), &xscale);
        HB_OCL_CHECK(hb_ocl->clSetKernelArg, os->m_kernel, 3, sizeof(cl_float), &yscale);
        HB_OCL_CHECK(hb_ocl->clSetKernelArg, os->m_kernel, 4, sizeof(cl_int), &srcPlaneOffset0);
        HB_OCL_CHECK(hb_ocl->clSetKernelArg, os->m_kernel, 5, sizeof(cl_int), &srcPlaneOffset1);
        HB_OCL_CHECK(hb_ocl->clSetKernelArg, os->m_kernel, 6, sizeof(cl_int), &srcPlaneOffset2);
        HB_OCL_CHECK(hb_ocl->clSetKernelArg, os->m_kernel, 7, sizeof(cl_int), &dstPlaneOffset0);
        HB_OCL_CHECK(hb_ocl->clSetKernelArg, os->m_kernel, 8, sizeof(cl_int), &dstPlaneOffset1);
        HB_OCL_CHECK(hb_ocl->clSetKernelArg, os->m_kernel, 9, sizeof(cl_int), &dstPlaneOffset2);
        HB_OCL_CHECK(hb_ocl->clSetKernelArg, os->m_kernel, 10, sizeof(cl_int), &srcRowWords0);
        HB_OCL_CHECK(hb_ocl->clSetKernelArg, os->m_kernel, 11, sizeof(cl_int), &srcRowWords1);
        HB_OCL_CHECK(hb_ocl->clSetKernelArg, os->m_kernel, 12, sizeof(cl_int), &srcRowWords2);
        HB_OCL_CHECK(hb_ocl->clSetKernelArg, os->m_kernel, 13, sizeof(cl_int), &dstRowWords0);
        HB_OCL_CHECK(hb_ocl->clSetKernelArg, os->m_kernel, 14, sizeof(cl_int), &dstRowWords1);
        HB_OCL_CHECK(hb_ocl->clSetKernelArg, os->m_kernel, 15, sizeof(cl_int), &dstRowWords2);
        HB_OCL_CHECK(hb_ocl->clSetKernelArg, os->m_kernel, 16, sizeof(cl_int), &in_frame_w);
        HB_OCL_CHECK(hb_ocl->clSetKernelArg, os->m_kernel, 17, sizeof(cl_int), &in_frame_h);
        HB_OCL_CHECK(hb_ocl->clSetKernelArg, os->m_kernel, 18, sizeof(cl_int), &out_frame_w);
        HB_OCL_CHECK(hb_ocl->clSetKernelArg, os->m_kernel, 19, sizeof(cl_int), &out_frame_h);
        HB_OCL_CHECK(hb_ocl->clSetKernelArg, os->m_kernel, 20, sizeof(cl_mem), &os->bicubic_x_weights);
        HB_OCL_CHECK(hb_ocl->clSetKernelArg, os->m_kernel, 21, sizeof(cl_mem), &os->bicubic_y_weights);

        size_t workOffset[] = { 0, 0, 0 };
        size_t globalWorkSize[] = { 1, 1, 1 };
        size_t localWorkSize[] = { 1, 1, 1 };

        int xgroups = (out_frame_w + 63) / 64;
        int ygroups = (out_frame_h + 15) / 16;

        localWorkSize[0] = 64;
        localWorkSize[1] = 1;
        localWorkSize[2] = 1;
        globalWorkSize[0] = xgroups * 64;
        globalWorkSize[1] = ygroups;
        globalWorkSize[2] = 3;

        HB_OCL_CHECK(hb_ocl->clEnqueueNDRangeKernel, kenv->command_queue,
                     os->m_kernel, 3, workOffset, globalWorkSize, localWorkSize,
                     eventCount, eventCount == 0 ? NULL : &events[0], &events[eventCount]);
        ++eventCount;

        if (kenv->isAMD == 0) {
            in->data  = hb_ocl->clEnqueueMapBuffer(kenv->command_queue, in->cl.buffer,
                                                   CL_FALSE, CL_MAP_READ|CL_MAP_WRITE,
                                                   0, in->alloc,
                                                   eventCount ? 1                       : 0,
                                                   eventCount ? &events[eventCount - 1] : NULL,
                                                   &events[eventCount], &status);
            out->data = hb_ocl->clEnqueueMapBuffer(kenv->command_queue, out->cl.buffer,
                                                   CL_FALSE, CL_MAP_READ|CL_MAP_WRITE,
                                                   0, out->alloc,
                                                   eventCount ? 1                       : 0,
                                                   eventCount ? &events[eventCount - 1] : NULL,
                                                   &events[eventCount + 1], &status);
            eventCount += 2;
        }

        hb_ocl->clFlush(kenv->command_queue);
        hb_ocl->clWaitForEvents(eventCount, &events[0]);
        int i;
        for (i = 0; i < eventCount; ++i)
        {
            hb_ocl->clReleaseEvent(events[i]);
        }
    }

    return 1;
}
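
The launch above pads the X dimension so the global work size is a multiple of the 64-wide work-group, since OpenCL 1.x requires the global size to be evenly divisible by the local size. A standalone sketch of that rounding (illustrative names, not HandBrake code):

#include <stdio.h>

/* Round value up to the next multiple of `multiple` (multiple > 0). */
static size_t round_up_to_multiple(size_t value, size_t multiple)
{
    return ((value + multiple - 1) / multiple) * multiple;
}

int main(void)
{
    size_t out_frame_w = 1917;   /* arbitrary example width */
    size_t local_x     = 64;     /* work-group width used above */

    /* Same arithmetic as xgroups * 64 in hb_ocl_scale_func. */
    size_t global_x = round_up_to_multiple(out_frame_w, local_x);

    printf("global X work size: %zu (%zu groups)\n", global_x, global_x / local_x);
    return 0;
}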
Example #2
hb_audio_resample_t* hb_audio_resample_init(enum AVSampleFormat sample_fmt,
                                            int hb_amixdown, int normalize_mix)
{
    hb_audio_resample_t *resample = calloc(1, sizeof(hb_audio_resample_t));
    if (resample == NULL)
    {
        hb_error("hb_audio_resample_init: failed to allocate resample");
        goto fail;
    }

    // avresample context, initialized in hb_audio_resample_update()
    resample->avresample = NULL;

    // we don't support planar output yet
    if (av_sample_fmt_is_planar(sample_fmt))
    {
        hb_error("hb_audio_resample_init: planar output not supported ('%s')",
                 av_get_sample_fmt_name(sample_fmt));
        goto fail;
    }

    // convert mixdown to channel_layout/matrix_encoding combo
    int matrix_encoding;
    uint64_t channel_layout = hb_ff_mixdown_xlat(hb_amixdown, &matrix_encoding);

    /*
     * When downmixing, Dual Mono to Mono is a special case:
     * the audio must remain 2-channel until all conversions are done.
     */
    if (hb_amixdown == HB_AMIXDOWN_LEFT || hb_amixdown == HB_AMIXDOWN_RIGHT)
    {
        channel_layout                 = AV_CH_LAYOUT_STEREO;
        resample->dual_mono_downmix    = 1;
        resample->dual_mono_right_only = (hb_amixdown == HB_AMIXDOWN_RIGHT);
    }
    else
    {
        resample->dual_mono_downmix = 0;
    }

    // requested output channel_layout, sample_fmt
    resample->out.channels = av_get_channel_layout_nb_channels(channel_layout);
    resample->out.channel_layout      = channel_layout;
    resample->out.matrix_encoding     = matrix_encoding;
    resample->out.normalize_mix_level = normalize_mix;
    resample->out.sample_fmt          = sample_fmt;
    resample->out.sample_size         = av_get_bytes_per_sample(sample_fmt);

    // set default input characteristics
    resample->in.sample_fmt         = resample->out.sample_fmt;
    resample->in.channel_layout     = resample->out.channel_layout;
    resample->in.center_mix_level   = HB_MIXLEV_DEFAULT;
    resample->in.surround_mix_level = HB_MIXLEV_DEFAULT;

    // by default, no conversion needed
    resample->resample_needed = 0;
    return resample;

fail:
    hb_audio_resample_free(resample);
    return NULL;
}
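
The initializer derives the output channel count and sample size from the requested layout and sample format. A small standalone check of those two libavutil helpers, assuming the same pre-5.x channel-layout API used throughout this code:

#include <stdio.h>
#include <stdint.h>
#include <libavutil/channel_layout.h>
#include <libavutil/samplefmt.h>

int main(void)
{
    uint64_t layout         = AV_CH_LAYOUT_5POINT1;  /* example layout */
    enum AVSampleFormat fmt = AV_SAMPLE_FMT_FLT;     /* interleaved float */

    printf("channels: %d, bytes per sample: %d\n",
           av_get_channel_layout_nb_channels(layout),  /* 6 */
           av_get_bytes_per_sample(fmt));              /* 4 */
    return 0;
}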
Example #3
/**********************************************************************
 * avformatInit
 **********************************************************************
 * Allocates hb_mux_data_t structures, creates the output file and writes headers
 *********************************************************************/
static int avformatInit( hb_mux_object_t * m )
{
    hb_job_t   * job   = m->job;
    hb_audio_t    * audio;
    hb_mux_data_t * track;
    int meta_mux;
    int max_tracks;
    int ii, ret;

    const char *muxer_name = NULL;

    uint8_t         default_track_flag = 1;
    uint8_t         need_fonts = 0;
    char *lang;


    m->delay = AV_NOPTS_VALUE;
    max_tracks = 1 + hb_list_count( job->list_audio ) +
                     hb_list_count( job->list_subtitle );

    m->tracks = calloc(max_tracks, sizeof(hb_mux_data_t*));

    m->oc = avformat_alloc_context();
    if (m->oc == NULL)
    {
        hb_error( "Could not initialize avformat context." );
        goto error;
    }

    switch (job->mux)
    {
        case HB_MUX_AV_MP4:
            m->time_base.num = 1;
            m->time_base.den = 90000;
            if( job->ipod_atom )
                muxer_name = "ipod";
            else
                muxer_name = "mp4";
            meta_mux = META_MUX_MP4;
            break;

        case HB_MUX_AV_MKV:
            // libavformat is essentially hard coded such that it only
            // works with a timebase of 1/1000
            m->time_base.num = 1;
            m->time_base.den = 1000;
            muxer_name = "matroska";
            meta_mux = META_MUX_MKV;
            break;

        default:
        {
            hb_error("Invalid Mux %x", job->mux);
            goto error;
        }
    }
    m->oc->oformat = av_guess_format(muxer_name, NULL, NULL);
    if(m->oc->oformat == NULL)
    {
        hb_error("Could not guess output format %s", muxer_name);
        goto error;
    }
    av_strlcpy(m->oc->filename, job->file, sizeof(m->oc->filename));
    ret = avio_open2(&m->oc->pb, job->file, AVIO_FLAG_WRITE,
                     &m->oc->interrupt_callback, NULL);
    if( ret < 0 )
    {
        hb_error( "avio_open2 failed, errno %d", ret);
        goto error;
    }

    /* Video track */
    track = m->tracks[m->ntracks++] = calloc(1, sizeof( hb_mux_data_t ) );
    job->mux_data = track;

    track->type = MUX_TYPE_VIDEO;
    track->st = avformat_new_stream(m->oc, NULL);
    if (track->st == NULL)
    {
        hb_error("Could not initialize video stream");
        goto error;
    }
    track->st->time_base = m->time_base;
    avcodec_get_context_defaults3(track->st->codec, NULL);

    track->st->codec->codec_type = AVMEDIA_TYPE_VIDEO;
    track->st->codec->flags |= CODEC_FLAG_GLOBAL_HEADER;

    uint8_t *priv_data = NULL;
    int priv_size = 0;
    switch (job->vcodec)
    {
        case HB_VCODEC_X264:
        case HB_VCODEC_QSV_H264:
            track->st->codec->codec_id = AV_CODEC_ID_H264;

            /* Taken from x264 muxers.c */
            priv_size = 5 + 1 + 2 + job->config.h264.sps_length + 1 + 2 +
                        job->config.h264.pps_length;
            priv_data = av_malloc(priv_size);
            if (priv_data == NULL)
            {
                hb_error("malloc failure");
                goto error;
            }

            priv_data[0] = 1;
            priv_data[1] = job->config.h264.sps[1]; /* AVCProfileIndication */
            priv_data[2] = job->config.h264.sps[2]; /* profile_compat */
            priv_data[3] = job->config.h264.sps[3]; /* AVCLevelIndication */
            priv_data[4] = 0xff; // nalu size length is four bytes
            priv_data[5] = 0xe1; // one sps

            priv_data[6] = job->config.h264.sps_length >> 8;
            priv_data[7] = job->config.h264.sps_length;

            memcpy(priv_data+8, job->config.h264.sps,
                   job->config.h264.sps_length);

            priv_data[8+job->config.h264.sps_length] = 1; // one pps
            priv_data[9+job->config.h264.sps_length] =
                                        job->config.h264.pps_length >> 8;
            priv_data[10+job->config.h264.sps_length] =
                                        job->config.h264.pps_length;

            memcpy(priv_data+11+job->config.h264.sps_length,
                   job->config.h264.pps, job->config.h264.pps_length );
            break;

        case HB_VCODEC_FFMPEG_MPEG4:
            track->st->codec->codec_id = AV_CODEC_ID_MPEG4;

            if (job->config.mpeg4.length != 0)
            {
                priv_size = job->config.mpeg4.length;
                priv_data = av_malloc(priv_size);
                if (priv_data == NULL)
                {
                    hb_error("malloc failure");
                    goto error;
                }
                memcpy(priv_data, job->config.mpeg4.bytes, priv_size);
            }
            break;

        case HB_VCODEC_FFMPEG_MPEG2:
            track->st->codec->codec_id = AV_CODEC_ID_MPEG2VIDEO;

            if (job->config.mpeg4.length != 0)
            {
                priv_size = job->config.mpeg4.length;
                priv_data = av_malloc(priv_size);
                if (priv_data == NULL)
                {
                    hb_error("malloc failure");
                    goto error;
                }
                memcpy(priv_data, job->config.mpeg4.bytes, priv_size);
            }
            break;

        case HB_VCODEC_THEORA:
        {
            track->st->codec->codec_id = AV_CODEC_ID_THEORA;

            int size = 0;
            ogg_packet *ogg_headers[3];

            for (ii = 0; ii < 3; ii++)
            {
                ogg_headers[ii] = (ogg_packet *)job->config.theora.headers[ii];
                size += ogg_headers[ii]->bytes + 2;
            }

            priv_size = size;
            priv_data = av_malloc(priv_size);
            if (priv_data == NULL)
            {
                hb_error("malloc failure");
                goto error;
            }

            size = 0;
            for(ii = 0; ii < 3; ii++)
            {
                AV_WB16(priv_data + size, ogg_headers[ii]->bytes);
                size += 2;
                memcpy(priv_data+size, ogg_headers[ii]->packet,
                                       ogg_headers[ii]->bytes);
                size += ogg_headers[ii]->bytes;
            }
        } break;

        default:
            hb_error("muxavformat: Unknown video codec: %x", job->vcodec);
            goto error;
    }
    track->st->codec->extradata = priv_data;
    track->st->codec->extradata_size = priv_size;

    if (job->anamorphic.mode > 0)
    {
        track->st->sample_aspect_ratio.num        = job->anamorphic.par_width;
        track->st->sample_aspect_ratio.den        = job->anamorphic.par_height;
        track->st->codec->sample_aspect_ratio.num = job->anamorphic.par_width;
        track->st->codec->sample_aspect_ratio.den = job->anamorphic.par_height;
    }
    else
    {
        track->st->sample_aspect_ratio.num        = 1;
        track->st->sample_aspect_ratio.den        = 1;
        track->st->codec->sample_aspect_ratio.num = 1;
        track->st->codec->sample_aspect_ratio.den = 1;
    }
    track->st->codec->width = job->width;
    track->st->codec->height = job->height;
    track->st->disposition |= AV_DISPOSITION_DEFAULT;

    int vrate_base, vrate;
    if( job->pass == 2 )
    {
        hb_interjob_t * interjob = hb_interjob_get( job->h );
        vrate_base = interjob->vrate_base;
        vrate = interjob->vrate;
    }
    else
    {
        vrate_base = job->vrate_base;
        vrate = job->vrate;
    }

    // If the vrate is 27000000, there's a good chance this is
    // a standard rate that we have in our hb_video_rates table.
    // Because of rounding errors and approximations made while
    // measuring the framerate, the actual value may not be exact, so
    // we look for rates that are "close" and snap vrate_base to the
    // standard value.
    if (vrate == 27000000)
    {
        const hb_rate_t *video_framerate = NULL;
        while ((video_framerate = hb_video_framerate_get_next(video_framerate)) != NULL)
        {
            if (abs(vrate_base - video_framerate->rate) < 10)
            {
                vrate_base = video_framerate->rate;
                break;
            }
        }
    }
    hb_reduce(&vrate_base, &vrate, vrate_base, vrate);
    if (job->mux == HB_MUX_AV_MP4)
    {
        // The libavformat mp4 muxer requires that the codec time_base have
        // the same denominator as the stream time_base; it uses it for the
        // mdhd timescale.
        double scale = (double)track->st->time_base.den / vrate;
        track->st->codec->time_base.den = track->st->time_base.den;
        track->st->codec->time_base.num = vrate_base * scale;
    }
    else
    {
        track->st->codec->time_base.num = vrate_base;
        track->st->codec->time_base.den = vrate;
    }

    /* add the audio tracks */
    for(ii = 0; ii < hb_list_count( job->list_audio ); ii++ )
    {
        audio = hb_list_item( job->list_audio, ii );
        track = m->tracks[m->ntracks++] = calloc(1, sizeof( hb_mux_data_t ) );
        audio->priv.mux_data = track;

        track->type = MUX_TYPE_AUDIO;

        track->st = avformat_new_stream(m->oc, NULL);
        if (track->st == NULL)
        {
            hb_error("Could not initialize audio stream");
            goto error;
        }
        avcodec_get_context_defaults3(track->st->codec, NULL);

        track->st->codec->codec_type = AVMEDIA_TYPE_AUDIO;
        track->st->codec->flags |= CODEC_FLAG_GLOBAL_HEADER;
        if (job->mux == HB_MUX_AV_MP4)
        {
            track->st->codec->time_base.num = audio->config.out.samples_per_frame;
            track->st->codec->time_base.den = audio->config.out.samplerate;
            track->st->time_base.num = 1;
            track->st->time_base.den = audio->config.out.samplerate;
        }
        else
        {
            track->st->codec->time_base = m->time_base;
        }

        priv_data = NULL;
        priv_size = 0;
        switch (audio->config.out.codec & HB_ACODEC_MASK)
        {
            case HB_ACODEC_DCA:
            case HB_ACODEC_DCA_HD:
                track->st->codec->codec_id = AV_CODEC_ID_DTS;
                break;
            case HB_ACODEC_AC3:
                track->st->codec->codec_id = AV_CODEC_ID_AC3;
                break;
            case HB_ACODEC_LAME:
            case HB_ACODEC_MP3:
                track->st->codec->codec_id = AV_CODEC_ID_MP3;
                break;
            case HB_ACODEC_VORBIS:
            {
                track->st->codec->codec_id = AV_CODEC_ID_VORBIS;

                int jj, size = 0;
                ogg_packet *ogg_headers[3];

                for (jj = 0; jj < 3; jj++)
                {
                    ogg_headers[jj] = (ogg_packet *)audio->priv.config.vorbis.headers[jj];
                    size += ogg_headers[jj]->bytes + 2;
                }

                priv_size = size;
                priv_data = av_malloc(priv_size);
                if (priv_data == NULL)
                {
                    hb_error("malloc failure");
                    goto error;
                }

                size = 0;
                for(jj = 0; jj < 3; jj++)
                {
                    AV_WB16(priv_data + size, ogg_headers[jj]->bytes);
                    size += 2;
                    memcpy(priv_data+size, ogg_headers[jj]->packet,
                                           ogg_headers[jj]->bytes);
                    size += ogg_headers[jj]->bytes;
                }
            } break;
            case HB_ACODEC_FFFLAC:
            case HB_ACODEC_FFFLAC24:
                track->st->codec->codec_id = AV_CODEC_ID_FLAC;

                if (audio->priv.config.extradata.bytes)
                {
                    priv_size = audio->priv.config.extradata.length;
                    priv_data = av_malloc(priv_size);
                    if (priv_data == NULL)
                    {
                        hb_error("malloc failure");
                        goto error;
                    }
                    memcpy(priv_data,
                           audio->priv.config.extradata.bytes,
                           audio->priv.config.extradata.length);
                }
                break;
            case HB_ACODEC_FAAC:
            case HB_ACODEC_FFAAC:
            case HB_ACODEC_CA_AAC:
            case HB_ACODEC_CA_HAAC:
            case HB_ACODEC_FDK_AAC:
            case HB_ACODEC_FDK_HAAC:
                track->st->codec->codec_id = AV_CODEC_ID_AAC;

                if (audio->priv.config.extradata.bytes)
                {
                    priv_size = audio->priv.config.extradata.length;
                    priv_data = av_malloc(priv_size);
                    if (priv_data == NULL)
                    {
                        hb_error("malloc failure");
                        goto error;
                    }
                    memcpy(priv_data,
                           audio->priv.config.extradata.bytes,
                           audio->priv.config.extradata.length);
                }
                break;
            default:
                hb_error("muxavformat: Unknown audio codec: %x",
                         audio->config.out.codec);
                goto error;
        }
        track->st->codec->extradata = priv_data;
        track->st->codec->extradata_size = priv_size;

        if( default_track_flag )
        {
            track->st->disposition |= AV_DISPOSITION_DEFAULT;
            default_track_flag = 0;
        }

        lang = lookup_lang_code(job->mux, audio->config.lang.iso639_2 );
        if (lang != NULL)
        {
            av_dict_set(&track->st->metadata, "language", lang, 0);
        }
        track->st->codec->sample_rate = audio->config.out.samplerate;
        if (audio->config.out.codec & HB_ACODEC_PASS_FLAG)
        {
            track->st->codec->channels = av_get_channel_layout_nb_channels(audio->config.in.channel_layout);
            track->st->codec->channel_layout = audio->config.in.channel_layout;
        }
        else
        {
            track->st->codec->channels = hb_mixdown_get_discrete_channel_count(audio->config.out.mixdown);
            track->st->codec->channel_layout = hb_ff_mixdown_xlat(audio->config.out.mixdown, NULL);
        }

        char *name;
        if (audio->config.out.name == NULL)
        {
            switch (track->st->codec->channels)
            {
                case 1:
                    name = "Mono";
                    break;

                case 2:
                    name = "Stereo";
                    break;

                default:
                    name = "Surround";
                    break;
            }
        }
        else
        {
            name = audio->config.out.name;
        }
        av_dict_set(&track->st->metadata, "title", name, 0);
    }

    char * subidx_fmt =
        "size: %dx%d\n"
        "org: %d, %d\n"
        "scale: 100%%, 100%%\n"
        "alpha: 100%%\n"
        "smooth: OFF\n"
        "fadein/out: 50, 50\n"
        "align: OFF at LEFT TOP\n"
        "time offset: 0\n"
        "forced subs: %s\n"
        "palette: %06x, %06x, %06x, %06x, %06x, %06x, "
        "%06x, %06x, %06x, %06x, %06x, %06x, %06x, %06x, %06x, %06x\n"
        "custom colors: OFF, tridx: 0000, "
        "colors: 000000, 000000, 000000, 000000\n";

    int subtitle_default = -1;
    for( ii = 0; ii < hb_list_count( job->list_subtitle ); ii++ )
    {
        hb_subtitle_t *subtitle = hb_list_item( job->list_subtitle, ii );

        if( subtitle->config.dest == PASSTHRUSUB )
        {
            if ( subtitle->config.default_track )
                subtitle_default = ii;
        }
    }
    // QuickTime requires that at least one subtitle be enabled,
    // otherwise it doesn't show any of the subtitles.
    // So check whether any of the subtitles are flagged as the
    // default. The default will be the enabled track; otherwise,
    // enable the first track.
    if (job->mux == HB_MUX_AV_MP4 && subtitle_default == -1)
    {
        subtitle_default = 0;
    }

    for( ii = 0; ii < hb_list_count( job->list_subtitle ); ii++ )
    {
        hb_subtitle_t * subtitle;
        uint32_t        rgb[16];
        char            subidx[2048];
        int             len;

        subtitle = hb_list_item( job->list_subtitle, ii );
        if (subtitle->config.dest != PASSTHRUSUB)
            continue;

        track = m->tracks[m->ntracks++] = calloc(1, sizeof( hb_mux_data_t ) );
        subtitle->mux_data = track;

        track->type = MUX_TYPE_SUBTITLE;
        track->st = avformat_new_stream(m->oc, NULL);
        if (track->st == NULL)
        {
            hb_error("Could not initialize subtitle stream");
            goto error;
        }
        avcodec_get_context_defaults3(track->st->codec, NULL);

        track->st->codec->codec_type = AVMEDIA_TYPE_SUBTITLE;
        track->st->codec->flags |= CODEC_FLAG_GLOBAL_HEADER;
        track->st->time_base = m->time_base;
        track->st->codec->time_base = m->time_base;
        track->st->codec->width = subtitle->width;
        track->st->codec->height = subtitle->height;

        priv_data = NULL;
        priv_size = 0;
        switch (subtitle->source)
        {
            case VOBSUB:
            {
                int jj;
                track->st->codec->codec_id = AV_CODEC_ID_DVD_SUBTITLE;

                for (jj = 0; jj < 16; jj++)
                    rgb[jj] = hb_yuv2rgb(subtitle->palette[jj]);
                len = snprintf(subidx, 2048, subidx_fmt,
                        subtitle->width, subtitle->height,
                        0, 0, "OFF",
                        rgb[0], rgb[1], rgb[2], rgb[3],
                        rgb[4], rgb[5], rgb[6], rgb[7],
                        rgb[8], rgb[9], rgb[10], rgb[11],
                        rgb[12], rgb[13], rgb[14], rgb[15]);

                priv_size = len + 1;
                priv_data = av_malloc(priv_size);
                if (priv_data == NULL)
                {
                    hb_error("malloc failure");
                    goto error;
                }
                memcpy(priv_data, subidx, priv_size);
            } break;

            case PGSSUB:
            {
                track->st->codec->codec_id = AV_CODEC_ID_HDMV_PGS_SUBTITLE;
            } break;

            case SSASUB:
            {
                if (job->mux == HB_MUX_AV_MP4)
                {
                    track->st->codec->codec_id = AV_CODEC_ID_MOV_TEXT;
                }
                else
                {
                    track->st->codec->codec_id = AV_CODEC_ID_SSA;
                    need_fonts = 1;

                    if (subtitle->extradata_size)
                    {
                        priv_size = subtitle->extradata_size;
                        priv_data = av_malloc(priv_size);
                        if (priv_data == NULL)
                        {
                            hb_error("malloc failure");
                            goto error;
                        }
                        memcpy(priv_data, subtitle->extradata, priv_size);
                    }
                }
            } break;

            case CC608SUB:
            case CC708SUB:
            case UTF8SUB:
            case TX3GSUB:
            case SRTSUB:
            {
                if (job->mux == HB_MUX_AV_MP4)
                    track->st->codec->codec_id = AV_CODEC_ID_MOV_TEXT;
                else
                    track->st->codec->codec_id = AV_CODEC_ID_TEXT;
            } break;

            default:
                continue;
        }
        if (track->st->codec->codec_id == AV_CODEC_ID_MOV_TEXT)
        {
            // Build codec extradata for tx3g.
            // If we were using a libav codec to generate this data
            // this would (or should) be done for us.
            uint8_t properties[] = {
                0x00, 0x00, 0x00, 0x00,     // Display Flags
                0x01,                       // Horiz. Justification
                0xff,                       // Vert. Justification
                0x00, 0x00, 0x00, 0xff,     // Bg color
                0x00, 0x00, 0x00, 0x00,     // Default text box
                0x00, 0x00, 0x00, 0x00,
                0x00, 0x00, 0x00, 0x00,     // Reserved
                0x00, 0x01,                 // Font ID
                0x00,                       // Font face
                0x18,                       // Font size
                0xff, 0xff, 0xff, 0xff,     // Fg color
                // Font table:
                0x00, 0x00, 0x00, 0x12,     // Font table size
                'f','t','a','b',            // Tag
                0x00, 0x01,                 // Count
                0x00, 0x01,                 // Font ID
                0x05,                       // Font name length
                'A','r','i','a','l'         // Font name
            };

            int width, height = 60;
            if (job->anamorphic.mode)
                width = job->width * ((float)job->anamorphic.par_width / job->anamorphic.par_height);
            else
                width = job->width;
            track->st->codec->width = width;
            track->st->codec->height = height;
            properties[14] = height >> 8;
            properties[15] = height & 0xff;
            properties[16] = width >> 8;
            properties[17] = width & 0xff;

            priv_size = sizeof(properties);
            priv_data = av_malloc(priv_size);
            if (priv_data == NULL)
            {
                hb_error("malloc failure");
                goto error;
            }
            memcpy(priv_data, properties, priv_size);
        }
        track->st->codec->extradata = priv_data;
        track->st->codec->extradata_size = priv_size;

        if ( ii == subtitle_default )
        {
            track->st->disposition |= AV_DISPOSITION_DEFAULT;
        }

        lang = lookup_lang_code(job->mux, subtitle->iso639_2 );
        if (lang != NULL)
        {
            av_dict_set(&track->st->metadata, "language", lang, 0);
        }
    }
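
The HB_VCODEC_X264 branch above hand-builds the stream extradata as an AVCDecoderConfigurationRecord (avcC) from the encoder's SPS and PPS. A self-contained sketch of that exact layout, shown for reference rather than as HandBrake code:

#include <stdint.h>
#include <stdlib.h>
#include <string.h>

/* Build an avcC record holding one SPS and one PPS; caller frees the result. */
static uint8_t *make_avcc(const uint8_t *sps, uint16_t sps_len,
                          const uint8_t *pps, uint16_t pps_len,
                          size_t *out_len)
{
    size_t len = 5 + 1 + 2 + sps_len + 1 + 2 + pps_len;
    uint8_t *p = malloc(len);
    if (p == NULL)
        return NULL;

    p[0] = 1;                      /* configurationVersion */
    p[1] = sps[1];                 /* AVCProfileIndication */
    p[2] = sps[2];                 /* profile_compatibility */
    p[3] = sps[3];                 /* AVCLevelIndication */
    p[4] = 0xff;                   /* NALU length field is four bytes */
    p[5] = 0xe1;                   /* one SPS follows */
    p[6] = sps_len >> 8;
    p[7] = sps_len & 0xff;
    memcpy(p + 8, sps, sps_len);
    p[8 + sps_len]  = 1;           /* one PPS follows */
    p[9 + sps_len]  = pps_len >> 8;
    p[10 + sps_len] = pps_len & 0xff;
    memcpy(p + 11 + sps_len, pps, pps_len);

    *out_len = len;
    return p;
}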
Example #4
hb_opencl_library_t* hb_opencl_library_init()
{
    hb_opencl_library_t *opencl;
    if ((opencl = calloc(1, sizeof(hb_opencl_library_t))) == NULL)
    {
        hb_error("hb_opencl_library_init: memory allocation failure");
        goto fail;
    }

    opencl->library = HB_OCL_DLOPEN;
    if (opencl->library == NULL)
    {
        goto fail;
    }

#define HB_OCL_LOAD(func)                                                       \
{                                                                               \
    if ((opencl->func = (void*)HB_OCL_DLSYM(opencl->library, #func)) == NULL)   \
    {                                                                           \
        hb_log("hb_opencl_library_init: failed to load function '%s'", #func);  \
        goto fail;                                                              \
    }                                                                           \
}
    HB_OCL_LOAD(clBuildProgram);
    HB_OCL_LOAD(clCreateBuffer);
    HB_OCL_LOAD(clCreateCommandQueue);
    HB_OCL_LOAD(clCreateContextFromType);
    HB_OCL_LOAD(clCreateKernel);
    HB_OCL_LOAD(clCreateProgramWithBinary);
    HB_OCL_LOAD(clCreateProgramWithSource);
    HB_OCL_LOAD(clEnqueueCopyBuffer);
    HB_OCL_LOAD(clEnqueueMapBuffer);
    HB_OCL_LOAD(clEnqueueNDRangeKernel);
    HB_OCL_LOAD(clEnqueueReadBuffer);
    HB_OCL_LOAD(clEnqueueUnmapMemObject);
    HB_OCL_LOAD(clEnqueueWriteBuffer);
    HB_OCL_LOAD(clFlush);
    HB_OCL_LOAD(clGetCommandQueueInfo);
    HB_OCL_LOAD(clGetContextInfo);
    HB_OCL_LOAD(clGetDeviceIDs);
    HB_OCL_LOAD(clGetDeviceInfo);
    HB_OCL_LOAD(clGetPlatformIDs);
    HB_OCL_LOAD(clGetPlatformInfo);
    HB_OCL_LOAD(clGetProgramBuildInfo);
    HB_OCL_LOAD(clGetProgramInfo);
    HB_OCL_LOAD(clReleaseCommandQueue);
    HB_OCL_LOAD(clReleaseContext);
    HB_OCL_LOAD(clReleaseEvent);
    HB_OCL_LOAD(clReleaseKernel);
    HB_OCL_LOAD(clReleaseMemObject);
    HB_OCL_LOAD(clReleaseProgram);
    HB_OCL_LOAD(clSetKernelArg);
    HB_OCL_LOAD(clWaitForEvents);

    //success
    return opencl;

fail:
    hb_opencl_library_close(&opencl);
    return NULL;
}
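
HB_OCL_LOAD resolves each OpenCL entry point by name at runtime, so HandBrake can still start on systems without an OpenCL driver installed. A minimal POSIX sketch of the same load-or-fail pattern (the real macro goes through HB_OCL_DLOPEN/HB_OCL_DLSYM so it also covers Windows; library and symbol names here are illustrative):

#include <dlfcn.h>
#include <stdio.h>

int main(void)
{
    void *lib = dlopen("libOpenCL.so.1", RTLD_NOW);
    if (lib == NULL)
    {
        fprintf(stderr, "dlopen failed: %s\n", dlerror());
        return 1;
    }

    /* Look up one symbol; a real loader would store the pointer in a
     * dispatch table, exactly as hb_opencl_library_init does. */
    void *sym = dlsym(lib, "clGetPlatformIDs");
    if (sym == NULL)
    {
        fprintf(stderr, "failed to load function 'clGetPlatformIDs': %s\n", dlerror());
        dlclose(lib);
        return 1;
    }

    printf("loaded clGetPlatformIDs at %p\n", sym);
    dlclose(lib);
    return 0;
}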
Example #5
int encvorbisInit( hb_work_object_t * w, hb_job_t * job )
{
    hb_audio_t * audio = w->audio;
    int i;
    ogg_packet header[3];

    hb_work_private_t * pv = calloc( 1, sizeof( hb_work_private_t ) );
    w->private_data = pv;
    pv->out_discrete_channels = HB_AMIXDOWN_GET_DISCRETE_CHANNEL_COUNT(audio->config.out.mixdown);

    pv->job   = job;

    hb_log( "encvorbis: opening libvorbis" );

    /* init */
    for( i = 0; i < 3; i++ )
    {
        // Zero vorbis headers so that we don't crash in mk_laceXiph
        // when vorbis_encode_setup_managed fails.
        memset( w->config->vorbis.headers[i], 0, sizeof( ogg_packet ) );
    }
    vorbis_info_init( &pv->vi );

    if( audio->config.out.bitrate > 0 )
    {
        /* 28kbps/channel seems to be the minimum for 6ch vorbis. */
        int min_bitrate = 28 * pv->out_discrete_channels;
        if (pv->out_discrete_channels > 2 && audio->config.out.bitrate < min_bitrate)
        {
            hb_log( "encvorbis: Selected bitrate (%d kbps) too low for %d channel audio.", audio->config.out.bitrate, pv->out_discrete_channels);
            hb_log( "encvorbis: Resetting bitrate to %d kbps", min_bitrate);
            /* Naughty! We shouldn't modify the audio from here. */
            audio->config.out.bitrate = min_bitrate;
        }

        if( vorbis_encode_setup_managed( &pv->vi, pv->out_discrete_channels,
              audio->config.out.samplerate, -1, 1000 * audio->config.out.bitrate, -1 ) )
        {
            hb_error( "encvorbis: vorbis_encode_setup_managed failed.\n" );
            *job->die = 1;
            return -1;
        }
    }
    else if( audio->config.out.quality != -1 )
    {
        // map VBR quality to Vorbis API (divide by 10)
        if( vorbis_encode_setup_vbr( &pv->vi, pv->out_discrete_channels,
              audio->config.out.samplerate, audio->config.out.quality/10 ) )
        {
            hb_error( "encvorbis: vorbis_encode_setup_vbr failed.\n" );
            *job->die = 1;
            return -1;
        }
    }

    if( vorbis_encode_ctl( &pv->vi, OV_ECTL_RATEMANAGE2_SET, NULL ) ||
          vorbis_encode_setup_init( &pv->vi ) )
    {
        hb_error( "encvorbis: vorbis_encode_ctl( ratemanage2_set ) OR vorbis_encode_setup_init failed.\n" );
        *job->die = 1;
        return -1;
    }

    /* add a comment */
    vorbis_comment_init( &pv->vc );
    vorbis_comment_add_tag( &pv->vc, "Encoder", "HandBrake");
    vorbis_comment_add_tag( &pv->vc, "LANGUAGE", w->config->vorbis.language);

    /* set up the analysis state and auxiliary encoding storage */
    vorbis_analysis_init( &pv->vd, &pv->vi);
    vorbis_block_init( &pv->vd, &pv->vb);

    /* get the 3 headers */
    vorbis_analysis_headerout( &pv->vd, &pv->vc,
                               &header[0], &header[1], &header[2] );
    for( i = 0; i < 3; i++ )
    {
        memcpy( w->config->vorbis.headers[i], &header[i],
                sizeof( ogg_packet ) );
        memcpy( w->config->vorbis.headers[i] + sizeof( ogg_packet ),
                header[i].packet, header[i].bytes );
    }

    pv->input_samples = pv->out_discrete_channels * OGGVORBIS_FRAME_SIZE;
    audio->config.out.samples_per_frame = OGGVORBIS_FRAME_SIZE;
    pv->buf = malloc( pv->input_samples * sizeof( float ) );

    pv->list = hb_list_init();

    switch (pv->out_discrete_channels) {
        case 1:
            pv->channel_map[0] = 0;
            break;
        case 6:
            // Vorbis 5.1 channel order: L, C, R, Ls, Rs, LFE
            if( audio->config.in.channel_map == &hb_ac3_chan_map )
            {
                pv->channel_map[0] = 1;
                pv->channel_map[1] = 2;
                pv->channel_map[2] = 3;
                pv->channel_map[3] = 4;
                pv->channel_map[4] = 5;
                pv->channel_map[5] = 0;
            }
            else if( audio->config.in.channel_map == &hb_smpte_chan_map )
            {
                pv->channel_map[0] = 0;
                pv->channel_map[1] = 2;
                pv->channel_map[2] = 1;
                pv->channel_map[3] = 4;
                pv->channel_map[4] = 5;
                pv->channel_map[5] = 3;
            }
            else // &hb_qt_chan_map
            {
                pv->channel_map[0] = 1;
                pv->channel_map[1] = 0;
                pv->channel_map[2] = 2;
                pv->channel_map[3] = 3;
                pv->channel_map[4] = 4;
                pv->channel_map[5] = 5;
            }
            break;
        default:
            hb_log("encvorbis.c: Unable to correctly proccess %d channels, assuming stereo.", pv->out_discrete_channels);
        case 2:
            // Assume stereo
            pv->channel_map[0] = 0;
            pv->channel_map[1] = 1;
            break;
    }

    return 0;
}
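
The channel_map built above translates the input channel order into Vorbis order (L, C, R, Ls, Rs, LFE for 5.1). A sketch of how such a source-to-destination map can be applied when deinterleaving samples into per-channel buffers, in the spirit of the encoder's analysis step rather than a copy of it:

/* Send input channel j of each interleaved sample to plane channel_map[j]. */
static void deinterleave_with_map(const float *in, float **planes,
                                  int nsamples, int nchannels,
                                  const int *channel_map)
{
    int i, j;
    for (i = 0; i < nsamples; i++)
        for (j = 0; j < nchannels; j++)
            planes[channel_map[j]][i] = in[i * nchannels + j];
}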
Example #6
/**********************************************************************
 * MKVInit
 **********************************************************************
 * Allocates hb_mux_data_t structures, creates the output file and writes headers
 *********************************************************************/
static int MKVInit( hb_mux_object_t * m )
{
    hb_job_t   * job   = m->job;
    hb_audio_t    * audio;
    hb_mux_data_t * mux_data;

    uint8_t         *avcC = NULL;
    uint8_t         default_track_flag = 1;
    uint8_t         need_fonts = 0;
    int             avcC_len, i, j;
    ogg_packet      *ogg_headers[3];
    mk_TrackConfig  *track;
    iso639_lang_t   *lang;

    track = calloc(1, sizeof(mk_TrackConfig));

    m->file = mk_createWriter(job->file, 1000000, 1);

    if( !m->file )
    {
        hb_error( "Could not create output file, Disk Full?" );
        job->mux_data = NULL;
        *job->die = 1;
        free(track);
        return 0;
    }

    /* Video track */
    mux_data      = calloc(1, sizeof( hb_mux_data_t ) );
    job->mux_data = mux_data;

    track->trackType = MK_TRACK_VIDEO;
    track->flagDefault = 1;
    track->flagEnabled = 1;
    switch (job->vcodec)
    {
        case HB_VCODEC_X264:
            track->codecID = MK_VCODEC_MP4AVC;
            /* Taken from x264 muxers.c */
            avcC_len = 5 + 1 + 2 + job->config.h264.sps_length + 1 + 2 + job->config.h264.pps_length;
            avcC = malloc(avcC_len);
            if (avcC == NULL) {
                free(track);
                return -1;
            }

            avcC[0] = 1;
            avcC[1] = job->config.h264.sps[1];      /* AVCProfileIndication */
            avcC[2] = job->config.h264.sps[2];      /* profile_compat */
            avcC[3] = job->config.h264.sps[3];      /* AVCLevelIndication */
            avcC[4] = 0xff; // nalu size length is four bytes
            avcC[5] = 0xe1; // one sps

            avcC[6] = job->config.h264.sps_length >> 8;
            avcC[7] = job->config.h264.sps_length;

            memcpy(avcC+8, job->config.h264.sps, job->config.h264.sps_length);

            avcC[8+job->config.h264.sps_length] = 1; // one pps
            avcC[9+job->config.h264.sps_length] = job->config.h264.pps_length >> 8;
            avcC[10+job->config.h264.sps_length] = job->config.h264.pps_length;

            memcpy( avcC+11+job->config.h264.sps_length, job->config.h264.pps, job->config.h264.pps_length );
            track->codecPrivate = avcC;
            track->codecPrivateSize = avcC_len;
            if (job->areBframes)
                track->minCache = 1;
            break;
        case HB_VCODEC_FFMPEG_MPEG4:
            track->codecID = MK_VCODEC_MP4ASP;
            track->codecPrivate = job->config.mpeg4.bytes;
            track->codecPrivateSize = job->config.mpeg4.length;
            if (job->areBframes)
                track->minCache = 1;
            break;
        case HB_VCODEC_FFMPEG_MPEG2:
            track->codecID = MK_VCODEC_MPEG2;
            track->codecPrivate = job->config.mpeg4.bytes;
            track->codecPrivateSize = job->config.mpeg4.length;
            if (job->areBframes)
                track->minCache = 1;
            break;
        case HB_VCODEC_THEORA:
            {
                int i;
                uint64_t cp_size = 0;
                track->codecID = MK_VCODEC_THEORA;
                uint64_t  header_sizes[3];
                for (i = 0; i < 3; ++i)
                {
                    ogg_headers[i] = (ogg_packet *)job->config.theora.headers[i];
                    ogg_headers[i]->packet = (unsigned char *)&job->config.theora.headers[i] + sizeof( ogg_packet );
                    header_sizes[i] = ogg_headers[i]->bytes;
                }
                track->codecPrivate = mk_laceXiph(header_sizes, 2, &cp_size);
                track->codecPrivate = realloc(track->codecPrivate, cp_size + ogg_headers[0]->bytes + ogg_headers[1]->bytes + ogg_headers[2]->bytes);
                for(i = 0; i < 3; ++i)
                {
                    memcpy(track->codecPrivate + cp_size, ogg_headers[i]->packet, ogg_headers[i]->bytes);
                    cp_size += ogg_headers[i]->bytes;
                }
                track->codecPrivateSize = cp_size;
            }
            break;
        default:
            *job->die = 1;
            hb_error("muxmkv: Unknown video codec: %x", job->vcodec);
            free(track);
            return 0;
    }

    track->extra.video.pixelWidth = job->width;
    track->extra.video.pixelHeight = job->height;
    track->extra.video.displayHeight = job->height;
    if( job->anamorphic.mode )
    {
        track->extra.video.displayWidth = job->width * ((double)job->anamorphic.par_width / (double)job->anamorphic.par_height);
    }
    else
    {
        track->extra.video.displayWidth = job->width;
    }

    int vrate_base, vrate;
    if( job->pass == 2 )
    {
        hb_interjob_t * interjob = hb_interjob_get( job->h );
        vrate_base = interjob->vrate_base;
        vrate = interjob->vrate;
    }
    else
    {
        vrate_base = job->vrate_base;
        vrate = job->vrate;
    }
    track->defaultDuration = (int64_t)(((float)vrate_base / (float)vrate) * 1000000000);

    mux_data->track = mk_createTrack(m->file, track);

    /* add the audio tracks */
    for( i = 0; i < hb_list_count( job->list_audio ); i++ )
    {
        audio = hb_list_item( job->list_audio, i );
        mux_data = calloc(1, sizeof( hb_mux_data_t ) );
        audio->priv.mux_data = mux_data;

        mux_data->codec = audio->config.out.codec;

        memset(track, 0, sizeof(mk_TrackConfig));
        switch (audio->config.out.codec & HB_ACODEC_MASK)
        {
            case HB_ACODEC_DCA:
            case HB_ACODEC_DCA_HD:
                track->codecPrivate = NULL;
                track->codecPrivateSize = 0;
                track->codecID = MK_ACODEC_DTS;
                break;
            case HB_ACODEC_AC3:
                track->codecPrivate = NULL;
                track->codecPrivateSize = 0;
                track->codecID = MK_ACODEC_AC3;
                break;
            case HB_ACODEC_LAME:
            case HB_ACODEC_MP3:
                track->codecPrivate = NULL;
                track->codecPrivateSize = 0;
                track->codecID = MK_ACODEC_MP3;
                break;
            case HB_ACODEC_VORBIS:
                {
                    int i;
                    uint64_t cp_size = 0;
                    track->codecID = MK_ACODEC_VORBIS;
                    uint64_t  header_sizes[3];
                    for (i = 0; i < 3; ++i)
                    {
                        ogg_headers[i] = (ogg_packet *)audio->priv.config.vorbis.headers[i];
                        ogg_headers[i]->packet = (unsigned char *)&audio->priv.config.vorbis.headers[i] + sizeof( ogg_packet );
                        header_sizes[i] = ogg_headers[i]->bytes;
                    }
                    track->codecPrivate = mk_laceXiph(header_sizes, 2, &cp_size);
                    track->codecPrivate = realloc(track->codecPrivate, cp_size + ogg_headers[0]->bytes + ogg_headers[1]->bytes + ogg_headers[2]->bytes);
                    for(i = 0; i < 3; ++i)
                    {
                        memcpy(track->codecPrivate + cp_size, ogg_headers[i]->packet, ogg_headers[i]->bytes);
                        cp_size += ogg_headers[i]->bytes;
                    }
                    track->codecPrivateSize = cp_size;
                }
                break;
            case HB_ACODEC_FFFLAC:
            case HB_ACODEC_FFFLAC24:
                if (audio->priv.config.extradata.bytes)
                {
                    track->codecPrivate = create_flac_header(audio->priv.config.extradata.bytes,
                                                             audio->priv.config.extradata.length);
                    track->codecPrivateSize = audio->priv.config.extradata.length + 8;
                }
                track->codecID = MK_ACODEC_FLAC;
                break;
            case HB_ACODEC_FAAC:
            case HB_ACODEC_FFAAC:
            case HB_ACODEC_CA_AAC:
            case HB_ACODEC_CA_HAAC:
            case HB_ACODEC_FDK_AAC:
            case HB_ACODEC_FDK_HAAC:
                track->codecPrivate = audio->priv.config.extradata.bytes;
                track->codecPrivateSize = audio->priv.config.extradata.length;
                track->codecID = MK_ACODEC_AAC;
                break;
            default:
                *job->die = 1;
                hb_error("muxmkv: Unknown audio codec: %x", audio->config.out.codec);
                return 0;
        }

        if( default_track_flag )
        {
            track->flagDefault = 1;
            default_track_flag = 0;
        }
        else
        {
            track->flagDefault = 0;
        }
        track->flagEnabled = 1;
        track->trackType = MK_TRACK_AUDIO;
        // MKV lang codes should be ISO-639-2/B
        lang =  lang_for_code2( audio->config.lang.iso639_2 );
        track->language = lang->iso639_2b ? lang->iso639_2b : lang->iso639_2;
        // sample rate
        if ((audio->config.out.codec == HB_ACODEC_CA_HAAC)  ||
            (audio->config.out.codec == HB_ACODEC_FDK_HAAC) ||
            (audio->config.out.codec == HB_ACODEC_AAC_PASS &&
             audio->config.in.samples_per_frame > 1024))
        {
            // For HE-AAC, write outputSamplingFreq too
            // samplingFreq is half of outputSamplingFreq
            track->extra.audio.outputSamplingFreq = (float)audio->config.out.samplerate;
            track->extra.audio.samplingFreq = track->extra.audio.outputSamplingFreq / 2.;
        }
        else
        {
            track->extra.audio.samplingFreq = (float)audio->config.out.samplerate;
        }
        if (audio->config.out.codec & HB_ACODEC_PASS_FLAG)
        {
            track->extra.audio.channels = av_get_channel_layout_nb_channels(audio->config.in.channel_layout);
        }
        else
        {
            track->extra.audio.channels = hb_mixdown_get_discrete_channel_count(audio->config.out.mixdown);
        }
        mux_data->track = mk_createTrack(m->file, track);
        if (audio->config.out.codec == HB_ACODEC_VORBIS ||
            audio->config.out.codec == HB_ACODEC_FFFLAC ||
            audio->config.out.codec == HB_ACODEC_FFFLAC24)
            free(track->codecPrivate);
    }

    char * subidx_fmt =
        "size: %dx%d\n"
        "org: %d, %d\n"
        "scale: 100%%, 100%%\n"
        "alpha: 100%%\n"
        "smooth: OFF\n"
        "fadein/out: 50, 50\n"
        "align: OFF at LEFT TOP\n"
        "time offset: 0\n"
        "forced subs: %s\n"
        "palette: %06x, %06x, %06x, %06x, %06x, %06x, "
        "%06x, %06x, %06x, %06x, %06x, %06x, %06x, %06x, %06x, %06x\n"
        "custom colors: OFF, tridx: 0000, "
        "colors: 000000, 000000, 000000, 000000\n";

    for( i = 0; i < hb_list_count( job->list_subtitle ); i++ )
    {
        hb_subtitle_t * subtitle;
        uint32_t        rgb[16];
        char            subidx[2048];
        int             len;

        subtitle = hb_list_item( job->list_subtitle, i );
        if (subtitle->config.dest != PASSTHRUSUB)
            continue;

        memset(track, 0, sizeof(mk_TrackConfig));
        switch (subtitle->source)
        {
            case VOBSUB:
                track->codecID = MK_SUBTITLE_VOBSUB;
                for (j = 0; j < 16; j++)
                    rgb[j] = hb_yuv2rgb(subtitle->palette[j]);
                len = snprintf(subidx, 2048, subidx_fmt, 
                        subtitle->width, subtitle->height,
                        0, 0, "OFF",
                        rgb[0], rgb[1], rgb[2], rgb[3],
                        rgb[4], rgb[5], rgb[6], rgb[7],
                        rgb[8], rgb[9], rgb[10], rgb[11],
                        rgb[12], rgb[13], rgb[14], rgb[15]);
                track->codecPrivate = subidx;
                track->codecPrivateSize = len + 1;
                break;
            case PGSSUB:
                track->codecPrivate = NULL;
                track->codecPrivateSize = 0;
                track->codecID = MK_SUBTITLE_PGS;
                break;
            case SSASUB:
                track->codecID = MK_SUBTITLE_SSA;
                need_fonts = 1;
                track->codecPrivate = subtitle->extradata;
                track->codecPrivateSize = subtitle->extradata_size;
                break;
            case CC608SUB:
            case CC708SUB:
            case UTF8SUB:
            case TX3GSUB:
            case SRTSUB:
                track->codecPrivate = NULL;
                track->codecPrivateSize = 0;
                track->codecID = MK_SUBTITLE_UTF8;
                break;
            default:
                continue;
        }
        if ( subtitle->config.default_track )
        {
            track->flagDefault = 1;
        }

        mux_data = calloc(1, sizeof( hb_mux_data_t ) );
        subtitle->mux_data = mux_data;
        mux_data->subtitle = 1;
        mux_data->sub_format = subtitle->format;

        track->flagEnabled = 1;
        track->trackType = MK_TRACK_SUBTITLE;
        // MKV lang codes should be ISO-639-2/B
        lang =  lang_for_code2( subtitle->iso639_2 );
        track->language = lang->iso639_2b ? lang->iso639_2b : lang->iso639_2;

        mux_data->track = mk_createTrack(m->file, track);
    }

    if (need_fonts)
    {
        hb_list_t * list_attachment = job->list_attachment;
        int i;
        for ( i = 0; i < hb_list_count(list_attachment); i++ )
        {
            hb_attachment_t * attachment = hb_list_item( list_attachment, i );

            if ( attachment->type == FONT_TTF_ATTACH )
            {
                mk_createAttachment(
                    m->file,
                    attachment->name,
                    NULL,
                    "application/x-truetype-font",
                    attachment->data,
                    attachment->size);
            }
        }
    }

    if( mk_writeHeader( m->file, "HandBrake " HB_PROJECT_VERSION) < 0 )
    {
        hb_error( "Failed to write to output file, disk full?");
        *job->die = 1;
    }
    if (track != NULL)
        free(track);
    if (avcC != NULL)
        free(avcC);

    return 0;
}
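
Both the Theora and Vorbis branches pack three ogg header packets into one CodecPrivate block using Xiph lacing: mk_laceXiph is called with a count of 2, meaning three laced headers whose last size is implicit. A standalone sketch of that size encoding, shown for reference (not libmkv's implementation):

#include <stdint.h>
#include <stdlib.h>

/* Encode the lacing prefix for three headers: a count byte (frames - 1)
 * followed by the first two sizes, each written as runs of 0xff plus a
 * final remainder byte. The third size is implied by the total length. */
static uint8_t *xiph_lace_sizes(const uint64_t sizes[3], uint64_t *out_len)
{
    uint64_t cap = 1 + (sizes[0] / 255 + 1) + (sizes[1] / 255 + 1);
    uint8_t *p = malloc(cap), *q = p;
    if (p == NULL)
        return NULL;

    *q++ = 2;                          /* three laced headers */
    for (int i = 0; i < 2; i++)        /* last size is not stored */
    {
        uint64_t n = sizes[i];
        for (; n >= 255; n -= 255)
            *q++ = 0xff;
        *q++ = (uint8_t)n;
    }
    *out_len = (uint64_t)(q - p);
    return p;
}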
Example #7
static hb_opencl_device_t* hb_opencl_device_get(hb_opencl_library_t *opencl,
                                                cl_device_id device_id)
{
    if (opencl == NULL || opencl->clGetDeviceInfo == NULL)
    {
        hb_error("hb_opencl_device_get: OpenCL support not available");
        return NULL;
    }
    else if (device_id == NULL)
    {
        hb_error("hb_opencl_device_get: invalid device ID");
        return NULL;
    }

    hb_opencl_device_t *device = calloc(1, sizeof(hb_opencl_device_t));
    if (device == NULL)
    {
        hb_error("hb_opencl_device_get: memory allocation failure");
        return NULL;
    }

    cl_int status = CL_SUCCESS;
    device->id    = device_id;

    status |= opencl->clGetDeviceInfo(device->id, CL_DEVICE_VENDOR,   sizeof(device->vendor),
                                      device->vendor,    NULL);
    status |= opencl->clGetDeviceInfo(device->id, CL_DEVICE_NAME,     sizeof(device->name),
                                       device->name,      NULL);
    status |= opencl->clGetDeviceInfo(device->id, CL_DEVICE_VERSION,  sizeof(device->version),
                                      device->version,   NULL);
    status |= opencl->clGetDeviceInfo(device->id, CL_DEVICE_TYPE,     sizeof(device->type),
                                     &device->type,     NULL);
    status |= opencl->clGetDeviceInfo(device->id, CL_DEVICE_PLATFORM, sizeof(device->platform),
                                     &device->platform, NULL);
    status |= opencl->clGetDeviceInfo(device->id, CL_DRIVER_VERSION,  sizeof(device->driver),
                                      device->driver,    NULL);
    if (status != CL_SUCCESS)
    {
        free(device);
        return NULL;
    }

    if (!strcmp(device->vendor, "Advanced Micro Devices, Inc.") ||
        !strcmp(device->vendor, "AMD"))
    {
        device->ocl_vendor = HB_OCL_VENDOR_AMD;
    }
    else if (!strncmp(device->vendor, "NVIDIA", 6 /* strlen("NVIDIA") */))
    {
        device->ocl_vendor = HB_OCL_VENDOR_NVIDIA;
    }
    else if (!strncmp(device->vendor, "Intel", 5 /* strlen("Intel") */))
    {
        device->ocl_vendor = HB_OCL_VENDOR_INTEL;
    }
    else
    {
        device->ocl_vendor = HB_OCL_VENDOR_OTHER;
    }

    return device;
}
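
The function above queries device strings through the hb_ocl dispatch table and then classifies the vendor by name. A standalone sketch of the same CL_DEVICE_VENDOR query against the plain OpenCL API, for the first device of the first platform:

#include <stdio.h>
#include <CL/cl.h>

int main(void)
{
    cl_platform_id platform;
    cl_device_id   device;
    char           vendor[128] = "";

    if (clGetPlatformIDs(1, &platform, NULL)                           != CL_SUCCESS ||
        clGetDeviceIDs(platform, CL_DEVICE_TYPE_ALL, 1, &device, NULL) != CL_SUCCESS ||
        clGetDeviceInfo(device, CL_DEVICE_VENDOR,
                        sizeof(vendor), vendor, NULL)                  != CL_SUCCESS)
    {
        fprintf(stderr, "OpenCL device query failed\n");
        return 1;
    }

    printf("vendor: %s\n", vendor);
    return 0;
}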
Example #8
int hb_qsv_param_default(hb_qsv_param_t *param, mfxVideoParam *videoParam)
{
    if (param != NULL && videoParam != NULL)
    {
        // introduced in API 1.0
        memset(&param->codingOption, 0, sizeof(mfxExtCodingOption));
        param->codingOption.Header.BufferId      = MFX_EXTBUFF_CODING_OPTION;
        param->codingOption.Header.BufferSz      = sizeof(mfxExtCodingOption);
        param->codingOption.MECostType           = 0; // reserved, must be 0
        param->codingOption.MESearchType         = 0; // reserved, must be 0
        param->codingOption.MVSearchWindow.x     = 0; // reserved, must be 0
        param->codingOption.MVSearchWindow.y     = 0; // reserved, must be 0
        param->codingOption.RefPicListReordering = 0; // reserved, must be 0
        param->codingOption.IntraPredBlockSize   = 0; // reserved, must be 0
        param->codingOption.InterPredBlockSize   = 0; // reserved, must be 0
        param->codingOption.MVPrecision          = 0; // reserved, must be 0
        param->codingOption.EndOfSequence        = MFX_CODINGOPTION_UNKNOWN;
        param->codingOption.RateDistortionOpt    = MFX_CODINGOPTION_UNKNOWN;
        param->codingOption.CAVLC                = MFX_CODINGOPTION_UNKNOWN;
        param->codingOption.ResetRefList         = MFX_CODINGOPTION_UNKNOWN;
        param->codingOption.MaxDecFrameBuffering = 0; // unspecified
        param->codingOption.AUDelimiter          = MFX_CODINGOPTION_OFF;
        param->codingOption.SingleSeiNalUnit     = MFX_CODINGOPTION_UNKNOWN;
        param->codingOption.PicTimingSEI         = MFX_CODINGOPTION_OFF;
        param->codingOption.VuiNalHrdParameters  = MFX_CODINGOPTION_UNKNOWN;
        param->codingOption.FramePicture         = MFX_CODINGOPTION_UNKNOWN;
        // introduced in API 1.3
        param->codingOption.RefPicMarkRep        = MFX_CODINGOPTION_UNKNOWN;
        param->codingOption.FieldOutput          = MFX_CODINGOPTION_UNKNOWN;
        param->codingOption.NalHrdConformance    = MFX_CODINGOPTION_UNKNOWN;
        param->codingOption.SingleSeiNalUnit     = MFX_CODINGOPTION_UNKNOWN;
        param->codingOption.VuiVclHrdParameters  = MFX_CODINGOPTION_UNKNOWN;
        // introduced in API 1.4
        param->codingOption.ViewOutput           = MFX_CODINGOPTION_UNKNOWN;
        // introduced in API 1.6
        param->codingOption.RecoveryPointSEI     = MFX_CODINGOPTION_UNKNOWN;

        // introduced in API 1.3
        memset(&param->videoSignalInfo, 0, sizeof(mfxExtVideoSignalInfo));
        param->videoSignalInfo.Header.BufferId          = MFX_EXTBUFF_VIDEO_SIGNAL_INFO;
        param->videoSignalInfo.Header.BufferSz          = sizeof(mfxExtVideoSignalInfo);
        param->videoSignalInfo.VideoFormat              = 5; // undefined
        param->videoSignalInfo.VideoFullRange           = 0; // TV range
        param->videoSignalInfo.ColourDescriptionPresent = 0; // don't write to bitstream
        param->videoSignalInfo.ColourPrimaries          = 2; // undefined
        param->videoSignalInfo.TransferCharacteristics  = 2; // undefined
        param->videoSignalInfo.MatrixCoefficients       = 2; // undefined

        // introduced in API 1.6
        memset(&param->codingOption2, 0, sizeof(mfxExtCodingOption2));
        param->codingOption2.Header.BufferId = MFX_EXTBUFF_CODING_OPTION2;
        param->codingOption2.Header.BufferSz = sizeof(mfxExtCodingOption2);
        param->codingOption2.IntRefType      = 0;
        param->codingOption2.IntRefCycleSize = 2;
        param->codingOption2.IntRefQPDelta   = 0;
        param->codingOption2.MaxFrameSize    = 0;
        param->codingOption2.BitrateLimit    = MFX_CODINGOPTION_ON;
        param->codingOption2.ExtBRC          = MFX_CODINGOPTION_OFF;
        param->codingOption2.MBBRC           = MFX_CODINGOPTION_UNKNOWN;
        // introduced in API 1.7
        param->codingOption2.LookAheadDepth  = 40;
        param->codingOption2.Trellis         = MFX_TRELLIS_UNKNOWN;

        // GOP & rate control
        param->gop.b_pyramid          =  0;
        param->gop.gop_pic_size       = -1; // set automatically
        param->gop.int_ref_cycle_size = -1; // set automatically
        param->rc.lookahead           = -1; // set automatically
        param->rc.cqp_offsets[0]      =  0;
        param->rc.cqp_offsets[1]      =  2;
        param->rc.cqp_offsets[2]      =  4;
        param->rc.vbv_max_bitrate     =  0; // set automatically
        param->rc.vbv_buffer_size     =  0; // set automatically
        param->rc.vbv_buffer_init     = .0; // set automatically

        // introduced in API 1.0
        memset(videoParam, 0, sizeof(mfxVideoParam));
        param->videoParam                   = videoParam;
        param->videoParam->Protected        = 0; // reserved, must be 0
        param->videoParam->NumExtParam      = 0;
        param->videoParam->IOPattern        = MFX_IOPATTERN_IN_SYSTEM_MEMORY;
        param->videoParam->mfx.TargetUsage  = MFX_TARGETUSAGE_2;
        param->videoParam->mfx.GopOptFlag   = MFX_GOP_CLOSED;
        param->videoParam->mfx.NumThread    = 0; // deprecated, must be 0
        param->videoParam->mfx.EncodedOrder = 0; // input is in display order
        param->videoParam->mfx.IdrInterval  = 0; // all I-frames are IDR
        param->videoParam->mfx.NumSlice     = 0; // use Media SDK default
        param->videoParam->mfx.NumRefFrame  = 0; // use Media SDK default
        param->videoParam->mfx.GopPicSize   = 0; // use Media SDK default
        param->videoParam->mfx.GopRefDist   = 0; // use Media SDK default
        // introduced in API 1.1
        param->videoParam->AsyncDepth = AV_QSV_ASYNC_DEPTH_DEFAULT;
        // introduced in API 1.3
        param->videoParam->mfx.BRCParamMultiplier = 0; // no multiplier

        // FrameInfo: set by video encoder, except PicStruct
        param->videoParam->mfx.FrameInfo.PicStruct = MFX_PICSTRUCT_PROGRESSIVE;

        // attach supported mfxExtBuffer structures to the mfxVideoParam
        param->videoParam->NumExtParam                                = 0;
        param->videoParam->ExtParam                                   = param->ExtParamArray;
        param->videoParam->ExtParam[param->videoParam->NumExtParam++] = (mfxExtBuffer*)&param->codingOption;
        param->videoParam->ExtParam[param->videoParam->NumExtParam++] = (mfxExtBuffer*)&param->videoSignalInfo;
        if (hb_qsv_info->capabilities & HB_QSV_CAP_MSDK_API_1_6)
        {
            param->videoParam->ExtParam[param->videoParam->NumExtParam++] = (mfxExtBuffer*)&param->codingOption2;
        }
    }
    else
    {
        hb_error("hb_qsv_param_default: invalid pointer(s)");
        return -1;
    }
    return 0;
}
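The NumExtParam/ExtParam bookkeeping at the end of this function is the standard Media SDK pattern for attaching extended buffers: every mfxExtBuffer begins with a header carrying its BufferId and BufferSz, so a buffer can later be located by ID. A minimal sketch of such a lookup follows; the helper name is hypothetical and not part of HandBrake or the SDK, and it assumes the Media SDK headers are available.

// Sketch only: scan the attached extended buffers for a given BufferId.
#include <mfxvideo.h>

static mfxExtBuffer* find_ext_buffer(mfxVideoParam *videoParam, mfxU32 bufferId)
{
    for (mfxU16 i = 0; i < videoParam->NumExtParam; i++)
    {
        if (videoParam->ExtParam[i] != NULL &&
            videoParam->ExtParam[i]->BufferId == bufferId)
        {
            return videoParam->ExtParam[i];
        }
    }
    return NULL;
}

// Usage, assuming param was filled by hb_qsv_param_default():
//     mfxExtCodingOption2 *co2 = (mfxExtCodingOption2*)
//         find_ext_buffer(param->videoParam, MFX_EXTBUFF_CODING_OPTION2);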
Exemplo n.º 9
0
static int decsrtInit( hb_work_object_t * w, hb_job_t * job )
{
    hb_work_private_t * pv;
    int i;
    hb_chapter_t * chapter;

    pv = calloc( 1, sizeof( hb_work_private_t ) );
    if (pv == NULL)
    {
        goto fail;
    }

    w->private_data = pv;

    pv->job = job;
    pv->current_state = k_state_potential_new_entry;
    pv->number_of_entries = 0;
    pv->last_entry_number = 0;
    pv->current_time = 0;
    pv->subtitle = w->subtitle;

    /*
     * Figure out the start and stop times from the chapters being
     * encoded - drop subtitle not in this range.
     */
    pv->start_time = 0;
    for( i = 1; i < job->chapter_start; ++i )
    {
        chapter = hb_list_item( job->list_chapter, i - 1 );
        if( chapter )
        {
            pv->start_time += chapter->duration;
        } else {
            hb_error( "Could not locate chapter %d for SRT start time", i );
        }
    }
    pv->stop_time = pv->start_time;
    for( i = job->chapter_start; i <= job->chapter_end; ++i )
    {
        chapter = hb_list_item( job->list_chapter, i - 1 );
        if( chapter )
        {
            pv->stop_time += chapter->duration;
        } else {
            hb_error( "Could not locate chapter %d for SRT start time", i );
        }
    }

    hb_deep_log(3, "SRT Start time %"PRId64", stop time %"PRId64,
                pv->start_time, pv->stop_time);

    if (job->pts_to_start != 0)
    {
        pv->start_time = AV_NOPTS_VALUE;
    }

    pv->iconv_context = iconv_open( "utf-8", pv->subtitle->config.src_codeset );
    if( pv->iconv_context == (iconv_t) -1 )
    {
        hb_error("Could not open the iconv library with those file formats\n");
        goto fail;
    } else {
        memset( &pv->current_entry, 0, sizeof( srt_entry_t ) );

        pv->file = hb_fopen(w->subtitle->config.src_filename, "r");

        if( !pv->file )
        {
            hb_error("Could not open the SRT subtitle file '%s'\n",
                     w->subtitle->config.src_filename);
            goto fail;
        }
    }

    // Generate generic SSA Script Info.
    int height = job->title->geometry.height - job->crop[0] - job->crop[1];
    int width = job->title->geometry.width - job->crop[2] - job->crop[3];
    hb_subtitle_add_ssa_header(w->subtitle, HB_FONT_SANS,
                               .066 * job->title->geometry.height,
                               width, height);
    return 0;

fail:
    if (pv != NULL)
    {
        if (pv->iconv_context != (iconv_t) -1)
        {
            iconv_close(pv->iconv_context);
        }
        if (pv->file != NULL)
        {
            fclose(pv->file);
        }
        free(pv);
    }
    return 1;
}
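The two loops above simply accumulate chapter durations to bracket the encode range: everything before chapter_start contributes to start_time, and chapters chapter_start through chapter_end extend stop_time. A small self-contained sketch with made-up durations (90 kHz clock ticks) shows the arithmetic:

// Sketch of the chapter-range arithmetic above; durations are hypothetical.
#include <stdint.h>
#include <stdio.h>

int main(void)
{
    int64_t durations[] = { 90000LL * 60,   // chapter 1: 60 s
                            90000LL * 120,  // chapter 2: 120 s
                            90000LL * 30 }; // chapter 3: 30 s
    int chapter_start = 2, chapter_end = 3;

    int64_t start_time = 0, stop_time;
    for (int i = 1; i < chapter_start; i++)     // chapters before the range
        start_time += durations[i - 1];
    stop_time = start_time;
    for (int i = chapter_start; i <= chapter_end; i++)
        stop_time += durations[i - 1];

    printf("start=%lld stop=%lld\n", (long long)start_time, (long long)stop_time);
    // start = 5,400,000 (60 s), stop = 18,900,000 (210 s)
    return 0;
}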
Exemplo n.º 10
0
/***********************************************************************
 * hb_work_encCoreAudio_init
 ***********************************************************************
 *
 **********************************************************************/
int encCoreAudioInit(hb_work_object_t *w, hb_job_t *job, enum AAC_MODE mode)
{
    hb_work_private_t *pv = calloc(1, sizeof(hb_work_private_t));
    hb_audio_t *audio = w->audio;
    AudioStreamBasicDescription input, output;
    UInt32 tmp, tmpsiz = sizeof(tmp);
    OSStatus err;

    w->private_data = pv;
    pv->job = job;

    // pass the number of channels used into the private work data
    pv->nchannels =
        hb_mixdown_get_discrete_channel_count(audio->config.out.mixdown);

    bzero(&input, sizeof(AudioStreamBasicDescription));
    input.mSampleRate = (Float64)audio->config.out.samplerate;
    input.mFormatID = kAudioFormatLinearPCM;
    input.mFormatFlags = (kLinearPCMFormatFlagIsFloat|kAudioFormatFlagsNativeEndian);
    input.mBytesPerPacket = 4 * pv->nchannels;
    input.mFramesPerPacket = 1;
    input.mBytesPerFrame = input.mBytesPerPacket * input.mFramesPerPacket;
    input.mChannelsPerFrame = pv->nchannels;
    input.mBitsPerChannel = 32;

    bzero(&output, sizeof(AudioStreamBasicDescription));
    switch (mode)
    {
    case AAC_MODE_HE:
        output.mFormatID = kAudioFormatMPEG4AAC_HE;
        break;
    case AAC_MODE_LC:
    default:
        output.mFormatID = kAudioFormatMPEG4AAC;
        break;
    }
    output.mSampleRate = (Float64)audio->config.out.samplerate;
    output.mChannelsPerFrame = pv->nchannels;
    // let CoreAudio decide the rest

    // initialise encoder
    err = AudioConverterNew(&input, &output, &pv->converter);
    if (err != noErr)
    {
        // Retry without the samplerate
        bzero(&output, sizeof(AudioStreamBasicDescription));
        switch (mode)
        {
        case AAC_MODE_HE:
            output.mFormatID = kAudioFormatMPEG4AAC_HE;
            break;
        case AAC_MODE_LC:
        default:
            output.mFormatID = kAudioFormatMPEG4AAC;
            break;
        }
        output.mChannelsPerFrame = pv->nchannels;

        err = AudioConverterNew(&input, &output, &pv->converter);

        if (err != noErr)
        {
            hb_log("Error creating an AudioConverter err=%"PRId64" output.mBytesPerFrame=%"PRIu64"",
                   (int64_t)err, (uint64_t)output.mBytesPerFrame);
            *job->done_error = HB_ERROR_UNKNOWN;
            *job->die = 1;
            return -1;
        }
    }

    // set encoder quality to maximum
    tmp = kAudioConverterQuality_Max;
    AudioConverterSetProperty(pv->converter, kAudioConverterCodecQuality,
                              sizeof(tmp), &tmp);

    if (audio->config.out.bitrate > 0)
    {
        // set encoder bitrate control mode to constrained variable
        tmp = kAudioCodecBitRateControlMode_VariableConstrained;
        AudioConverterSetProperty(pv->converter,
                                  kAudioCodecPropertyBitRateControlMode,
                                  sizeof(tmp), &tmp);

        // get available bitrates
        AudioValueRange *bitrates;
        ssize_t bitrateCounts;
        err = AudioConverterGetPropertyInfo(pv->converter,
                                            kAudioConverterApplicableEncodeBitRates,
                                            &tmpsiz, NULL);
        bitrates = malloc(tmpsiz);
        err = AudioConverterGetProperty(pv->converter,
                                        kAudioConverterApplicableEncodeBitRates,
                                        &tmpsiz, bitrates);
        bitrateCounts = tmpsiz / sizeof(AudioValueRange);

        // set bitrate
        tmp = audio->config.out.bitrate * 1000;
        if (tmp < bitrates[0].mMinimum)
            tmp = bitrates[0].mMinimum;
        if (tmp > bitrates[bitrateCounts-1].mMinimum)
            tmp = bitrates[bitrateCounts-1].mMinimum;
        free(bitrates);
        if (tmp != audio->config.out.bitrate * 1000)
        {
            hb_log("encCoreAudioInit: sanitizing track %d audio bitrate %d to %"PRIu32"",
                   audio->config.out.track, audio->config.out.bitrate, tmp / 1000);
        }
        AudioConverterSetProperty(pv->converter,
                                  kAudioConverterEncodeBitRate,
                                  sizeof(tmp), &tmp);
    }
    else if (audio->config.out.quality >= 0)
    {
        if (mode != AAC_MODE_LC)
        {
            hb_error("encCoreAudioInit: internal error, VBR set but not applicable");
            return 1;
        }
        // set encoder bitrate control mode to variable
        tmp = kAudioCodecBitRateControlMode_Variable;
        AudioConverterSetProperty(pv->converter,
                                  kAudioCodecPropertyBitRateControlMode,
                                  sizeof(tmp), &tmp);

        // set quality
        tmp = audio->config.out.quality;
        AudioConverterSetProperty(pv->converter,
                                  kAudioCodecPropertySoundQualityForVBR,
                                  sizeof(tmp), &tmp);
    }
    else
    {
        hb_error("encCoreAudioInit: internal error, bitrate/quality not set");
        return 1;
    }

    // get real input
    tmpsiz = sizeof(input);
    AudioConverterGetProperty(pv->converter,
                              kAudioConverterCurrentInputStreamDescription,
                              &tmpsiz, &input);
    // get real output
    tmpsiz = sizeof(output);
    AudioConverterGetProperty(pv->converter,
                              kAudioConverterCurrentOutputStreamDescription,
                              &tmpsiz, &output);

    // set sizes
    pv->isamplesiz  = input.mBytesPerPacket;
    pv->isamples    = output.mFramesPerPacket;
    pv->osamplerate = output.mSampleRate;
    audio->config.out.samples_per_frame = pv->isamples;

    // channel remapping
    pv->remap = hb_audio_remap_init(AV_SAMPLE_FMT_FLT, &hb_aac_chan_map,
                                    audio->config.in.channel_map);
    if (pv->remap == NULL)
    {
        hb_error("encCoreAudioInit: hb_audio_remap_init() failed");
    }
    uint64_t layout = hb_ff_mixdown_xlat(audio->config.out.mixdown, NULL);
    hb_audio_remap_set_channel_layout(pv->remap, layout);

    // get maximum output size
    AudioConverterGetProperty(pv->converter,
                              kAudioConverterPropertyMaximumOutputPacketSize,
                              &tmpsiz, &tmp);
    pv->omaxpacket = tmp;

    // get magic cookie (elementary stream descriptor)
    tmp = HB_CONFIG_MAX_SIZE;
    AudioConverterGetProperty(pv->converter,
                              kAudioConverterCompressionMagicCookie,
                              &tmp, w->config->extradata.bytes);
    // CoreAudio returns a complete ESDS, but we only need
    // the DecoderSpecific info.
    UInt8* buffer = NULL;
    ReadESDSDescExt(w->config->extradata.bytes, &buffer, &tmpsiz, 0);
    w->config->extradata.length = tmpsiz;
    memmove(w->config->extradata.bytes, buffer, w->config->extradata.length);
    free(buffer);

    pv->list = hb_list_init();
    pv->buf = NULL;

    return 0;
}
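The bitrate sanitization above clamps the requested rate into the window reported by kAudioConverterApplicableEncodeBitRates. Below is a standalone sketch of that clamp, assuming (as the code does) that the returned ranges are sorted in ascending order; the helper name is illustrative and not part of HandBrake or CoreAudio.

// Sketch only: clamp a requested bitrate (bps) against the applicable ranges.
#include <AudioToolbox/AudioToolbox.h>
#include <stddef.h>

static UInt32 clamp_bitrate(UInt32 requested_bps,
                            const AudioValueRange *ranges, size_t count)
{
    if (count == 0)
        return requested_bps;
    if (requested_bps < ranges[0].mMinimum)           // below the lowest range
        return (UInt32)ranges[0].mMinimum;
    if (requested_bps > ranges[count - 1].mMinimum)   // above the highest range's floor
        return (UInt32)ranges[count - 1].mMinimum;
    return requested_bps;
}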
Exemplo n.º 11
0
int hb_qsv_info_init()
{
    static int init_done = 0;
    if (init_done)
        return (hb_qsv_info == NULL);
    init_done = 1;

    hb_qsv_info = calloc(1, sizeof(*hb_qsv_info));
    if (hb_qsv_info == NULL)
    {
        hb_error("hb_qsv_info_init: alloc failure");
        return -1;
    }

    mfxSession session;
    qsv_minimum_version.Major = HB_QSV_MINVERSION_MAJOR;
    qsv_minimum_version.Minor = HB_QSV_MINVERSION_MINOR;

    // check for software fallback
    if (MFXInit(MFX_IMPL_SOFTWARE,
                &qsv_minimum_version, &session) == MFX_ERR_NONE)
    {
        qsv_software_available   = 1;
        preferred_implementation = MFX_IMPL_SOFTWARE;
        // our minimum is supported, but query the actual version
        MFXQueryVersion(session, &qsv_software_version);
        MFXClose(session);
    }

    // check for actual hardware support
    if (MFXInit(MFX_IMPL_HARDWARE_ANY|MFX_IMPL_VIA_ANY,
                &qsv_minimum_version, &session) == MFX_ERR_NONE)
    {
        qsv_hardware_available   = 1;
        preferred_implementation = MFX_IMPL_HARDWARE_ANY|MFX_IMPL_VIA_ANY;
        // our minimum is supported, but query the actual version
        MFXQueryVersion(session, &qsv_hardware_version);
        MFXClose(session);
    }

    // check for version-specific or hardware-specific capabilities
    // we only use software as a fallback, so check hardware first
    if (qsv_hardware_available)
    {
        if (HB_CHECK_MFX_VERSION(qsv_hardware_version, 1, 6))
        {
            hb_qsv_info->capabilities |= HB_QSV_CAP_OPTION2_BRC;
            hb_qsv_info->capabilities |= HB_QSV_CAP_MSDK_API_1_6;
        }
        if (hb_get_cpu_platform() == HB_CPU_PLATFORM_INTEL_HSW)
        {
            if (HB_CHECK_MFX_VERSION(qsv_hardware_version, 1, 7))
            {
                hb_qsv_info->capabilities |= HB_QSV_CAP_OPTION2_TRELLIS;
                hb_qsv_info->capabilities |= HB_QSV_CAP_OPTION2_LOOKAHEAD;
            }
            hb_qsv_info->capabilities |= HB_QSV_CAP_H264_BPYRAMID;
        }
    }
    else if (qsv_software_available)
    {
        if (HB_CHECK_MFX_VERSION(qsv_software_version, 1, 6))
        {
            hb_qsv_info->capabilities |= HB_QSV_CAP_MSDK_API_1_6;
            hb_qsv_info->capabilities |= HB_QSV_CAP_H264_BPYRAMID;
        }
    }

    // note: we pass a pointer to MFXInit but it never gets modified
    //       let's make sure of it just to be safe though
    if (qsv_minimum_version.Major != HB_QSV_MINVERSION_MAJOR ||
        qsv_minimum_version.Minor != HB_QSV_MINVERSION_MINOR)
    {
        hb_error("hb_qsv_info_init: minimum version (%d.%d) was modified",
                 qsv_minimum_version.Major,
                 qsv_minimum_version.Minor);
    }

    // success
    return 0;
}
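Capability detection above boils down to comparing the queried mfxVersion against a required major/minor pair. A plausible shape for that comparison is sketched below; this is an assumption, not necessarily HandBrake's actual HB_CHECK_MFX_VERSION definition.

// Assumed semantics: same major version, minor version at least as new.
#define CHECK_MFX_VERSION(version, major, minor) \
    ((version).Major == (major) && (version).Minor >= (minor))

// e.g. CHECK_MFX_VERSION(qsv_hardware_version, 1, 6) would gate the
// mfxExtCodingOption2 features introduced in Media SDK API 1.6.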
Exemplo n.º 12
0
/***********************************************************************
 * hb_work_encx265_init
 ***********************************************************************
 *
 **********************************************************************/
int encx265Init(hb_work_object_t *w, hb_job_t *job)
{
    hb_work_private_t  *pv = calloc(1, sizeof(hb_work_private_t));
    int                 ret, depth;
    x265_nal           *nal;
    uint32_t            nnal;
    const char * const *profile_names;

    pv->job              = job;
    pv->last_stop        = AV_NOPTS_VALUE;
    pv->chapter_queue    = hb_chapter_queue_init();
    w->private_data      = pv;

    depth                = hb_video_encoder_get_depth(job->vcodec);
    profile_names        = hb_video_encoder_get_profiles(job->vcodec);
    pv->api              = x265_api_query(depth, X265_BUILD, NULL);

    if (pv->api == NULL)
    {
        hb_error("encx265: x265_api_query failed, bit depth %d.", depth);
        goto fail;
    }

    x265_param *param = pv->param = pv->api->param_alloc();

    if (pv->api->param_default_preset(param, job->encoder_preset,
                                      job->encoder_tune) < 0)
    {
        hb_error("encx265: x265_param_default_preset failed. Preset (%s) Tune (%s)", job->encoder_preset, job->encoder_tune);
        goto fail;
    }

    /* If the PSNR or SSIM tunes are in use, enable the relevant metric */
    param->bEnablePsnr = param->bEnableSsim = 0;
    if (job->encoder_tune != NULL && *job->encoder_tune)
    {
        char *tmp = strdup(job->encoder_tune);
        char *tok = strtok(tmp,   ",./-+");
        do
        {
            if (!strncasecmp(tok, "psnr", 4))
            {
                param->bEnablePsnr = 1;
                break;
            }
            if (!strncasecmp(tok, "ssim", 4))
            {
                param->bEnableSsim = 1;
                break;
            }
        }
        while ((tok = strtok(NULL, ",./-+")) != NULL);
        free(tmp);
    }

    /*
     * Some HandBrake-specific defaults; users can override them
     * using the encoder_options string.
     */
    param->fpsNum      = job->orig_vrate.num;
    param->fpsDenom    = job->orig_vrate.den;
    param->keyframeMin = (double)job->orig_vrate.num / job->orig_vrate.den + 0.5;
    param->keyframeMax = param->keyframeMin * 10;

    /*
     * Video Signal Type (color description only).
     *
     * Use x265_param_parse (let x265 determine which bEnable
     * flags, if any, should be set in the x265_param struct).
     */
    char colorprim[11], transfer[11], colormatrix[11];
    switch (job->color_matrix_code)
    {
        case 1: // ITU BT.601 DVD or SD TV content (NTSC)
            strcpy(colorprim,   "smpte170m");
            strcpy(transfer,        "bt709");
            strcpy(colormatrix, "smpte170m");
            break;
        case 2: // ITU BT.601 DVD or SD TV content (PAL)
            strcpy(colorprim,     "bt470bg");
            strcpy(transfer,        "bt709");
            strcpy(colormatrix, "smpte170m");
            break;
        case 3: // ITU BT.709 HD content
            strcpy(colorprim,   "bt709");
            strcpy(transfer,    "bt709");
            strcpy(colormatrix, "bt709");
            break;
        case 4: // ITU BT.2020 UHD content
            strcpy(colorprim,   "bt2020");
            strcpy(transfer,    "bt709");
            strcpy(colormatrix, "bt2020nc");
            break;
        case 5: // custom
            snprintf(colorprim,   sizeof(colorprim),   "%d", job->color_prim);
            snprintf(transfer,    sizeof(transfer),    "%d", job->color_transfer);
            snprintf(colormatrix, sizeof(colormatrix), "%d", job->color_matrix);
            break;
        default: // detected during scan
            snprintf(colorprim,   sizeof(colorprim),   "%d", job->title->color_prim);
            snprintf(transfer,    sizeof(transfer),    "%d", job->title->color_transfer);
            snprintf(colormatrix, sizeof(colormatrix), "%d", job->title->color_matrix);
            break;
    }
    if (param_parse(pv, param, "colorprim",   colorprim)   ||
        param_parse(pv, param, "transfer",    transfer)    ||
        param_parse(pv, param, "colormatrix", colormatrix))
    {
        goto fail;
    }

    /* iterate through x265_opts and parse the options */
    hb_dict_t *x265_opts;
    x265_opts = hb_encopts_to_dict(job->encoder_options, job->vcodec);

    hb_dict_iter_t iter;
    for (iter  = hb_dict_iter_init(x265_opts);
         iter != HB_DICT_ITER_DONE;
         iter  = hb_dict_iter_next(x265_opts, iter))
    {
        const char *key = hb_dict_iter_key(iter);
        hb_value_t *value = hb_dict_iter_value(iter);
        char *str = hb_value_get_string_xform(value);

        // here's where the strings are passed to libx265 for parsing
        // unknown options or bad values are non-fatal, see encx264.c
        param_parse(pv, param, key, str);
        free(str);
    }
    hb_dict_free(&x265_opts);

    /*
     * Reload colorimetry settings in case custom
     * values were set in the encoder_options string.
     */
    job->color_matrix_code = 5; // custom
    job->color_prim        = param->vui.colorPrimaries;
    job->color_transfer    = param->vui.transferCharacteristics;
    job->color_matrix      = param->vui.matrixCoeffs;

    /*
     * Settings which can't be overridden in the encoder_options string
     * (muxer-specific settings, resolution, ratecontrol, etc.).
     */
    param->bRepeatHeaders = job->inline_parameter_sets;
    param->sourceWidth    = job->width;
    param->sourceHeight   = job->height;

    /*
     * Let x265 determine whether to use an aspect ratio
     * index vs. the extended SAR index + SAR width/height.
     */
    char sar[22];
    snprintf(sar, sizeof(sar), "%d:%d", job->par.num, job->par.den);
    if (param_parse(pv, param, "sar", sar))
    {
        goto fail;
    }

    if (job->vquality > HB_INVALID_VIDEO_QUALITY)
    {
        param->rc.rateControlMode = X265_RC_CRF;
        param->rc.rfConstant      = job->vquality;
    }
    else
    {
        param->rc.rateControlMode = X265_RC_ABR;
        param->rc.bitrate         = job->vbitrate;
        if (job->pass_id == HB_PASS_ENCODE_1ST ||
            job->pass_id == HB_PASS_ENCODE_2ND)
        {
            char * stats_file;
            char   pass[2];
            snprintf(pass, sizeof(pass), "%d", job->pass_id);
            stats_file = hb_get_temporary_filename("x265.log");
            if (param_parse(pv, param, "stats", stats_file) ||
                param_parse(pv, param, "pass", pass))
            {
                free(stats_file);
                goto fail;
            }
            free(stats_file);
            if (job->pass_id == HB_PASS_ENCODE_1ST)
            {
                char slowfirstpass[2];
                snprintf(slowfirstpass, sizeof(slowfirstpass), "%d",
                         !job->fastfirstpass);
                if (param_parse(pv, param, "slow-firstpass", slowfirstpass))
                {
                    goto fail;
                }
            }
        }
    }

    /* statsfile (but not 2-pass) */
    if (param->logLevel >= X265_LOG_DEBUG)
    {
        if (param->csvfn == NULL)
        {
            pv->csvfn = hb_get_temporary_filename("x265.csv");
            param->csvfn = strdup(pv->csvfn);
        }
        else
        {
            pv->csvfn = strdup(param->csvfn);
        }
    }

    /* Apply profile and level settings last. */
    if (job->encoder_profile                                      != NULL &&
        strcasecmp(job->encoder_profile, profile_names[0])        != 0    &&
        pv->api->param_apply_profile(param, job->encoder_profile) < 0)
    {
        goto fail;
    }

    /* we should now know whether B-frames are enabled */
    job->areBframes = (param->bframes > 0) + (param->bframes   > 0 &&
                                              param->bBPyramid > 0);

    /* Reset global variables before opening a new encoder */
    pv->api->cleanup();

    pv->x265 = pv->api->encoder_open(param);
    if (pv->x265 == NULL)
    {
        hb_error("encx265: x265_encoder_open failed.");
        goto fail;
    }

    /*
     * x265's output (headers and bitstream) are in Annex B format.
     *
     * Write the header as is, and let the muxer reformat
     * the extradata and output bitstream properly for us.
     */
    ret = pv->api->encoder_headers(pv->x265, &nal, &nnal);
    if (ret < 0)
    {
        hb_error("encx265: x265_encoder_headers failed (%d)", ret);
        goto fail;
    }
    if (ret > sizeof(w->config->h265.headers))
    {
        hb_error("encx265: bitstream headers too large (%d)", ret);
        goto fail;
    }
    memcpy(w->config->h265.headers, nal->payload, ret);
    w->config->h265.headers_length = ret;

    return 0;

fail:
    w->private_data = NULL;
    free(pv);
    return 1;
}
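The keyframe interval defaults set above derive straight from the frame rate: keyframeMin is the rate rounded to the nearest integer, keyframeMax ten times that. A tiny worked sketch for a 30000/1001 (29.97 fps) source, with purely illustrative values:

// Worked sketch of the keyframe-interval defaults for 29.97 fps.
#include <stdio.h>

int main(void)
{
    int num = 30000, den = 1001;                // 29.97 fps
    int keyframeMin = (double)num / den + 0.5;  // 29.97 + 0.5 -> truncates to 30
    int keyframeMax = keyframeMin * 10;         // 300 frames, roughly 10 seconds
    printf("keyframeMin=%d keyframeMax=%d\n", keyframeMin, keyframeMax);
    return 0;
}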
Exemplo n.º 13
0
static int reader_work( hb_work_object_t * w, hb_buffer_t ** buf_in,
                        hb_buffer_t ** buf_out)
{
    hb_work_private_t  * r = w->private_data;
    hb_fifo_t         ** fifos;
    hb_buffer_t        * buf;
    hb_buffer_list_t     list;
    int                  ii, chapter = -1;

    hb_buffer_list_clear(&list);

    if (r->bd)
        chapter = hb_bd_chapter( r->bd );
    else if (r->dvd)
        chapter = hb_dvd_chapter( r->dvd );
    else if (r->stream)
        chapter = hb_stream_chapter( r->stream );

    if( chapter < 0 )
    {
        hb_log( "reader: end of the title reached" );
        reader_send_eof(r);
        return HB_WORK_DONE;
    }
    if( chapter > r->chapter_end )
    {
        hb_log("reader: end of chapter %d (media %d) reached at media chapter %d",
                r->job->chapter_end, r->chapter_end, chapter);
        reader_send_eof(r);
        return HB_WORK_DONE;
    }

    if (r->bd)
    {
        if( (buf = hb_bd_read( r->bd )) == NULL )
        {
            reader_send_eof(r);
            return HB_WORK_DONE;
        }
    }
    else if (r->dvd)
    {
        if( (buf = hb_dvd_read( r->dvd )) == NULL )
        {
            reader_send_eof(r);
            return HB_WORK_DONE;
        }
    }
    else if (r->stream)
    {
        if ( (buf = hb_stream_read( r->stream )) == NULL )
        {
            reader_send_eof(r);
            return HB_WORK_DONE;
        }
    }
    else
    {
        // This should never happen
        hb_error("Stream not initialized");
        reader_send_eof(r);
        return HB_WORK_DONE;
    }

    (hb_demux[r->title->demuxer])(buf, &list, &r->demux);

    while ((buf = hb_buffer_list_rem_head(&list)) != NULL)
    {
        fifos = GetFifoForId( r, buf->s.id );
        if (fifos && r->stream && !r->start_found)
        {
            // libav lets SSA subtitles from before the seek point leak
            // through, so only make the adjustment to pts_to_start after
            // we see the next video buffer.
            if (buf->s.id != r->job->title->video_id)
            {
                hb_buffer_close(&buf);
                continue;
            }
            // We will inspect the timestamps of each frame in sync
            // to skip from this seek point to the timestamp we
            // want to start at.
            if (buf->s.start != AV_NOPTS_VALUE &&
                buf->s.start < r->job->pts_to_start)
            {
                r->job->pts_to_start -= buf->s.start;
            }
            else if ( buf->s.start >= r->job->pts_to_start )
            {
                r->job->pts_to_start = 0;
            }
            r->start_found = 1;
        }

        if (buf->s.start   != AV_NOPTS_VALUE &&
            r->scr_changes != r->demux.scr_changes)
        {
            // First valid timestamp after an SCR change.  Update
            // the per-stream scr sequence number
            r->scr_changes = r->demux.scr_changes;

            // libav tries to be too smart with timestamps and
            // enforces unnecessary conditions.  One such condition
            // is that subtitle timestamps must be monotonically
            // increasing.  To ensure this is the case, we calculate
            // an offset upon each SCR change that will guarantee this.
            // This is just a very rough SCR offset.  A fine grained
            // offset that maintains proper sync is calculated in sync.c
            if (r->last_pts != AV_NOPTS_VALUE)
            {
                r->scr_offset  = r->last_pts + 90000 - buf->s.start;
            }
            else
            {
                r->scr_offset  = -buf->s.start;
            }
        }
        // Set the scr sequence that this buffer's timestamps are
        // referenced to.
        buf->s.scr_sequence = r->scr_changes;
        if (buf->s.start != AV_NOPTS_VALUE)
        {
            buf->s.start += r->scr_offset;
        }
        if (buf->s.renderOffset != AV_NOPTS_VALUE)
        {
            buf->s.renderOffset += r->scr_offset;
        }
        if (buf->s.start > r->last_pts)
        {
            r->last_pts = buf->s.start;
            UpdateState(r);
        }

        buf = splice_discontinuity(r, buf);
        if (fifos && buf != NULL)
        {
            /* if there are multiple output fifos, send a copy of the
             * buffer down all but the first (we must not ship the original
             * buffer down more than one, or we'd race with the thread
             * consuming it and inject garbage into the data stream). */
            for (ii = 1; fifos[ii] != NULL; ii++)
            {
                hb_buffer_t *buf_copy = hb_buffer_init(buf->size);
                buf_copy->s = buf->s;
                memcpy(buf_copy->data, buf->data, buf->size);
                push_buf(r, fifos[ii], buf_copy);
            }
            push_buf(r, fifos[0], buf);
            buf = NULL;
        }
        else
        {
            hb_buffer_close(&buf);
        }
    }

    hb_buffer_list_close(&list);
    return HB_WORK_OK;
}
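The rough SCR offset computed above simply re-bases the new clock so it resumes one second after the last delivered pts, which keeps timestamps monotonically increasing across the discontinuity (the fine-grained correction happens later in sync.c). A worked sketch with illustrative 90 kHz values:

// Worked sketch of the rough SCR offset; numbers are illustrative only.
#include <stdint.h>
#include <stdio.h>

int main(void)
{
    int64_t last_pts  = 4500000;   // last timestamp seen: 50 s
    int64_t new_start = 900000;    // first timestamp after the SCR change: 10 s
    // Offset the new clock so it resumes one second (90000 ticks) after the
    // last delivered pts.
    int64_t scr_offset = last_pts + 90000 - new_start;              // 3,690,000
    printf("adjusted start = %lld\n", (long long)(new_start + scr_offset));
    // adjusted start = 4,590,000 ticks, i.e. 51 s
    return 0;
}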
Exemplo n.º 14
0
int encvorbisInit(hb_work_object_t *w, hb_job_t *job)
{
    hb_work_private_t *pv = calloc(1, sizeof(hb_work_private_t));
    hb_audio_t *audio = w->audio;
    w->private_data = pv;
    pv->job = job;

    int i;
    ogg_packet header[3];

    hb_log("encvorbis: opening libvorbis");

    /* init */
    for (i = 0; i < 3; i++)
    {
        // Zero vorbis headers so that we don't crash in mk_laceXiph
        // when vorbis_encode_setup_managed fails.
        memset(w->config->vorbis.headers[i], 0, sizeof(ogg_packet));
    }
    vorbis_info_init(&pv->vi);

    pv->out_discrete_channels =
        hb_mixdown_get_discrete_channel_count(audio->config.out.mixdown);

    if (audio->config.out.bitrate > 0)
    {
        if (vorbis_encode_setup_managed(&pv->vi, pv->out_discrete_channels,
                                        audio->config.out.samplerate, -1,
                                        audio->config.out.bitrate * 1000, -1))
        {
            hb_error("encvorbis: vorbis_encode_setup_managed() failed");
            *job->die = 1;
            return -1;
        }
    }
    else if (audio->config.out.quality != HB_INVALID_AUDIO_QUALITY)
    {
        // map VBR quality to Vorbis API (divide by 10)
        if (vorbis_encode_setup_vbr(&pv->vi, pv->out_discrete_channels,
                                    audio->config.out.samplerate,
                                    audio->config.out.quality / 10))
        {
            hb_error("encvorbis: vorbis_encode_setup_vbr() failed");
            *job->die = 1;
            return -1;
        }
    }

    if (vorbis_encode_ctl(&pv->vi, OV_ECTL_RATEMANAGE2_SET, NULL) ||
        vorbis_encode_setup_init(&pv->vi))
    {
        hb_error("encvorbis: vorbis_encode_ctl(ratemanage2_set) OR vorbis_encode_setup_init() failed");
        *job->die = 1;
        return -1;
    }

    /* add a comment */
    vorbis_comment_init(&pv->vc);
    vorbis_comment_add_tag(&pv->vc, "Encoder", "HandBrake");
    vorbis_comment_add_tag(&pv->vc, "LANGUAGE", w->config->vorbis.language);

    /* set up the analysis state and auxiliary encoding storage */
    vorbis_analysis_init(&pv->vd, &pv->vi);
    vorbis_block_init(&pv->vd, &pv->vb);

    /* get the 3 headers */
    vorbis_analysis_headerout(&pv->vd, &pv->vc,
                              &header[0], &header[1], &header[2]);
    for (i = 0; i < 3; i++)
    {
        memcpy(w->config->vorbis.headers[i], &header[i], sizeof(ogg_packet));
        memcpy(w->config->vorbis.headers[i] + sizeof(ogg_packet),
               header[i].packet, header[i].bytes);
    }

    pv->input_samples = pv->out_discrete_channels * OGGVORBIS_FRAME_SIZE;
    audio->config.out.samples_per_frame = OGGVORBIS_FRAME_SIZE;
    pv->buf = malloc(pv->input_samples * sizeof(float));

    pv->list = hb_list_init();

    // channel remapping
    uint64_t layout = hb_ff_mixdown_xlat(audio->config.out.mixdown, NULL);
    pv->remap_table = hb_audio_remap_build_table(layout, &hb_vorbis_chan_map,
                                                 audio->config.in.channel_map);
    if (pv->remap_table == NULL)
    {
        hb_error("encvorbisInit: hb_audio_remap_build_table() failed");
        *job->die = 1;
        return -1;
    }

    return 0;
}
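For reference, the VBR path above follows the usual libvorbis setup sequence: vorbis_info_init, vorbis_encode_setup_vbr, then vorbis_encode_ctl/vorbis_encode_setup_init. A stripped-down standalone sketch (stereo, 48 kHz, base quality 0.5; error handling reduced to return codes, helper name illustrative):

// Minimal libvorbis VBR setup sketch.
#include <vorbis/vorbisenc.h>

static int vorbis_vbr_setup(vorbis_info *vi)
{
    vorbis_info_init(vi);
    if (vorbis_encode_setup_vbr(vi, 2, 48000, 0.5f))       // quality in -0.1 .. 1.0
        return -1;
    if (vorbis_encode_ctl(vi, OV_ECTL_RATEMANAGE2_SET, NULL) ||
        vorbis_encode_setup_init(vi))
        return -1;
    return 0;
}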
Exemplo n.º 15
0
static int MKVMux(hb_mux_object_t *m, hb_mux_data_t *mux_data, hb_buffer_t *buf)
{
    char chapter_name[1024];
    hb_chapter_t *chapter_data;
    uint64_t timecode = 0;
    ogg_packet *op    = NULL;
    hb_job_t *job     = m->job;

    if (mux_data == job->mux_data)
    {
        /* Video */
        timecode = buf->s.start * TIMECODE_SCALE;

        if (job->chapter_markers && buf->s.new_chap)
        {
            // reached chapter N, write marker for chapter N-1
            mux_data->current_chapter = buf->s.new_chap - 1;

            // chapter numbers start at 1, but the list starts at 0
            chapter_data = hb_list_item(job->list_chapter,
                                        mux_data->current_chapter - 1);

            // make sure we're not writing a chapter that has 0 length
            if (chapter_data != NULL && mux_data->prev_chapter_tc < timecode)
            {
                if (chapter_data->title != NULL)
                {
                    snprintf(chapter_name, 1023, "%s", chapter_data->title);
                }
                else
                {
                    snprintf(chapter_name, 1023, "Chapter %d",
                             mux_data->current_chapter);
                }
                mk_createChapterSimple(m->file,
                                       mux_data->prev_chapter_tc,
                                       mux_data->prev_chapter_tc, chapter_name);
            }
            mux_data->prev_chapter_tc = timecode;
        }

        if (job->vcodec == HB_VCODEC_THEORA)
        {
            /* ughhh, theora is a pain :( */
            op = (ogg_packet *)buf->data;
            op->packet = buf->data + sizeof( ogg_packet );
            if (mk_startFrame(m->file, mux_data->track) < 0)
            {
                hb_error( "Failed to write frame to output file, Disk Full?" );
                *job->die = 1;
            }
            mk_addFrameData(m->file, mux_data->track, op->packet, op->bytes);
            mk_setFrameFlags(m->file, mux_data->track, timecode, 1, 0);
            hb_buffer_close( &buf );
            return 0;
        }
    }
    else if (mux_data->subtitle)
    {
        if( mk_startFrame(m->file, mux_data->track) < 0)
        {
            hb_error("Failed to write frame to output file, Disk Full?");
            *job->die = 1;
        }
        uint64_t duration;
        timecode = buf->s.start * TIMECODE_SCALE;
        if (buf->s.stop <= buf->s.start)
        {
            duration = 0;
        }
        else
        {
            duration = buf->s.stop * TIMECODE_SCALE - timecode;
        }
        mk_addFrameData(m->file, mux_data->track, buf->data, buf->size);
        mk_setFrameFlags(m->file, mux_data->track, timecode, 1, duration);
        mk_flushFrame(m->file, mux_data->track);
        hb_buffer_close(&buf);
        return 0;
    }
    else
    {
        /* Audio */
        timecode = buf->s.start * TIMECODE_SCALE;
        if (mux_data->codec == HB_ACODEC_VORBIS)
        {
            /* ughhh, vorbis is a pain :( */
            op = (ogg_packet *)buf->data;
            op->packet = buf->data + sizeof( ogg_packet );
            if (mk_startFrame(m->file, mux_data->track))
            {
                hb_error( "Failed to write frame to output file, Disk Full?" );
                *job->die = 1;
            }
            mk_addFrameData(m->file, mux_data->track, op->packet, op->bytes);
            mk_setFrameFlags(m->file, mux_data->track, timecode, 1, 0);
            hb_buffer_close( &buf );
            return 0;
        }
    }

    if( mk_startFrame(m->file, mux_data->track) < 0)
    {
        hb_error( "Failed to write frame to output file, Disk Full?" );
        *job->die = 1;
    }
    mk_addFrameData(m->file, mux_data->track, buf->data, buf->size);
    mk_setFrameFlags(m->file, mux_data->track, timecode,
                     (((job->vcodec == HB_VCODEC_X264 || 
                        (job->vcodec & HB_VCODEC_FFMPEG_MASK)) && 
                       mux_data == job->mux_data) ? 
                            (buf->s.frametype == HB_FRAME_IDR) : 
                            ((buf->s.frametype & HB_FRAME_KEY) != 0)), 0 );
    hb_buffer_close( &buf );
    return 0;
}
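The keyframe flag passed to mk_setFrameFlags() at the end encodes a small decision: for H.264/FFmpeg video tracks only IDR frames count as keyframes, while other tracks rely on the generic key-frame flag. The same logic, pulled out into a hypothetical helper purely for readability:

// Sketch only: the keyframe decision folded into mk_setFrameFlags() above.
static int is_mkv_keyframe(hb_job_t *job, hb_mux_data_t *mux_data, hb_buffer_t *buf)
{
    int strict_idr = (job->vcodec == HB_VCODEC_X264 ||
                      (job->vcodec & HB_VCODEC_FFMPEG_MASK)) &&
                     mux_data == job->mux_data;
    return strict_idr ? (buf->s.frametype == HB_FRAME_IDR)
                      : ((buf->s.frametype & HB_FRAME_KEY) != 0);
}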
Exemplo n.º 16
0
/***********************************************************************
 * DecodePreviews
 ***********************************************************************
 * Decode up to data->preview_count pictures for the given title.
 * It assumes that data->reader and data->vts have successfully been
 * DVDOpen()ed and ifoOpen()ed.
 **********************************************************************/
static int DecodePreviews( hb_scan_t * data, hb_title_t * title )
{
    int             i, npreviews = 0;
    hb_buffer_t   * buf, * buf_es;
    hb_list_t     * list_es;
    int progressive_count = 0;
    int pulldown_count = 0;
    int doubled_frame_count = 0;
    int interlaced_preview_count = 0;
    info_list_t * info_list = calloc( data->preview_count+1, sizeof(*info_list) );
    crop_record_t *crops = crop_record_init( data->preview_count );

    list_es  = hb_list_init();

    if( data->batch )
    {
        hb_log( "scan: decoding previews for title %d (%s)", title->index, title->path );
    }
    else
    {
        hb_log( "scan: decoding previews for title %d", title->index );
    }

    if (data->bd)
    {
        hb_bd_start( data->bd, title );
        hb_log( "scan: title angle(s) %d", title->angle_count );
    }
    else if (data->dvd)
    {
        hb_dvd_start( data->dvd, title, 1 );
        title->angle_count = hb_dvd_angle_count( data->dvd );
        hb_log( "scan: title angle(s) %d", title->angle_count );
    }
    else if (data->batch)
    {
        data->stream = hb_stream_open( title->path, title, 1 );
    }

    if (title->video_codec == WORK_NONE)
    {
        hb_error("No video decoder set!");
        return 0;
    }
    hb_work_object_t *vid_decoder = hb_get_work(title->video_codec);
    vid_decoder->codec_param = title->video_codec_param;
    vid_decoder->title = title;
    vid_decoder->init( vid_decoder, NULL );

    for( i = 0; i < data->preview_count; i++ )
    {
        int j;

        UpdateState3(data, i + 1);

        if ( *data->die )
        {
            free( info_list );
            crop_record_free( crops );
            return 0;
        }
        if (data->bd)
        {
            if( !hb_bd_seek( data->bd, (float) ( i + 1 ) / ( data->preview_count + 1.0 ) ) )
            {
                continue;
            }
        }
        if (data->dvd)
        {
            if( !hb_dvd_seek( data->dvd, (float) ( i + 1 ) / ( data->preview_count + 1.0 ) ) )
            {
                continue;
            }
        }
        else if (data->stream)
        {
            /* we start reading streams at zero rather than 1/11 because
             * short streams may have only one sequence header in the entire
             * file and we need it to decode any previews. */
            if( !hb_stream_seek( data->stream, (float) i / ( data->preview_count + 1.0 ) ) )
            {
                continue;
            }
        }

        hb_deep_log( 2, "scan: preview %d", i + 1 );

        if ( vid_decoder->flush )
            vid_decoder->flush( vid_decoder );

        hb_buffer_t * vid_buf = NULL;

        for( j = 0; j < 10240 ; j++ )
        {
            if (data->bd)
            {
                if( (buf = hb_bd_read( data->bd )) == NULL )
                {
                    if ( vid_buf )
                    {
                        break;
                    }
                    hb_log( "Warning: Could not read data for preview %d, skipped", i + 1 );
                    goto skip_preview;
                }
            }
            else if (data->dvd)
            {
                if( (buf = hb_dvd_read( data->dvd )) == NULL )
                {
                    if ( vid_buf )
                    {
                        break;
                    }
                    hb_log( "Warning: Could not read data for preview %d, skipped", i + 1 );
                    goto skip_preview;
                }
            }
            else if (data->stream)
            {
                if( (buf = hb_stream_read( data->stream )) == NULL )
                {
                    if ( vid_buf )
                    {
                        break;
                    }
                    hb_log( "Warning: Could not read data for preview %d, skipped", i + 1 );
                    goto skip_preview;
                }
            }
            else
            {
                // Silence compiler warning
                buf = NULL;
                hb_error( "Error: This can't happen!" );
                goto skip_preview;
            }

            (hb_demux[title->demuxer])(buf, list_es, 0 );

            while( ( buf_es = hb_list_item( list_es, 0 ) ) )
            {
                hb_list_rem( list_es, buf_es );
                if( buf_es->s.id == title->video_id && vid_buf == NULL )
                {
                    vid_decoder->work( vid_decoder, &buf_es, &vid_buf );
                }
                else if( ! AllAudioOK( title ) ) 
                {
                    LookForAudio( title, buf_es );
                    buf_es = NULL;
                }
                if ( buf_es )
                    hb_buffer_close( &buf_es );
            }

            if( vid_buf && AllAudioOK( title ) )
                break;
        }

        if( ! vid_buf )
        {
            hb_log( "scan: could not get a decoded picture" );
            continue;
        }

        /* Get size and rate infos */

        hb_work_info_t vid_info;
        if( !vid_decoder->info( vid_decoder, &vid_info ) )
        {
            /*
             * Could not fill vid_info, don't continue and try to use vid_info
             * in this case.
             */
            if (vid_buf)
            {
                hb_buffer_close( &vid_buf );
            }
            hb_log( "scan: could not get a video information" );
            continue;
        }

        remember_info( info_list, &vid_info );

        if( is_close_to( vid_info.rate_base, 900900, 100 ) &&
            ( vid_buf->s.flags & PIC_FLAG_REPEAT_FIRST_FIELD ) )
        {
            /* Potentially soft telecine material */
            pulldown_count++;
        }

        if( vid_buf->s.flags & PIC_FLAG_REPEAT_FRAME )
        {
            // AVCHD-Lite specifies that all streams are
            // 50 or 60 fps.  To produce 25 or 30 fps, camera
            // makers are repeating all frames.
            doubled_frame_count++;
        }

        if( is_close_to( vid_info.rate_base, 1126125, 100 ) )
        {
            // Frame FPS is 23.976 (meaning it's progressive), so start keeping
            // track of how many are reporting at that speed. When enough 
            // show up that way, we want to make that the overall title FPS.
            progressive_count++;
        }

        while( ( buf_es = hb_list_item( list_es, 0 ) ) )
        {
            hb_list_rem( list_es, buf_es );
            hb_buffer_close( &buf_es );
        }

        /* Check preview for interlacing artifacts */
        if( hb_detect_comb( vid_buf, 10, 30, 9, 10, 30, 9 ) )
        {
            hb_deep_log( 2, "Interlacing detected in preview frame %i", i+1);
            interlaced_preview_count++;
        }
        
        if( data->store_previews )
        {
            hb_save_preview( data->h, title->index, i, vid_buf );
        }

        /* Detect black borders */

        int top, bottom, left, right;
        int h4 = vid_info.height / 4, w4 = vid_info.width / 4;

        // When widescreen content is matted to 16:9 or 4:3 there's sometimes
        // a thin border on the outer edge of the matte. On TV content it can be
        // "line 21" VBI data that's normally hidden in the overscan. For HD
        // content it can just be a diagnostic added in post production so that
        // the frame borders are visible. We try to ignore these borders so
        // we can crop the matte. The border width depends on the resolution
        // (12 pixels on 1080i looks visually the same as 4 pixels on 480i)
        // so we allow the border to be up to 1% of the frame height.
        const int border = vid_info.height / 100;

        for ( top = border; top < h4; ++top )
        {
            if ( ! row_all_dark( vid_buf, top ) )
                break;
        }
        if ( top <= border )
        {
            // we never made it past the border region - see if the rows we
            // didn't check are dark or if we shouldn't crop at all.
            for ( top = 0; top < border; ++top )
            {
                if ( ! row_all_dark( vid_buf, top ) )
                    break;
            }
            if ( top >= border )
            {
                top = 0;
            }
        }
        for ( bottom = border; bottom < h4; ++bottom )
        {
            if ( ! row_all_dark( vid_buf, vid_info.height - 1 - bottom ) )
                break;
        }
        if ( bottom <= border )
        {
            for ( bottom = 0; bottom < border; ++bottom )
            {
                if ( ! row_all_dark( vid_buf, vid_info.height - 1 - bottom ) )
                    break;
            }
            if ( bottom >= border )
            {
                bottom = 0;
            }
        }
        for ( left = 0; left < w4; ++left )
        {
            if ( ! column_all_dark( vid_buf, top, bottom, left ) )
                break;
        }
        for ( right = 0; right < w4; ++right )
        {
            if ( ! column_all_dark( vid_buf, top, bottom, vid_info.width - 1 - right ) )
                break;
        }

        // only record the result if all the crops are less than a quarter of
        // the frame otherwise we can get fooled by frames with a lot of black
        // like titles, credits & fade-thru-black transitions.
        if ( top < h4 && bottom < h4 && left < w4 && right < w4 )
        {
            record_crop( crops, top, bottom, left, right );
        }
        ++npreviews;

skip_preview:
        /* Make sure we found audio rates and bitrates */
        for( j = 0; j < hb_list_count( title->list_audio ); j++ )
        {
            hb_audio_t * audio = hb_list_item( title->list_audio, j );
            if ( audio->priv.scan_cache )
            {
                hb_fifo_flush( audio->priv.scan_cache );
            }
        }
        if (vid_buf)
        {
            hb_buffer_close( &vid_buf );
        }
    }
    UpdateState3(data, i);

    vid_decoder->close( vid_decoder );
    free( vid_decoder );

    if ( data->batch && data->stream )
    {
        hb_stream_close( &data->stream );
    }

    if ( npreviews )
    {
        // use the most common frame info for our final title dimensions
        hb_work_info_t vid_info;
        most_common_info( info_list, &vid_info );

        title->has_resolution_change = has_resolution_change( info_list );
        if ( title->video_codec_name == NULL )
        {
            title->video_codec_name = strdup( vid_info.name );
        }
        title->width = vid_info.width;
        title->height = vid_info.height;
        if ( vid_info.rate && vid_info.rate_base )
        {
            // if the frame rate is very close to one of our "common" framerates,
            // assume it actually is said frame rate; e.g. some 24000/1001 sources
            // may have a rate_base of 1126124 (instead of 1126125)
            const hb_rate_t *video_framerate = NULL;
            while ((video_framerate = hb_video_framerate_get_next(video_framerate)) != NULL)
            {
                if (is_close_to(vid_info.rate_base, video_framerate->rate, 100))
                {
                    vid_info.rate_base = video_framerate->rate;
                    break;
                }
            }
            title->rate = vid_info.rate;
            title->rate_base = vid_info.rate_base;
            if( vid_info.rate_base == 900900 )
            {
                if( npreviews >= 4 && pulldown_count >= npreviews / 4 )
                {
                    title->rate_base = 1126125;
                    hb_deep_log( 2, "Pulldown detected, setting fps to 23.976" );
                }
                if( npreviews >= 2 && progressive_count >= npreviews / 2 )
                {
                    // We've already deduced that the frame rate is 23.976,
                    // so set it back again.
                    title->rate_base = 1126125;
                    hb_deep_log( 2, "Title's mostly NTSC Film, setting fps to 23.976" );
                }
            }
            if( npreviews >= 2 && doubled_frame_count >= 3 * npreviews / 4 )
            {
                // We've detected that a significant number of the frames
                // have been doubled in duration by repeat flags.
                title->rate_base = 2 * vid_info.rate_base;
                hb_deep_log( 2, "Repeat frames detected, setting fps to %.3f", (float)title->rate / title->rate_base );
            }
        }
        title->video_bitrate = vid_info.bitrate;

        if( vid_info.pixel_aspect_width && vid_info.pixel_aspect_height )
        {
            title->pixel_aspect_width = vid_info.pixel_aspect_width;
            title->pixel_aspect_height = vid_info.pixel_aspect_height;
        }
        title->color_prim = vid_info.color_prim;
        title->color_transfer = vid_info.color_transfer;
        title->color_matrix = vid_info.color_matrix;

        title->video_decode_support = vid_info.video_decode_support;

        // TODO: check video dimensions
        title->opencl_support = !!hb_opencl_available();

        // compute the aspect ratio based on the storage dimensions and the
        // pixel aspect ratio (if supplied) or just storage dimensions if no PAR.
        title->aspect = (double)title->width / (double)title->height;
        title->aspect *= (double)title->pixel_aspect_width /
                         (double)title->pixel_aspect_height;

        // For unknown reasons some French PAL DVDs put the original
        // content's aspect ratio into the mpeg PAR even though it's
        // the wrong PAR for the DVD. Apparently they rely on the fact
        // that DVD players ignore the content PAR and just use the
        // aspect ratio from the DVD metadata. So, if the aspect computed
        // from the PAR is different from the container's aspect we use
        // the container's aspect & recompute the PAR from it.
        if( title->container_aspect && (int)(title->aspect * 9) != (int)(title->container_aspect * 9) )
        {
            hb_log("scan: content PAR gives wrong aspect %.2f; "
                   "using container aspect %.2f", title->aspect,
                   title->container_aspect );
            title->aspect = title->container_aspect;
            hb_reduce( &title->pixel_aspect_width, &title->pixel_aspect_height,
                       (int)(title->aspect * title->height + 0.5), title->width );
        }

        // don't try to crop unless we got at least 3 previews
        if ( crops->n > 2 )
        {
            sort_crops( crops );
            // The next line selects median cropping - at least
            // 50% of the frames will have their borders removed.
            // Other possible choices are loose cropping (i = 0) where 
            // no non-black pixels will be cropped from any frame and a
            // tight cropping (i = crops->n - (crops->n >> 2)) where at
            // least 75% of the frames will have their borders removed.
            i = crops->n >> 1;
            title->crop[0] = EVEN( crops->t[i] );
            title->crop[1] = EVEN( crops->b[i] );
            title->crop[2] = EVEN( crops->l[i] );
            title->crop[3] = EVEN( crops->r[i] );
        }

        hb_log( "scan: %d previews, %dx%d, %.3f fps, autocrop = %d/%d/%d/%d, "
                "aspect %s, PAR %d:%d",
                npreviews, title->width, title->height, (float) title->rate /
                (float) title->rate_base,
                title->crop[0], title->crop[1], title->crop[2], title->crop[3],
                aspect_to_string( title->aspect ), title->pixel_aspect_width,
                title->pixel_aspect_height );

        if( interlaced_preview_count >= ( npreviews / 2 ) )
        {
            hb_log("Title is likely interlaced or telecined (%i out of %i previews). You should do something about that.",
                   interlaced_preview_count, npreviews);
            title->detected_interlacing = 1;
        }
        else
        {
            title->detected_interlacing = 0;
        }
    }
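The autocrop selection above picks the median of the sorted per-preview crop values (index crops->n >> 1) and then forces an even pixel count via EVEN(), which keeps the crop aligned for 4:2:0 chroma. A small worked sketch with made-up samples:

// Illustrative values only.
#include <stdio.h>

int main(void)
{
    int t[] = { 0, 0, 2, 6, 8, 8, 10 };     // sorted top-crop samples from 7 previews
    int n = 7;
    int i = n >> 1;                         // median index: 3
    printf("median top crop = %d\n", t[i]); // 6; EVEN() then enforces an even value
    return 0;
}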
Exemplo n.º 17
0
static int MKVEnd(hb_mux_object_t *m)
{
    char chapter_name[1024];
    hb_chapter_t *chapter_data;
    hb_job_t *job           = m->job;
    hb_mux_data_t *mux_data = job->mux_data;

    if( !job->mux_data )
    {
        /*
         * We must have failed to create the file in the first place.
         */
        return 0;
    }

    if (job->chapter_markers)
    {
        // get the last chapter
        chapter_data = hb_list_item(job->list_chapter,
                                    mux_data->current_chapter++);

        // only write the last chapter marker if it lasts at least 1.5 seconds
        // (135000 ticks at the 90 kHz timestamp clock)
        if (chapter_data != NULL && chapter_data->duration > 135000LL)
        {
            if (chapter_data->title != NULL)
            {
                snprintf(chapter_name, 1023, "%s", chapter_data->title);
            }
            else
            {
                snprintf(chapter_name, 1023, "Chapter %d",
                         mux_data->current_chapter);
            }
            mk_createChapterSimple(m->file,
                                   mux_data->prev_chapter_tc,
                                   mux_data->prev_chapter_tc, chapter_name);
        }
    }

    if( job->metadata )
    {
        hb_metadata_t *md = job->metadata;

        hb_deep_log( 2, "Writing Metadata to output file...");
        if ( md->name )
        {
            mk_createTagSimple( m->file, MK_TAG_TITLE, md->name );
        }
        if ( md->artist )
        {
            mk_createTagSimple( m->file, "ARTIST", md->artist );
        }
        if ( md->album_artist )
        {
            mk_createTagSimple( m->file, "DIRECTOR", md->album_artist );
        }
        if ( md->composer )
        {
            mk_createTagSimple( m->file, "COMPOSER", md->composer );
        }
        if ( md->release_date )
        {
            mk_createTagSimple( m->file, "DATE_RELEASED", md->release_date );
        }
        if ( md->comment )
        {
            mk_createTagSimple( m->file, "SUMMARY", md->comment );
        }
        if ( !md->name && md->album )
        {
            mk_createTagSimple( m->file, MK_TAG_TITLE, md->album );
        }
        if ( md->genre )
        {
            mk_createTagSimple( m->file, MK_TAG_GENRE, md->genre );
        }
        if ( md->description )
        {
            mk_createTagSimple( m->file, "DESCRIPTION", md->description );
        }
        if ( md->long_description )
        {
            mk_createTagSimple( m->file, "SYNOPSIS", md->long_description );
        }
    }

    // Update and track private data that can change during
    // encode.
    int i;
    for( i = 0; i < hb_list_count( job->list_audio ); i++ )
    {
        mk_Track  * track;
        hb_audio_t    * audio;

        audio = hb_list_item( job->list_audio, i );
        track = audio->priv.mux_data->track;

        switch (audio->config.out.codec & HB_ACODEC_MASK)
        {
            case HB_ACODEC_FFFLAC:
            case HB_ACODEC_FFFLAC24:
                if( audio->priv.config.extradata.bytes )
                {
                    uint8_t *header;
                    header = create_flac_header( 
                            audio->priv.config.extradata.bytes,
                            audio->priv.config.extradata.length );
                    mk_updateTrackPrivateData( m->file, track,
                        header,
                        audio->priv.config.extradata.length + 8 );
                    free( header );
                }
                break;
            default:
                break;
        }
    }

    if( mk_close(m->file) < 0 )
    {
        hb_error( "Failed to flush the last frame and close the output file, Disk Full?" );
        *job->die = 1;
    }

    // TODO: Free what we alloc'd

    return 0;
}
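
A note on the 135000LL threshold above: the chapter durations compared here are on HandBrake's 90 kHz timestamp clock, so 135000 ticks is the 1.5 seconds the comment mentions. A minimal sketch of that conversion (the constant name is ours):

#include <stdio.h>
#include <stdint.h>

#define TICKS_PER_SECOND 90000LL    /* 90 kHz mux timestamp clock (assumed) */

int main(void)
{
    int64_t threshold = 135000LL;   /* minimum chapter duration used above */
    printf("%lld ticks = %.2f seconds\n", (long long)threshold,
           (double)threshold / TICKS_PER_SECOND);   /* prints 1.50 */
    return 0;
}
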
Exemplo n.º 18
hb_filter_private_t * hb_rotate_init( int pix_fmt,
                                           int width,
                                           int height,
                                           char * settings )
{
    if( pix_fmt != PIX_FMT_YUV420P )
    {
        return 0;
    }

    hb_filter_private_t * pv = calloc( 1, sizeof(struct hb_filter_private_s) );

    pv->pix_fmt = pix_fmt;

    pv->width[0]  = width;
    pv->height[0] = height;
    pv->width[1]  = pv->width[2]  = width >> 1;
    pv->height[1] = pv->height[2] = height >> 1;

    pv->buf_out = hb_video_buffer_init( width, height );
    pv->buf_settings = hb_buffer_init( 0 );

    pv->mode     = MODE_DEFAULT;

    pv->ref_stride[0] = pv->width[0];
    pv->ref_stride[1] = pv->width[1];
    pv->ref_stride[2] = pv->width[2];
    
    if( settings )
    {
        sscanf( settings, "%d",
                &pv->mode );
    }

    pv->cpu_count = hb_get_cpu_count();


    /*
     * Create threads and locks.
     */
    pv->rotate_threads = malloc( sizeof( hb_thread_t* ) * pv->cpu_count );
    pv->rotate_begin_lock = malloc( sizeof( hb_lock_t * ) * pv->cpu_count );
    pv->rotate_complete_lock = malloc( sizeof( hb_lock_t * ) * pv->cpu_count );
    pv->rotate_arguments = malloc( sizeof( rotate_arguments_t ) * pv->cpu_count );

    int i;
    for( i = 0; i < pv->cpu_count; i++ )
    {
        rotate_thread_arg_t *thread_args;
    
        thread_args = malloc( sizeof( rotate_thread_arg_t ) );
    
        if( thread_args ) {
            thread_args->pv = pv;
            thread_args->segment = i;
    
            pv->rotate_begin_lock[i] = hb_lock_init();
            pv->rotate_complete_lock[i] = hb_lock_init();
    
            /*
             * Important to start off with the threads locked waiting
             * on input.
             */
            hb_lock( pv->rotate_begin_lock[i] );
    
            pv->rotate_arguments[i].stop = 0;
            pv->rotate_arguments[i].dst = NULL;
            
            pv->rotate_threads[i] = hb_thread_init( "rotate_filter_segment",
                                                   rotate_filter_thread,
                                                   thread_args,
                                                   HB_NORMAL_PRIORITY );
        } else {
            hb_error( "rotate could not create threads" );
        }
    }

    return pv;
}
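
The begin/complete locks above implement a simple handshake: each worker is created with its begin lock already held, so it blocks until the filter publishes work and releases it, then signals back through the complete lock. Below is a minimal standalone sketch of the same handshake for a single worker, using POSIX semaphores instead of hb_lock (the names and the choice of semaphores are ours, for illustration only).

#include <pthread.h>
#include <semaphore.h>
#include <stdio.h>

static sem_t begin, complete;   /* stand-ins for rotate_begin/complete_lock */
static int   frame_number;      /* stand-in for the per-segment work args   */

static void *worker(void *arg)
{
    (void)arg;
    sem_wait(&begin);           /* block until work is published */
    printf("worker: processing frame %d\n", frame_number);
    sem_post(&complete);        /* report that this segment is done */
    return NULL;
}

int main(void)
{
    pthread_t t;

    /* Both counts start at 0, i.e. the worker starts "locked waiting on input". */
    sem_init(&begin, 0, 0);
    sem_init(&complete, 0, 0);
    pthread_create(&t, NULL, worker, NULL);

    frame_number = 1;           /* publish the work...        */
    sem_post(&begin);           /* ...then release the worker */
    sem_wait(&complete);        /* wait for it to finish      */

    pthread_join(t, NULL);
    sem_destroy(&begin);
    sem_destroy(&complete);
    return 0;
}

Starting both counts at zero is what the comment in hb_rotate_init() means by starting the threads locked: a worker can never run ahead of the data it is supposed to process.
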
Exemplo n.º 19
File: bd.c Project: eneko/HandBrake
/***********************************************************************
 * hb_bd_read
 ***********************************************************************
 *
 **********************************************************************/
hb_buffer_t * hb_bd_read( hb_bd_t * d )
{
    int result;
    int error_count = 0;
    uint8_t buf[192];
    BD_EVENT event;
    uint64_t pos;
    hb_buffer_t * b;
    uint8_t discontinuity;
    int new_chap = 0;

    discontinuity = 0;
    while ( 1 )
    {
        if ( d->next_chap != d->chapter )
        {
            new_chap = d->chapter = d->next_chap;
        }
        result = next_packet( d->bd, buf );
        if ( result < 0 )
        {
            hb_error("bd: Read Error");
            pos = bd_tell( d->bd );
            bd_seek( d->bd, pos + 192 );
            error_count++;
            if (error_count > 10)
            {
                hb_error("bd: Error, too many consecutive read errors");
                return 0;
            }
            continue;
        }
        else if ( result == 0 )
        {
            return 0;
        }

        error_count = 0;
        while ( bd_get_event( d->bd, &event ) )
        {
            switch ( event.event )
            {
                case BD_EVENT_CHAPTER:
                    // The muxers expect to only get chapter 2 and above
                    // They write chapter 1 when chapter 2 is detected.
                    d->next_chap = event.param;
                    break;

                case BD_EVENT_PLAYITEM:
                    discontinuity = 1;
                    hb_deep_log(2, "bd: Playitem %u", event.param);
                    break;

                case BD_EVENT_STILL:
                    bd_read_skip_still( d->bd );
                    break;

                default:
                    break;
            }
        }
        // buf+4 to skip the BD timestamp at start of packet
        b = hb_ts_decode_pkt( d->stream, buf+4 );
        if ( b )
        {
            b->s.discontinuity = discontinuity;
            b->s.new_chap = new_chap;
            return b;
        }
    }
    return NULL;
}
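
For context on the 192-byte reads above: a Blu-ray source packet is a 4-byte timestamp header followed by a standard 188-byte MPEG-TS packet, which is why the decoder is handed buf + 4 and read errors resynchronize by seeking 192 bytes ahead. A tiny sketch of that split (the buffer contents are fabricated):

#include <stdio.h>
#include <stdint.h>
#include <string.h>

#define BD_PKT_SIZE 192   /* size read and skipped in hb_bd_read()      */
#define BD_HDR_SIZE   4   /* arrival-timestamp header skipped via buf+4 */
#define TS_PKT_SIZE 188   /* what the TS demuxer actually consumes      */

int main(void)
{
    uint8_t src[BD_PKT_SIZE] = { 0 };
    uint8_t ts[TS_PKT_SIZE];

    src[BD_HDR_SIZE] = 0x47;                    /* TS sync byte follows the header */
    memcpy(ts, src + BD_HDR_SIZE, TS_PKT_SIZE); /* strip the 4-byte header */

    printf("sync byte: 0x%02x (expect 0x47)\n", ts[0]);
    return 0;
}
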
Exemplo n.º 20
/*
 * rotate this segment of all three planes in a single thread.
 */
void rotate_filter_thread( void *thread_args_v )
{
    rotate_arguments_t *rotate_work = NULL;
    hb_filter_private_t * pv;
    int run = 1;
    int plane;
    int segment, segment_start, segment_stop;
    rotate_thread_arg_t *thread_args = thread_args_v;
    uint8_t **dst;
    int y, w, h, ref_stride;


    pv = thread_args->pv;
    segment = thread_args->segment;

    hb_log("Rotate thread started for segment %d", segment);

    while( run )
    {
        /*
         * Wait here until there is work to do. hb_lock() blocks until
         * render releases it to say that there is more work to do.
         */
        hb_lock( pv->rotate_begin_lock[segment] );

        rotate_work = &pv->rotate_arguments[segment];

        if( rotate_work->stop )
        {
            /*
             * No more work to do, exit this thread.
             */
            run = 0;
            continue;
        } 

        if( rotate_work->dst == NULL )
        {
            hb_error( "Thread started when no work available" );
            hb_snooze(500);
            continue;
        }
        
        /*
         * Process all three planes, but only this segment of it.
         */
        for( plane = 0; plane < 3; plane++)
        {

            dst = rotate_work->dst;
            w = pv->width[plane];
            h = pv->height[plane];
            ref_stride = pv->ref_stride[plane];
            segment_start = ( h / pv->cpu_count ) * segment;
            if( segment == pv->cpu_count - 1 )
            {
                /*
                 * Final segment
                 */
                segment_stop = h;
            } else {
                segment_stop = ( h / pv->cpu_count ) * ( segment + 1 );
            }

            for( y = segment_start; y < segment_stop; y++ )
            {
                uint8_t * cur;
                
                if( pv->mode & 1 )
                {
                    cur  = &pv->pic_in.data[plane][(h-y-1)*pv->pic_in.linesize[plane]];
                }
                else
                {
                    cur  = &pv->pic_in.data[plane][(y)*pv->pic_in.linesize[plane]];
                }
                uint8_t *dst2 = &dst[plane][y*w];

                rotate_filter_line( dst2, 
                                   cur, 
                                   plane, 
                                   pv );
            }
        }
        /*
         * Finished this segment, let everyone know.
         */
        hb_unlock( pv->rotate_complete_lock[segment] );
    }
    free( thread_args_v );
}
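
The row partitioning above gives every thread an equal slice of each plane and lets the last segment absorb the leftover rows when the height does not divide evenly. A standalone version of that arithmetic (the function name is ours):

#include <stdio.h>

/* [start, stop) row range for one segment, mirroring rotate_filter_thread(). */
static void segment_range(int height, int cpu_count, int segment,
                          int *start, int *stop)
{
    *start = (height / cpu_count) * segment;
    *stop  = (segment == cpu_count - 1) ? height
                                        : (height / cpu_count) * (segment + 1);
}

int main(void)
{
    int start, stop, s;
    for (s = 0; s < 16; s++)
    {
        segment_range(1080, 16, s, &start, &stop);
        printf("segment %2d: rows %4d..%4d\n", s, start, stop - 1);
    }
    /* segments 0-14 get 67 rows each; segment 15 gets rows 1005..1079 */
    return 0;
}
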
Exemplo n.º 21
static hb_list_t* hb_opencl_devices_list_get(hb_opencl_library_t *opencl,
                                             cl_device_type device_type)
{
    if (opencl                   == NULL ||
        opencl->library          == NULL ||
        opencl->clGetDeviceIDs   == NULL ||
        opencl->clGetDeviceInfo  == NULL ||
        opencl->clGetPlatformIDs == NULL)
    {
        hb_error("hb_opencl_devices_list_get: OpenCL support not available");
        return NULL;
    }

    hb_list_t *list = hb_list_init();
    if (list == NULL)
    {
        hb_error("hb_opencl_devices_list_get: memory allocation failure");
        return NULL;
    }

    cl_device_id *device_ids;
    hb_opencl_device_t *device;
    cl_platform_id *platform_ids;
    cl_uint i, j, num_platforms, num_devices;

    if (opencl->clGetPlatformIDs(0, NULL, &num_platforms) != CL_SUCCESS || !num_platforms)
    {
        goto fail;
    }
    if ((platform_ids = malloc(sizeof(cl_platform_id) * num_platforms)) == NULL)
    {
        hb_error("hb_opencl_devices_list_get: memory allocation failure");
        goto fail;
    }
    if (opencl->clGetPlatformIDs(num_platforms, platform_ids, NULL) != CL_SUCCESS)
    {
        goto fail;
    }
    for (i = 0; i < num_platforms; i++)
    {
        if (opencl->clGetDeviceIDs(platform_ids[i], device_type, 0, NULL, &num_devices) != CL_SUCCESS || !num_devices)
        {
            // non-fatal
            continue;
        }
        if ((device_ids = malloc(sizeof(cl_device_id) * num_devices)) == NULL)
        {
            hb_error("hb_opencl_devices_list_get: memory allocation failure");
            goto fail;
        }
        if (opencl->clGetDeviceIDs(platform_ids[i], device_type, num_devices, device_ids, NULL) != CL_SUCCESS)
        {
            // non-fatal
            continue;
        }
        for (j = 0; j < num_devices; j++)
        {
            if ((device = hb_opencl_device_get(opencl, device_ids[j])) != NULL)
            {
                hb_list_add(list, device);
            }
        }
    }
    return list;

fail:
    hb_opencl_devices_list_close(&list);
    return NULL;
}
Exemplo n.º 22
static int decsrtInit( hb_work_object_t * w, hb_job_t * job )
{
    int retval = 1;
    hb_work_private_t * pv;
    hb_buffer_t *buffer;
    int i;
    hb_chapter_t * chapter;

    pv = calloc( 1, sizeof( hb_work_private_t ) );
    if( pv )
    {
        w->private_data = pv;

        pv->job = job;

        buffer = hb_buffer_init( 0 );
        hb_fifo_push( w->fifo_in, buffer);
        
        pv->current_state = k_state_potential_new_entry;
        pv->number_of_entries = 0;
        pv->last_entry_number = 0;
        pv->current_time = 0;
        pv->subtitle = w->subtitle;

        /*
         * Figure out the start and stop times from the chapters being
         * encoded - drop subtitles not in this range.
         */
        pv->start_time = 0;
        for( i = 1; i < job->chapter_start; ++i )
        {
            chapter = hb_list_item( job->list_chapter, i - 1 );
            if( chapter )
            {
                pv->start_time += chapter->duration;
            } else {
                hb_error( "Could not locate chapter %d for SRT start time", i );
                retval = 0;
            }
        }
        pv->stop_time = pv->start_time;
        for( i = job->chapter_start; i <= job->chapter_end; ++i )
        {
            chapter = hb_list_item( job->list_chapter, i - 1 );
            if( chapter )
            {
                pv->stop_time += chapter->duration;
            } else {
                hb_error( "Could not locate chapter %d for SRT start time", i );
                retval = 0;
            }
        }

        hb_deep_log( 3, "SRT Start time %"PRId64", stop time %"PRId64, pv->start_time, pv->stop_time);

        pv->iconv_context = iconv_open( "utf-8", pv->subtitle->config.src_codeset );


        if( pv->iconv_context == (iconv_t) -1 )
        {
            hb_error("Could not open the iconv library with those file formats\n");

        } else {
            memset( &pv->current_entry, 0, sizeof( srt_entry_t ) );
            
            pv->file = fopen( w->subtitle->config.src_filename, "r" );
            
            if( !pv->file )
            {
                hb_error("Could not open the SRT subtitle file '%s'\n", 
                         w->subtitle->config.src_filename);
            } else {
                retval = 0;
            }
        }
    } 

    return retval;
}
Exemplo n.º 23
hb_work_object_t * hb_muxer_init( hb_job_t * job )
{
    hb_title_t  * title = job->title;
    int           i;
    hb_mux_t    * mux = calloc( sizeof( hb_mux_t ), 1 );
    hb_work_object_t  * w;
    hb_work_object_t  * muxer;

    mux->mutex = hb_lock_init();

    // set up to interleave track data in blocks of 1 video frame time.
    // (the best case for buffering and playout latency). The container-
    // specific muxers can reblock this into bigger chunks if necessary.
    mux->interleave = 90000. * (double)job->vrate_base / (double)job->vrate;
    mux->pts = mux->interleave;

    /* Get a real muxer */
    if( job->pass == 0 || job->pass == 2)
    {
        switch( job->mux )
        {
        case HB_MUX_MP4:
            mux->m = hb_mux_mp4_init( job );
            break;
        case HB_MUX_MKV:
            mux->m = hb_mux_mkv_init( job );
            break;
        default:
            hb_error( "No muxer selected, exiting" );
            *job->die = 1;
            return NULL;
        }
        /* Create file, write headers */
        if( mux->m )
        {
            mux->m->init( mux->m );
        }
    }

    /* Initialize the work objects that will receive fifo data */

    muxer = hb_get_work( WORK_MUX );
    muxer->private_data = calloc( sizeof( hb_work_private_t ), 1 );
    muxer->private_data->job = job;
    muxer->private_data->mux = mux;
    mux->ref++;
    muxer->private_data->track = mux->ntracks;
    muxer->fifo_in = job->fifo_mpeg4;
    add_mux_track( mux, job->mux_data, 1 );
    muxer->done = &muxer->private_data->mux->done;

    for( i = 0; i < hb_list_count( title->list_audio ); i++ )
    {
        hb_audio_t  *audio = hb_list_item( title->list_audio, i );

        w = hb_get_work( WORK_MUX );
        w->private_data = calloc( sizeof( hb_work_private_t ), 1 );
        w->private_data->job = job;
        w->private_data->mux = mux;
        mux->ref++;
        w->private_data->track = mux->ntracks;
        w->fifo_in = audio->priv.fifo_out;
        add_mux_track( mux, audio->priv.mux_data, 1 );
        w->done = &job->done;
        hb_list_add( job->list_work, w );
        w->thread = hb_thread_init( w->name, mux_loop, w, HB_NORMAL_PRIORITY );
    }

    for( i = 0; i < hb_list_count( title->list_subtitle ); i++ )
    {
        hb_subtitle_t  *subtitle = hb_list_item( title->list_subtitle, i );

        if (subtitle->config.dest != PASSTHRUSUB)
            continue;

        w = hb_get_work( WORK_MUX );
        w->private_data = calloc( sizeof( hb_work_private_t ), 1 );
        w->private_data->job = job;
        w->private_data->mux = mux;
        mux->ref++;
        w->private_data->track = mux->ntracks;
        w->fifo_in = subtitle->fifo_out;
        add_mux_track( mux, subtitle->mux_data, 0 );
        w->done = &job->done;
        hb_list_add( job->list_work, w );
        w->thread = hb_thread_init( w->name, mux_loop, w, HB_NORMAL_PRIORITY );
    }
    return muxer;
}
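
The interleave value computed above is just the duration of one video frame on the muxer's 90 kHz clock, i.e. 90000 * vrate_base / vrate. A quick worked check with a few common frame rates (the rate/base pairs here are ours; only their ratio matters):

#include <stdio.h>

int main(void)
{
    /* interleave = 90000 * vrate_base / vrate, as in hb_muxer_init() */
    printf("29.97  fps -> %.2f ticks\n", 90000. * 1001 / 30000);  /* 3003.00 */
    printf("23.976 fps -> %.2f ticks\n", 90000. * 1001 / 24000);  /* 3753.75 */
    printf("25     fps -> %.2f ticks\n", 90000. * 1    / 25);     /* 3600.00 */
    return 0;
}
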
Exemplo n.º 24
static int utf8_fill( hb_work_private_t * pv )
{
    int bytes, conversion = 0;
    size_t out_size;

    /* Align utf8 data to beginning of the buffer so that we can
     * fill the buffer to its maximum */
    memmove( pv->utf8_buf, pv->utf8_buf + pv->utf8_pos, pv->utf8_end - pv->utf8_pos );
    pv->utf8_end -= pv->utf8_pos;
    pv->utf8_pos = 0;
    out_size = 2048 - pv->utf8_end;
    while( out_size )
    {
        char *p, *q;
        size_t in_size, retval;

        if( pv->end == pv->pos )
        {
            bytes = fread( pv->buf, 1, 1024, pv->file );
            pv->pos = 0;
            pv->end = bytes;
            if( bytes == 0 )
            {
                if( conversion )
                    return 1;
                else
                    return 0;
            }
        }

        p = pv->buf + pv->pos;
        q = pv->utf8_buf + pv->utf8_end;
        in_size = pv->end - pv->pos;

        retval = iconv( pv->iconv_context, &p, &in_size, &q, &out_size);
        if( q != pv->utf8_buf + pv->utf8_pos )
            conversion = 1;

        pv->utf8_end = q - pv->utf8_buf;
        pv->pos = p - pv->buf;

        if ( !pv->utf8_bom_skipped )
        {
            uint8_t *buf = (uint8_t*)pv->utf8_buf;
            if (buf[0] == 0xef && buf[1] == 0xbb && buf[2] == 0xbf)
            {
                pv->utf8_pos = 3;
            }
            pv->utf8_bom_skipped = 1;
        }

        if( ( retval == -1 ) && ( errno == EINVAL ) )
        {
            /* Incomplete multibyte sequence, read more data */
            memmove( pv->buf, p, pv->end - pv->pos );
            pv->end -= pv->pos;
            pv->pos = 0;
            bytes = fread( pv->buf + pv->end, 1, 1024 - pv->end, pv->file );
            if( bytes == 0 )
            {
                if( !conversion )
                    return 0;
                else
                    return 1;
            }
            pv->end += bytes;
        } else if ( ( retval == -1 ) && ( errno == EILSEQ ) )
        {
            hb_error( "Invalid byte for codeset in input, discard byte" );
            /* Try the next byte of the input */
            pv->pos++;
        } else if ( ( retval == -1 ) && ( errno == E2BIG ) )
        {
            /* buffer full */
            return conversion;
        }
    }
    return 1;
}
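
utf8_fill() drops a UTF-8 byte-order mark (EF BB BF) exactly once, the first time the conversion buffer is filled, by advancing utf8_pos past it. A minimal standalone check of that three-byte signature (the buffers are made up, and unlike the code above this sketch also guards against short buffers):

#include <stdio.h>
#include <stdint.h>

/* Number of bytes to skip if the buffer starts with a UTF-8 BOM. */
static size_t utf8_bom_len(const uint8_t *buf, size_t len)
{
    if (len >= 3 && buf[0] == 0xef && buf[1] == 0xbb && buf[2] == 0xbf)
        return 3;
    return 0;
}

int main(void)
{
    uint8_t with_bom[]    = { 0xef, 0xbb, 0xbf, '1', '\n' };
    uint8_t without_bom[] = { '1', '\n' };

    printf("with BOM:    skip %zu bytes\n", utf8_bom_len(with_bom, sizeof(with_bom)));
    printf("without BOM: skip %zu bytes\n", utf8_bom_len(without_bom, sizeof(without_bom)));
    return 0;
}
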
Exemplo n.º 25
int hb_audio_resample_update(hb_audio_resample_t *resample)
{
    if (resample == NULL)
    {
        hb_error("hb_audio_resample_update: resample is NULL");
        return 1;
    }

    int ret, resample_changed;

    resample->resample_needed =
        (resample->out.sample_fmt != resample->in.sample_fmt ||
         resample->out.channel_layout != resample->in.channel_layout);

    resample_changed =
        (resample->resample_needed &&
         (resample->resample.sample_fmt != resample->in.sample_fmt ||
          resample->resample.channel_layout != resample->in.channel_layout ||
          resample->resample.center_mix_level != resample->in.center_mix_level ||
          resample->resample.surround_mix_level != resample->in.surround_mix_level));

    if (resample_changed || (resample->resample_needed &&
                             resample->avresample == NULL))
    {
        if (resample->avresample == NULL)
        {
            resample->avresample = avresample_alloc_context();
            if (resample->avresample == NULL)
            {
                hb_error("hb_audio_resample_update: avresample_alloc_context() failed");
                return 1;
            }

            av_opt_set_int(resample->avresample, "out_sample_fmt",
                           resample->out.sample_fmt, 0);
            av_opt_set_int(resample->avresample, "out_channel_layout",
                           resample->out.channel_layout, 0);
            av_opt_set_int(resample->avresample, "matrix_encoding",
                           resample->out.matrix_encoding, 0);
            av_opt_set_int(resample->avresample, "normalize_mix_level",
                           resample->out.normalize_mix_level, 0);
        }
        else if (resample_changed)
        {
            avresample_close(resample->avresample);
        }

        av_opt_set_int(resample->avresample, "in_sample_fmt",
                       resample->in.sample_fmt, 0);
        av_opt_set_int(resample->avresample, "in_channel_layout",
                       resample->in.channel_layout, 0);
        av_opt_set_double(resample->avresample, "center_mix_level",
                          resample->in.center_mix_level, 0);
        av_opt_set_double(resample->avresample, "surround_mix_level",
                          resample->in.surround_mix_level, 0);

        if ((ret = avresample_open(resample->avresample)))
        {
            char err_desc[64];
            av_strerror(ret, err_desc, 63);
            hb_error("hb_audio_resample_update: avresample_open() failed (%s)",
                     err_desc);
            // avresample won't open, start over
            avresample_free(&resample->avresample);
            return ret;
        }

        resample->resample.sample_fmt         = resample->in.sample_fmt;
        resample->resample.channel_layout     = resample->in.channel_layout;
        resample->resample.channels           =
            av_get_channel_layout_nb_channels(resample->in.channel_layout);
        resample->resample.center_mix_level   = resample->in.center_mix_level;
        resample->resample.surround_mix_level = resample->in.surround_mix_level;
    }

    return 0;
}
Exemplo n.º 26
/***********************************************************************
 * DecodePreviews
 ***********************************************************************
 * Decode 10 pictures for the given title.
 * It assumes that data->reader and data->vts have successfully been
 * DVDOpen()ed and ifoOpen()ed.
 **********************************************************************/
static int DecodePreviews( hb_scan_t * data, hb_title_t * title, int flush )
{
    int                i, npreviews = 0, abort = 0;
    hb_buffer_t      * buf, * buf_es;
    hb_buffer_list_t   list_es;
    int                progressive_count = 0;
    int                pulldown_count = 0;
    int                doubled_frame_count = 0;
    int                interlaced_preview_count = 0;
    int                vid_samples = 0;
    int                frame_wait = 0;
    int                cc_wait = 10;
    int                frames;
    hb_stream_t      * stream = NULL;
    info_list_t      * info_list;

    info_list = calloc(data->preview_count+1, sizeof(*info_list));
    crop_record_t *crops = crop_record_init( data->preview_count );

    hb_buffer_list_clear(&list_es);

    if( data->batch )
    {
        hb_log( "scan: decoding previews for title %d (%s)", title->index, title->path );
    }
    else
    {
        hb_log( "scan: decoding previews for title %d", title->index );
    }

    if (data->bd)
    {
        hb_bd_start( data->bd, title );
        hb_log( "scan: title angle(s) %d", title->angle_count );
    }
    else if (data->dvd)
    {
        hb_dvd_start( data->dvd, title, 1 );
        title->angle_count = hb_dvd_angle_count( data->dvd );
        hb_log( "scan: title angle(s) %d", title->angle_count );
    }
    else if (data->batch)
    {
        stream = hb_stream_open(data->h, title->path, title, 0);
    }
    else if (data->stream)
    {
        stream = hb_stream_open(data->h, data->path, title, 0);
    }

    if (title->video_codec == WORK_NONE)
    {
        hb_error("No video decoder set!");
        return 0;
    }
    hb_work_object_t *vid_decoder = hb_get_work(data->h, title->video_codec);
    vid_decoder->codec_param = title->video_codec_param;
    vid_decoder->title = title;
    vid_decoder->init( vid_decoder, NULL );

    for( i = 0; i < data->preview_count; i++ )
    {
        int j;

        UpdateState3(data, i + 1);

        if ( *data->die )
        {
            free( info_list );
            crop_record_free( crops );
            return 0;
        }
        if (data->bd)
        {
            if( !hb_bd_seek( data->bd, (float) ( i + 1 ) / ( data->preview_count + 1.0 ) ) )
            {
                continue;
            }
        }
        if (data->dvd)
        {
            if( !hb_dvd_seek( data->dvd, (float) ( i + 1 ) / ( data->preview_count + 1.0 ) ) )
            {
                continue;
            }
        }
        else if (stream)
        {
            /* we start reading streams at zero rather than 1/11 because
             * short streams may have only one sequence header in the entire
             * file and we need it to decode any previews.
             *
             * Also, seeking to position 0 loses the palette of avi files
             * so skip initial seek */
            if (i != 0)
            {
                if (!hb_stream_seek(stream,
                                    (float)i / (data->preview_count + 1.0)))
                {
                    continue;
                }
            }
            else
            {
                hb_stream_set_need_keyframe(stream, 1);
            }
        }

        hb_deep_log( 2, "scan: preview %d", i + 1 );

        if (flush && vid_decoder->flush)
            vid_decoder->flush( vid_decoder );
        if (title->flags & HBTF_NO_IDR)
        {
            if (!flush)
            {
                // If we are doing the first previews decode attempt,
                // set this threshold high so that we get the best
                // quality frames possible.
                frame_wait = 100;
            }
            else
            {
                // If we failed to get enough valid frames in the first
                // previews decode attempt, lower the threshold to improve
                // our chances of getting something to work with.
                frame_wait = 10;
            }
        }
        else
        {
            // For certain mpeg-2 streams, libav is delivering a
            // dummy first frame that is all black.  So always skip
            // one frame
            frame_wait = 1;
        }
        frames = 0;

        hb_buffer_t * vid_buf = NULL;

        int total_read = 0, packets = 0;
        while (total_read < PREVIEW_READ_THRESH ||
              (!AllAudioOK(title) && packets < 10000)) 
        {
            if (data->bd)
            {
                if( (buf = hb_bd_read( data->bd )) == NULL )
                {
                    if ( vid_buf )
                    {
                        break;
                    }
                    hb_log( "Warning: Could not read data for preview %d, skipped", i + 1 );
                    abort = 1;
                    goto skip_preview;
                }
            }
            else if (data->dvd)
            {
                if( (buf = hb_dvd_read( data->dvd )) == NULL )
                {
                    if ( vid_buf )
                    {
                        break;
                    }
                    hb_log( "Warning: Could not read data for preview %d, skipped", i + 1 );
                    abort = 1;
                    goto skip_preview;
                }
            }
            else if (stream)
            {
                if ( (buf = hb_stream_read(stream)) == NULL )
                {
                    if ( vid_buf )
                    {
                        break;
                    }
                    hb_log( "Warning: Could not read data for preview %d, skipped", i + 1 );
                    abort = 1;
                    goto skip_preview;
                }
            }
            else
            {
                // Silence compiler warning
                buf = NULL;
                hb_error( "Error: This can't happen!" );
                abort = 1;
                goto skip_preview;
            }

            if (buf->size <= 0)
            {
                hb_log( "Warning: Could not read data for preview %d, skipped", i + 1 );
                abort = 1;
                goto skip_preview;
            }
            total_read += buf->size;
            packets++;

            (hb_demux[title->demuxer])(buf, &list_es, 0 );

            while ((buf_es = hb_buffer_list_rem_head(&list_es)) != NULL)
            {
                if( buf_es->s.id == title->video_id && vid_buf == NULL )
                {
                    vid_decoder->work( vid_decoder, &buf_es, &vid_buf );
                    // There are 2 conditions we decode additional
                    // video frames for during scan.
                    // 1. We did not detect IDR frames, so the initial video
                    //    frames may be corrupt.  We decode extra frames to
                    //    increase the probability of a complete preview frame.
                    // 2. Some frames do not contain CC data, even though
                    //    CCs are present in the stream.  So we need to decode
                    //    additional frames to find the CCs.
                    if (vid_buf != NULL && (frame_wait || cc_wait))
                    {
                        hb_work_info_t vid_info;
                        if (vid_decoder->info(vid_decoder, &vid_info))
                        {
                            if (is_close_to(vid_info.rate.den, 900900, 100) &&
                                (vid_buf->s.flags & PIC_FLAG_REPEAT_FIRST_FIELD))
                            {
                                /* Potentially soft telecine material */
                                pulldown_count++;
                            }

                            if (vid_buf->s.flags & PIC_FLAG_REPEAT_FRAME)
                            {
                                // AVCHD-Lite specifies that all streams are
                                // 50 or 60 fps.  To produce 25 or 30 fps, camera
                                // makers are repeating all frames.
                                doubled_frame_count++;
                            }

                            if (is_close_to(vid_info.rate.den, 1126125, 100 ))
                            {
                                // Frame FPS is 23.976 (meaning it's
                                // progressive), so start keeping track of
                                // how many are reporting at that speed. When
                                // enough show up that way, we want to make
                                // that the overall title FPS.
                                progressive_count++;
                            }
                            vid_samples++;
                        }

                        if (frames > 0 && vid_buf->s.frametype == HB_FRAME_I)
                            frame_wait = 0;
                        if (frame_wait || cc_wait)
                        {
                            hb_buffer_close(&vid_buf);
                            if (frame_wait) frame_wait--;
                            if (cc_wait) cc_wait--;
                        }
                        frames++;
                    }
                }
                else if( ! AllAudioOK( title ) ) 
                {
                    LookForAudio( data, title, buf_es );
                    buf_es = NULL;
                }
                if ( buf_es )
                    hb_buffer_close( &buf_es );
            }

            if( vid_buf && AllAudioOK( title ) )
                break;
        }

        if( ! vid_buf )
        {
            hb_log( "scan: could not get a decoded picture" );
            continue;
        }

        /* Get size and rate infos */

        hb_work_info_t vid_info;
        if( !vid_decoder->info( vid_decoder, &vid_info ) )
        {
            /*
             * Could not fill vid_info, don't continue and try to use vid_info
             * in this case.
             */
            if (vid_buf)
            {
                hb_buffer_close( &vid_buf );
            }
            hb_log( "scan: could not get a video information" );
            continue;
        }

        remember_info( info_list, &vid_info );

        hb_buffer_list_close(&list_es);

        /* Check preview for interlacing artifacts */
        if( hb_detect_comb( vid_buf, 10, 30, 9, 10, 30, 9 ) )
        {
            hb_deep_log( 2, "Interlacing detected in preview frame %i", i+1);
            interlaced_preview_count++;
        }
        
        if( data->store_previews )
        {
            hb_save_preview( data->h, title->index, i, vid_buf );
        }

        /* Detect black borders */

        int top, bottom, left, right;
        int h4 = vid_info.geometry.height / 4, w4 = vid_info.geometry.width / 4;

        // When widescreen content is matted to 16:9 or 4:3 there's sometimes
        // a thin border on the outer edge of the matte. On TV content it can be
        // "line 21" VBI data that's normally hidden in the overscan. For HD
        // content it can just be a diagnostic added in post production so that
        // the frame borders are visible. We try to ignore these borders so
        // we can crop the matte. The border width depends on the resolution
        // (12 pixels on 1080i looks visually the same as 4 pixels on 480i)
        // so we allow the border to be up to 1% of the frame height.
        const int border = vid_info.geometry.height / 100;

        for ( top = border; top < h4; ++top )
        {
            if ( ! row_all_dark( vid_buf, top ) )
                break;
        }
        if ( top <= border )
        {
            // we never made it past the border region - see if the rows we
            // didn't check are dark or if we shouldn't crop at all.
            for ( top = 0; top < border; ++top )
            {
                if ( ! row_all_dark( vid_buf, top ) )
                    break;
            }
            if ( top >= border )
            {
                top = 0;
            }
        }
        for ( bottom = border; bottom < h4; ++bottom )
        {
            if ( ! row_all_dark( vid_buf, vid_info.geometry.height - 1 - bottom ) )
                break;
        }
        if ( bottom <= border )
        {
            for ( bottom = 0; bottom < border; ++bottom )
            {
                if ( ! row_all_dark( vid_buf, vid_info.geometry.height - 1 - bottom ) )
                    break;
            }
            if ( bottom >= border )
            {
                bottom = 0;
            }
        }
        for ( left = 0; left < w4; ++left )
        {
            if ( ! column_all_dark( vid_buf, top, bottom, left ) )
                break;
        }
        for ( right = 0; right < w4; ++right )
        {
            if ( ! column_all_dark( vid_buf, top, bottom, vid_info.geometry.width - 1 - right ) )
                break;
        }

        // only record the result if all the crops are less than a quarter of
        // the frame otherwise we can get fooled by frames with a lot of black
        // like titles, credits & fade-thru-black transitions.
        if ( top < h4 && bottom < h4 && left < w4 && right < w4 )
        {
            record_crop( crops, top, bottom, left, right );
        }
        ++npreviews;

skip_preview:
        /* Make sure we found audio rates and bitrates */
        for( j = 0; j < hb_list_count( title->list_audio ); j++ )
        {
            hb_audio_t * audio = hb_list_item( title->list_audio, j );
            if ( audio->priv.scan_cache )
            {
                hb_fifo_flush( audio->priv.scan_cache );
            }
        }
        if (vid_buf)
        {
            hb_buffer_close( &vid_buf );
        }
        if (abort)
        {
            break;
        }
    }
    UpdateState3(data, i);

    vid_decoder->close( vid_decoder );
    free( vid_decoder );

    if (stream != NULL)
    {
        hb_stream_close(&stream);
    }

    if ( npreviews )
    {
        // use the most common frame info for our final title dimensions
        hb_work_info_t vid_info;
        most_common_info( info_list, &vid_info );

        title->has_resolution_change = has_resolution_change( info_list );
        if ( title->video_codec_name == NULL )
        {
            title->video_codec_name = strdup( vid_info.name );
        }
        title->geometry.width = vid_info.geometry.width;
        title->geometry.height = vid_info.geometry.height;
        if (vid_info.rate.num && vid_info.rate.den)
        {
            // if the frame rate is very close to one of our "common"
            // framerates, assume it actually is said frame rate;
            // e.g. some 24000/1001 sources may have a rate.den of 1126124
            // instead of 1126125
            const hb_rate_t *video_framerate = NULL;
            while ((video_framerate = hb_video_framerate_get_next(video_framerate)) != NULL)
            {
                if (is_close_to(vid_info.rate.den, video_framerate->rate, 100))
                {
                    vid_info.rate.den = video_framerate->rate;
                    break;
                }
            }
            title->vrate = vid_info.rate;
            if( vid_info.rate.den == 900900 )
            {
                if (vid_samples >= 4 && pulldown_count >= vid_samples / 4)
                {
                    title->vrate.den = 1126125;
                    hb_deep_log( 2, "Pulldown detected, setting fps to 23.976" );
                }
                if (vid_samples >= 2 && progressive_count >= vid_samples / 2)
                {
                    // We've already deduced that the frame rate is 23.976,
                    // so set it back again.
                    title->vrate.den = 1126125;
                    hb_deep_log( 2, "Title's mostly NTSC Film, setting fps to 23.976" );
                }
            }
            if (vid_samples >= 2 && doubled_frame_count >= 3 * vid_samples / 4)
            {
                // We've detected that a significant number of the frames
                // have been doubled in duration by repeat flags.
                title->vrate.den = 2 * vid_info.rate.den;
                hb_deep_log(2, "Repeat frames detected, setting fps to %.3f",
                            (float)title->vrate.num / title->vrate.den );
            }
        }
        title->video_bitrate = vid_info.bitrate;

        if( vid_info.geometry.par.num && vid_info.geometry.par.den )
        {
            title->geometry.par = vid_info.geometry.par;
        }
        title->color_prim = vid_info.color_prim;
        title->color_transfer = vid_info.color_transfer;
        title->color_matrix = vid_info.color_matrix;

        title->video_decode_support = vid_info.video_decode_support;

        // TODO: check video dimensions
        title->opencl_support = !!hb_opencl_available();

        // compute the aspect ratio based on the storage dimensions and PAR.
        hb_reduce(&title->dar.num, &title->dar.den,
                  title->geometry.par.num * title->geometry.width,
                  title->geometry.height * title->geometry.par.den);

        // For unknown reasons some French PAL DVDs put the original
        // content's aspect ratio into the mpeg PAR even though it's
        // the wrong PAR for the DVD. Apparently they rely on the fact
        // that DVD players ignore the content PAR and just use the
        // aspect ratio from the DVD metadata. So, if the aspect computed
        // from the PAR is different from the container's aspect we use
        // the container's aspect & recompute the PAR from it.
        if (data->dvd &&
            (title->dar.num != title->container_dar.num ||
             title->dar.den != title->container_dar.den))
        {
            hb_log("scan: content PAR gives wrong aspect %d:%d; "
                   "using container aspect %d:%d",
                   title->dar.num, title->dar.den,
                   title->container_dar.num, title->container_dar.den);
            title->dar = title->container_dar;
            hb_reduce(&title->geometry.par.num, &title->geometry.par.den,
                      title->geometry.height * title->dar.num,
                      title->geometry.width * title->dar.den);
        }

        // don't try to crop unless we got at least 3 previews
        if ( crops->n > 2 )
        {
            sort_crops( crops );
            // The next line selects median cropping - at least
            // 50% of the frames will have their borders removed.
            // Other possible choices are loose cropping (i = 0) where 
            // no non-black pixels will be cropped from any frame and a
            // tight cropping (i = crops->n - (crops->n >> 2)) where at
            // least 75% of the frames will have their borders removed.
            i = crops->n >> 1;
            title->crop[0] = EVEN( crops->t[i] );
            title->crop[1] = EVEN( crops->b[i] );
            title->crop[2] = EVEN( crops->l[i] );
            title->crop[3] = EVEN( crops->r[i] );
        }

        hb_log( "scan: %d previews, %dx%d, %.3f fps, autocrop = %d/%d/%d/%d, "
                "aspect %s, PAR %d:%d",
                npreviews, title->geometry.width, title->geometry.height,
                (float)title->vrate.num / title->vrate.den,
                title->crop[0], title->crop[1], title->crop[2], title->crop[3],
                aspect_to_string(&title->dar),
                title->geometry.par.num, title->geometry.par.den);

        if (title->video_decode_support != HB_DECODE_SUPPORT_SW)
        {
            hb_log("scan: supported video decoders:%s%s%s",
                   !(title->video_decode_support & HB_DECODE_SUPPORT_SW)    ? "" : " avcodec",
                   !(title->video_decode_support & HB_DECODE_SUPPORT_QSV)   ? "" : " qsv",
                   !(title->video_decode_support & HB_DECODE_SUPPORT_DXVA2) ? "" : " dxva2");
        }

        if( interlaced_preview_count >= ( npreviews / 2 ) )
        {
            hb_log("Title is likely interlaced or telecined (%i out of %i previews). You should do something about that.",
                   interlaced_preview_count, npreviews);
            title->detected_interlacing = 1;
        }
        else
        {
            title->detected_interlacing = 0;
        }
    }
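
The 900900 / 1126125 constants above are frame durations, not rates: if we assume the 27 MHz numerator HandBrake uses for title->vrate (the scan log above prints fps as vrate.num / vrate.den), they correspond to 29.97 and 23.976 fps, and doubling a denominator halves the frame rate, which is how the repeat-frame fixup turns a 59.94 fps AVCHD-Lite stream back into 29.97. A worked check:

#include <stdio.h>

int main(void)
{
    const double clock = 27000000.0;   /* assumed title->vrate.num (27 MHz) */

    printf("den  900900 -> %.3f fps\n", clock / 900900.);           /* 29.970 */
    printf("den 1126125 -> %.3f fps\n", clock / 1126125.);          /* 23.976 */
    /* repeat-frame fixup: den = 2 * 450450 turns 59.94 fps into 29.97 */
    printf("den  450450 doubled -> %.3f fps\n", clock / (2 * 450450.));
    return 0;
}
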
Exemplo n.º 27
hb_buffer_t* hb_audio_resample(hb_audio_resample_t *resample,
                               uint8_t **samples, int nsamples)
{
    if (resample == NULL)
    {
        hb_error("hb_audio_resample: resample is NULL");
        return NULL;
    }
    if (resample->resample_needed && resample->avresample == NULL)
    {
        hb_error("hb_audio_resample: resample needed but libavresample context "
                 "is NULL");
        return NULL;
    }

    hb_buffer_t *out;
    int out_size, out_samples;

    if (resample->resample_needed)
    {
        int in_linesize, out_linesize;
        // set in/out linesize and out_size
        av_samples_get_buffer_size(&in_linesize,
                                   resample->resample.channels, nsamples,
                                   resample->resample.sample_fmt, 0);
        out_size = av_samples_get_buffer_size(&out_linesize,
                                              resample->out.channels, nsamples,
                                              resample->out.sample_fmt, 0);
        out = hb_buffer_init(out_size);

        out_samples = avresample_convert(resample->avresample,
                                         &out->data, out_linesize, nsamples,
                                         samples,     in_linesize, nsamples);

        if (out_samples <= 0)
        {
            if (out_samples < 0)
                hb_log("hb_audio_resample: avresample_convert() failed");
            // don't send empty buffers downstream (EOF)
            hb_buffer_close(&out);
            return NULL;
        }
        out->size = (out_samples *
                     resample->out.sample_size * resample->out.channels);
    }
    else
    {
        out_samples = nsamples;
        out_size = (out_samples *
                    resample->out.sample_size * resample->out.channels);
        out = hb_buffer_init(out_size);
        memcpy(out->data, samples[0], out_size);
    }

    /*
     * Dual Mono to Mono.
     *
     * Copy all left or right samples to the first half of the buffer and halve
     * the buffer size.
     */
    if (resample->dual_mono_downmix)
    {
        int ii, jj = !!resample->dual_mono_right_only;
        int sample_size = resample->out.sample_size;
        uint8_t *audio_samples = out->data;
        for (ii = 0; ii < out_samples; ii++)
        {
            memcpy(audio_samples + (ii * sample_size),
                   audio_samples + (jj * sample_size), sample_size);
            jj += 2;
        }
        out->size = out_samples * sample_size;
    }

    return out;
}
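
The dual-mono branch above keeps just one of the two interleaved channels by copying every other sample to the front of the buffer and halving its size. The same idea on interleaved 16-bit samples, as a standalone sketch (sample values are made up):

#include <stdio.h>
#include <stdint.h>

/* Keep channel `chan` (0 = left, 1 = right) of interleaved dual-mono audio,
 * compacting it in place; returns the resulting mono sample count. */
static int dual_mono_to_mono(int16_t *samples, int nframes, int chan)
{
    int ii, jj = chan;
    for (ii = 0; ii < nframes; ii++, jj += 2)
        samples[ii] = samples[jj];
    return nframes;
}

int main(void)
{
    /* interleaved L/R pairs: left = 10,20,30  right = 11,21,31 */
    int16_t buf[] = { 10, 11, 20, 21, 30, 31 };
    int i, n = dual_mono_to_mono(buf, 3, 1);   /* keep the right channel */

    for (i = 0; i < n; i++)
        printf("%d ", buf[i]);                 /* prints: 11 21 31 */
    printf("\n");
    return 0;
}
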
Exemplo n.º 28
static int encavcodecaInit(hb_work_object_t *w, hb_job_t *job)
{
    AVCodec *codec;
    AVCodecContext *context;
    hb_audio_t *audio = w->audio;

    hb_work_private_t *pv = calloc(1, sizeof(hb_work_private_t));
    w->private_data       = pv;
    pv->job               = job;
    pv->list              = hb_list_init();

    // channel count, layout and matrix encoding
    int matrix_encoding;
    uint64_t channel_layout   = hb_ff_mixdown_xlat(audio->config.out.mixdown,
                                                   &matrix_encoding);
    pv->out_discrete_channels =
        hb_mixdown_get_discrete_channel_count(audio->config.out.mixdown);

    // default settings and options
    AVDictionary *av_opts          = NULL;
    const char *codec_name         = NULL;
    enum AVCodecID codec_id        = AV_CODEC_ID_NONE;
    enum AVSampleFormat sample_fmt = AV_SAMPLE_FMT_FLTP;
    int bits_per_raw_sample        = 0;
    int profile                    = FF_PROFILE_UNKNOWN;

    // override with encoder-specific values
    switch (audio->config.out.codec)
    {
        case HB_ACODEC_AC3:
            codec_id = AV_CODEC_ID_AC3;
            if (matrix_encoding != AV_MATRIX_ENCODING_NONE)
                av_dict_set(&av_opts, "dsur_mode", "on", 0);
            break;

        case HB_ACODEC_FFEAC3:
            codec_id = AV_CODEC_ID_EAC3;
            if (matrix_encoding != AV_MATRIX_ENCODING_NONE)
                av_dict_set(&av_opts, "dsur_mode", "on", 0);
            break;

        case HB_ACODEC_FDK_AAC:
        case HB_ACODEC_FDK_HAAC:
            codec_name          = "libfdk_aac";
            sample_fmt          = AV_SAMPLE_FMT_S16;
            bits_per_raw_sample = 16;
            switch (audio->config.out.codec)
            {
                case HB_ACODEC_FDK_HAAC:
                    profile = FF_PROFILE_AAC_HE;
                    break;
                default:
                    profile = FF_PROFILE_AAC_LOW;
                    break;
            }
            // Libav's libfdk-aac wrapper expects back channels for 5.1
            // audio, and will error out unless we translate the layout
            if (channel_layout == AV_CH_LAYOUT_5POINT1)
                channel_layout  = AV_CH_LAYOUT_5POINT1_BACK;
            break;

        case HB_ACODEC_FFAAC:
            codec_name = "aac";
            av_dict_set(&av_opts, "stereo_mode", "ms_off", 0);
            break;

        case HB_ACODEC_FFFLAC:
        case HB_ACODEC_FFFLAC24:
            codec_id = AV_CODEC_ID_FLAC;
            switch (audio->config.out.codec)
            {
                case HB_ACODEC_FFFLAC24:
                    sample_fmt          = AV_SAMPLE_FMT_S32;
                    bits_per_raw_sample = 24;
                    break;
                default:
                    sample_fmt          = AV_SAMPLE_FMT_S16;
                    bits_per_raw_sample = 16;
                    break;
            }
            break;

        default:
            hb_error("encavcodecaInit: unsupported codec (0x%x)",
                     audio->config.out.codec);
            return 1;
    }
    if (codec_name != NULL)
    {
        codec = avcodec_find_encoder_by_name(codec_name);
        if (codec == NULL)
        {
            hb_error("encavcodecaInit: avcodec_find_encoder_by_name(%s) failed",
                     codec_name);
            return 1;
        }
    }
    else
    {
        codec = avcodec_find_encoder(codec_id);
        if (codec == NULL)
        {
            hb_error("encavcodecaInit: avcodec_find_encoder(%d) failed",
                     codec_id);
            return 1;
        }
    }
    // allocate the context and apply the settings
    context                      = avcodec_alloc_context3(codec);
    hb_ff_set_sample_fmt(context, codec, sample_fmt);
    context->bits_per_raw_sample = bits_per_raw_sample;
    context->profile             = profile;
    context->channel_layout      = channel_layout;
    context->channels            = pv->out_discrete_channels;
    context->sample_rate         = audio->config.out.samplerate;

    if (audio->config.out.bitrate > 0)
    {
        context->bit_rate = audio->config.out.bitrate * 1000;
    }
    else if (audio->config.out.quality >= 0)
    {
        context->global_quality = audio->config.out.quality * FF_QP2LAMBDA;
        context->flags |= CODEC_FLAG_QSCALE;
        if (audio->config.out.codec == HB_ACODEC_FDK_AAC ||
            audio->config.out.codec == HB_ACODEC_FDK_HAAC)
        {
            char vbr[2];
            snprintf(vbr, 2, "%.1g", audio->config.out.quality);
            av_dict_set(&av_opts, "vbr", vbr, 0);
        }
    }

    if (audio->config.out.compression_level >= 0)
    {
        context->compression_level = audio->config.out.compression_level;
    }

    // For some codecs, libav requires the following flag to be set
    // so that it fills extradata with global header information.
    // If this flag is not set, it inserts the data into each
    // packet instead.
    context->flags |= CODEC_FLAG_GLOBAL_HEADER;

    if (hb_avcodec_open(context, codec, &av_opts, 0))
    {
        hb_error("encavcodecaInit: hb_avcodec_open() failed");
        return 1;
    }
    // avcodec_open populates the opts dictionary with the
    // things it didn't recognize.
    AVDictionaryEntry *t = NULL;
    while ((t = av_dict_get(av_opts, "", t, AV_DICT_IGNORE_SUFFIX)))
    {
        hb_log("encavcodecaInit: Unknown avcodec option %s", t->key);
    }
    av_dict_free(&av_opts);

    pv->context           = context;
    audio->config.out.samples_per_frame =
    pv->samples_per_frame = context->frame_size;
    pv->input_samples     = context->frame_size * context->channels;
    pv->input_buf         = malloc(pv->input_samples * sizeof(float));
    // Some encoders in libav (e.g. fdk-aac) fail if the output buffer
    // size is not some minimum value.  8K seems to be enough :(
    pv->max_output_bytes  = MAX(FF_MIN_BUFFER_SIZE,
                                (pv->input_samples *
                                 av_get_bytes_per_sample(context->sample_fmt)));

    // sample_fmt conversion
    if (context->sample_fmt != AV_SAMPLE_FMT_FLT)
    {
        pv->output_buf = malloc(pv->max_output_bytes);
        pv->avresample = avresample_alloc_context();
        if (pv->avresample == NULL)
        {
            hb_error("encavcodecaInit: avresample_alloc_context() failed");
            return 1;
        }
        av_opt_set_int(pv->avresample, "in_sample_fmt",
                       AV_SAMPLE_FMT_FLT, 0);
        av_opt_set_int(pv->avresample, "out_sample_fmt",
                       context->sample_fmt, 0);
        av_opt_set_int(pv->avresample, "in_channel_layout",
                       context->channel_layout, 0);
        av_opt_set_int(pv->avresample, "out_channel_layout",
                       context->channel_layout, 0);
        if (hb_audio_dither_is_supported(audio->config.out.codec))
        {
            // dithering needs the sample rate
            av_opt_set_int(pv->avresample, "in_sample_rate",
                           context->sample_rate, 0);
            av_opt_set_int(pv->avresample, "out_sample_rate",
                           context->sample_rate, 0);
            av_opt_set_int(pv->avresample, "dither_method",
                           audio->config.out.dither_method, 0);
        }
        if (avresample_open(pv->avresample))
        {
            hb_error("encavcodecaInit: avresample_open() failed");
            avresample_free(&pv->avresample);
            return 1;
        }
    }
    else
    {
        pv->avresample = NULL;
        pv->output_buf = pv->input_buf;
    }

    if (context->extradata != NULL)
    {
        memcpy(w->config->extradata.bytes, context->extradata,
               context->extradata_size);
        w->config->extradata.length = context->extradata_size;
    }

    audio->config.out.delay = av_rescale_q(context->delay, context->time_base,
                                           (AVRational){1, 90000});

    return 0;
}
Exemplo n.º 29
hb_audio_remap_t* hb_audio_remap_init(enum AVSampleFormat sample_fmt,
                                      hb_chan_map_t *channel_map_out,
                                      hb_chan_map_t *channel_map_in)
{
    hb_audio_remap_t *remap = calloc(1, sizeof(hb_audio_remap_t));
    if (remap == NULL)
    {
        hb_error("hb_audio_remap_init: failed to allocate remap");
        goto fail;
    }

    // sample format
    switch (sample_fmt)
    {
        case AV_SAMPLE_FMT_U8P:
        case AV_SAMPLE_FMT_S16P:
        case AV_SAMPLE_FMT_S32P:
        case AV_SAMPLE_FMT_FLTP:
        case AV_SAMPLE_FMT_DBLP:
            remap->remap = &remap_planar;
            break;

        case AV_SAMPLE_FMT_U8:
            remap->remap = &remap_u8_interleaved;
            break;

        case AV_SAMPLE_FMT_S16:
            remap->remap = &remap_s16_interleaved;
            break;

        case AV_SAMPLE_FMT_S32:
            remap->remap = &remap_s32_interleaved;
            break;

        case AV_SAMPLE_FMT_FLT:
            remap->remap = &remap_flt_interleaved;
            break;

        case AV_SAMPLE_FMT_DBL:
            remap->remap = &remap_dbl_interleaved;
            break;

        default:
            hb_error("hb_audio_remap_init: unsupported sample format '%s'",
                     av_get_sample_fmt_name(sample_fmt));
            goto fail;
    }

    // input/output channel order
    if (channel_map_in == NULL || channel_map_out == NULL)
    {
        hb_error("hb_audio_remap_init: invalid channel map(s)");
        goto fail;
    }
    remap->channel_map_in  = channel_map_in;
    remap->channel_map_out = channel_map_out;

    // remap can't be done until the channel layout has been set
    remap->remap_needed = 0;

    return remap;

fail:
    hb_audio_remap_free(remap);
    return NULL;
}
Exemplo n.º 30
static void
gval_write(FILE *file, hb_value_t *gval)
{
    static int indent = 0;
    int ii;
    hb_value_type_t gtype;

    if (gval == NULL) return;
    gtype = hb_value_type(gval);
    if (gtype == HB_VALUE_TYPE_ARRAY)
    {
        hb_value_t *val;
        int count;

        indent_fprintf(file, indent, "<array>\n");
        indent++;
        count = hb_value_array_len(gval);
        for (ii = 0; ii < count; ii++)
        {
            val = hb_value_array_get(gval, ii);
            gval_write(file, val);
        }
        indent--;
        indent_fprintf(file, indent, "</array>\n");
    }
    else if (gtype == HB_VALUE_TYPE_DICT)
    {
        const char *key;
        hb_value_t *val;
        hb_dict_iter_t iter;

        indent_fprintf(file, indent, "<dict>\n");
        indent++;

        for (iter = hb_dict_iter_init(gval);
             iter != HB_DICT_ITER_DONE;
             iter = hb_dict_iter_next(gval, iter))
        {
            key = hb_dict_iter_key(iter);
            val = hb_dict_iter_value(iter);
            indent_fprintf(file, indent, "<key>%s</key>\n", key);
            gval_write(file, val);
        }

        indent--;
        indent_fprintf(file, indent, "</dict>\n");
    }
    else if (gtype == HB_VALUE_TYPE_BOOL)
    {
        char *tag;
        if (hb_value_get_bool(gval))
        {
            tag = "true";
        }
        else
        {
            tag = "false";
        }
        indent_fprintf(file, indent, "<%s />\n", tag);
    }
    else if (gtype == HB_VALUE_TYPE_DOUBLE)
    {
        double val = hb_value_get_double(gval);
        indent_fprintf(file, indent, "<real>%.17g</real>\n", val);
    }
    else if (gtype == HB_VALUE_TYPE_INT)
    {
        int64_t val = hb_value_get_int(gval);
        indent_fprintf(file, indent, "<integer>%"PRId64"</integer>\n", val);
    }
    else if (gtype == HB_VALUE_TYPE_STRING)
    {
        const char *str = hb_value_get_string(gval);
        char *esc = markup_escape_text(str);
        indent_fprintf(file, indent, "<string>%s</string>\n", esc);
        free(esc);
    }
    else
    {
        // Unrecognized value type: nothing is written, just log an error
        hb_error("Unhandled data type %d", gtype);
    }
}