void hb_qsv_info_print() { if (hb_qsv_info == NULL) { hb_error("hb_qsv_info_print: QSV info not initialized!"); } // is QSV available? hb_log("Intel Quick Sync Video support: %s", hb_qsv_available() ? "yes": "no"); // if we have Quick Sync Video support, also print the details if (hb_qsv_available()) { if (qsv_hardware_available) { hb_log(" - Intel Media SDK hardware: API %d.%d (minimum: %d.%d)", qsv_hardware_version.Major, qsv_hardware_version.Minor, qsv_minimum_version.Major, qsv_minimum_version.Minor); } if (qsv_software_available) { hb_log(" - Intel Media SDK software: API %d.%d (minimum: %d.%d)", qsv_software_version.Major, qsv_software_version.Minor, qsv_minimum_version.Major, qsv_minimum_version.Minor); } hb_log(" - Preferred implementation: %s", hb_qsv_impl_get_name(preferred_implementation)); } }
/**
 * It creates a DirectX video service
 *
 * Opens a device handle on the Direct3D device manager and obtains an
 * IDirectXVideoDecoderService from it; both are stored in dxva2->device
 * and dxva2->vs for later decoder creation.
 *
 * Returns HB_WORK_OK on success, HB_WORK_ERROR on any failure.
 */
static int hb_dx_create_video_service( hb_va_dxva2_t *dxva2 )
{
    // Resolve the entry point dynamically so the binary still loads on
    // systems without DXVA2.
    // NOTE(review): the resolved CreateVideoService pointer is never
    // called in this function — it only serves as a capability check;
    // the service is obtained via the device manager below. Confirm
    // whether the direct-creation path was intended.
    HRESULT (WINAPI *CreateVideoService)( IDirect3DDevice9 *,
                                          REFIID riid,
                                          void **ppService );
    CreateVideoService =
        (void*)GetProcAddress( dxva2->hdxva2_dll,
                               TEXT( "DXVA2CreateVideoService" ));
    if( !CreateVideoService )
    {
        hb_log( "dxva2:cannot load function" );
        return HB_WORK_ERROR;
    }

    HRESULT hr;

    // Get a per-client handle from the device manager.
    HANDLE device;
    hr = IDirect3DDeviceManager9_OpenDeviceHandle( dxva2->devmng, &device );
    if( FAILED( hr ))
    {
        hb_log( "dxva2:OpenDeviceHandle failed" );
        return HB_WORK_ERROR;
    }
    dxva2->device = device;

    // Ask the device manager for the video decoder service interface.
    IDirectXVideoDecoderService *vs;
    hr = IDirect3DDeviceManager9_GetVideoService( dxva2->devmng, device,
                                                  &IID_IDirectXVideoDecoderService,
                                                  (void*)&vs );
    if( FAILED( hr ))
    {
        hb_log( "dxva2:GetVideoService failed" );
        return HB_WORK_ERROR;
    }
    dxva2->vs = vs;

    return HB_WORK_OK;
}
/**
 * It creates a Direct3D device manager
 *
 * Resolves DXVA2CreateDirect3DDeviceManager9 from dxva2.dll, creates the
 * device manager, then binds the previously created Direct3D device to it
 * via ResetDevice.  Token and manager are stored on the dxva2 context.
 *
 * Returns HB_WORK_OK on success, HB_WORK_ERROR on any failure.
 */
static int hb_d3d_create_device_manager( hb_va_dxva2_t *dxva2 )
{
    // Dynamic lookup keeps the binary loadable without DXVA2 support.
    HRESULT(WINAPI *CreateDeviceManager9)( UINT *pResetToken,
                                           IDirect3DDeviceManager9 ** );
    CreateDeviceManager9 =
        (void*)GetProcAddress( dxva2->hdxva2_dll,
                               TEXT( "DXVA2CreateDirect3DDeviceManager9" ));
    if( !CreateDeviceManager9 )
    {
        hb_log( "dxva2:cannot load function" );
        return HB_WORK_ERROR;
    }

    UINT token;
    IDirect3DDeviceManager9 *devmng;
    if( FAILED( CreateDeviceManager9( &token, &devmng )))
    {
        hb_log( "dxva2:OurDirect3DCreateDeviceManager9 failed" );
        return HB_WORK_ERROR;
    }
    dxva2->token  = token;
    dxva2->devmng = devmng;

    // Associate our Direct3D device with the manager; the reset token
    // authenticates the ownership of the device.
    long hr = IDirect3DDeviceManager9_ResetDevice( devmng, dxva2->d3ddev, token );
    if( FAILED( hr ))
    {
        hb_log( "dxva2:IDirect3DDeviceManager9_ResetDevice failed: %08x",
                (unsigned)hr );
        return HB_WORK_ERROR;
    }

    return HB_WORK_OK;
}
/**
 * It creates a DirectX video service
 *
 * Loads Direct3DCreate9 from d3d9.dll, creates the Direct3D object,
 * records the default adapter identity and builds a minimal windowed
 * present-parameter block, then creates the rendering device used for
 * DXVA2 decoding.  Results are stored on the dxva2 context.
 *
 * Returns HB_WORK_OK on success, HB_WORK_ERROR on any failure.
 */
static int hb_d3d_create_device( hb_va_dxva2_t *dxva2 )
{
    LPDIRECT3D9 (WINAPI *Create9)( UINT SDKVersion );
    Create9 = (void*)GetProcAddress( dxva2->hd3d9_dll,
                                     TEXT( "Direct3DCreate9" ));
    if( !Create9 )
    {
        hb_log( "dxva2:Cannot locate reference to Direct3DCreate9 ABI in DLL" );
        return HB_WORK_ERROR;
    }

    LPDIRECT3D9 d3dobj;
    d3dobj = Create9( D3D_SDK_VERSION );
    if( !d3dobj )
    {
        hb_log( "dxva2:Direct3DCreate9 failed" );
        return HB_WORK_ERROR;
    }
    dxva2->d3dobj = d3dobj;

    // Adapter identity is informational only: on failure we just zero it
    // and carry on.
    D3DADAPTER_IDENTIFIER9 *d3dai = &dxva2->d3dai;
    if( FAILED( IDirect3D9_GetAdapterIdentifier( dxva2->d3dobj,
                                                 D3DADAPTER_DEFAULT, 0, d3dai )))
    {
        hb_log( "dxva2:IDirect3D9_GetAdapterIdentifier failed" );
        memset( d3dai, 0, sizeof(*d3dai));
    }

    // The device needs some window handle; use the desktop shell window.
    // NOTE(review): neither GetModuleHandle("user32") nor the resolved
    // GetShellWindow pointer is NULL-checked before the call below —
    // confirm these can never fail on supported Windows versions.
    PROCGETSHELLWND GetShellWindow;
    HMODULE hUser32 = GetModuleHandle( "user32" );
    GetShellWindow = (PROCGETSHELLWND)
                     GetProcAddress( hUser32, "GetShellWindow" );

    // Minimal present parameters: decoding only, nothing is displayed.
    D3DPRESENT_PARAMETERS *d3dpp = &dxva2->d3dpp;
    memset( d3dpp, 0, sizeof(*d3dpp));
    d3dpp->Flags                 = D3DPRESENTFLAG_VIDEO;
    d3dpp->Windowed              = TRUE;
    d3dpp->hDeviceWindow         = NULL;
    d3dpp->SwapEffect            = D3DSWAPEFFECT_DISCARD;
    d3dpp->MultiSampleType       = D3DMULTISAMPLE_NONE;
    d3dpp->PresentationInterval  = D3DPRESENT_INTERVAL_DEFAULT;
    d3dpp->BackBufferCount       = 0;                   /* FIXME what to put here */
    d3dpp->BackBufferFormat      = D3DFMT_X8R8G8B8;     /* FIXME what to put here */
    d3dpp->BackBufferWidth       = 0;
    d3dpp->BackBufferHeight      = 0;
    d3dpp->EnableAutoDepthStencil = FALSE;

    LPDIRECT3DDEVICE9 d3ddev;
    // Earlier variant used software vertex processing:
    //if (FAILED(IDirect3D9_CreateDevice(d3dobj, D3DADAPTER_DEFAULT, D3DDEVTYPE_HAL, GetShellWindow(), D3DCREATE_SOFTWARE_VERTEXPROCESSING|D3DCREATE_MULTITHREADED, d3dpp, &d3ddev)))
    if( FAILED( IDirect3D9_CreateDevice( d3dobj, D3DADAPTER_DEFAULT,
                                         D3DDEVTYPE_HAL, GetShellWindow(),
                                         D3DCREATE_HARDWARE_VERTEXPROCESSING|D3DCREATE_MULTITHREADED,
                                         d3dpp, &d3ddev )))
    {
        hb_log( "dxva2:IDirect3D9_CreateDevice failed" );
        return HB_WORK_ERROR;
    }
    dxva2->d3ddev = d3ddev;

    return HB_WORK_OK;
}
/*
 * Read the next 192-byte Blu-ray transport packet into 'pkt'.
 *
 * Returns 1 when a packet with a valid sync byte was read, 0 on EOF or
 * short read, -1 on read error.  If sync is lost, re-establishes it via
 * align_to_next_packet() and keeps reading.
 */
static int next_packet( BLURAY *bd, uint8_t *pkt )
{
    int result;

    while ( 1 )
    {
        result = bd_read( bd, pkt, 192 );
        if ( result < 0 )
        {
            return -1;
        }
        if ( result < 192 )
        {
            return 0;
        }
        // Sync byte is byte 4.  0-3 are timestamp.
        if (pkt[4] == 0x47)
        {
            return 1;
        }
        // lost sync - back up to where we started then try to re-establish.
        uint64_t pos  = bd_tell(bd);
        uint64_t pos2 = align_to_next_packet(bd, pkt);
        if ( pos2 == 0 )
        {
            // pos/pos2 are uint64_t: use PRIu64 (PRId64 was a signed/
            // unsigned format mismatch, which is undefined behavior).
            hb_log( "next_packet: eof while re-establishing sync @ %"PRIu64,
                    pos );
            return 0;
        }
        hb_log( "next_packet: sync lost @ %"PRIu64", regained after %"PRIu64" bytes",
                pos, pos2 );
    }
}
hb_net_t * hb_net_open( char * address, int port ) { hb_net_t * n = calloc( sizeof( hb_net_t ), 1 ); struct sockaddr_in sock; struct hostent * host; #ifdef SYS_MINGW WSADATA wsaData; int iResult, winsock_init = 0; // Initialize Winsock if (!winsock_init) { iResult = WSAStartup(MAKEWORD(2, 2), &wsaData); if (iResult != 0) { hb_log("WSAStartup failed: %d", iResult); free(n); return NULL; } winsock_init = 1; } #endif /* TODO: find out why this doesn't work on Win32 */ if( !( host = gethostbyname( address ) ) ) { hb_log( "gethostbyname failed (%s)", address ); free( n ); return NULL; } memset( &sock, 0, sizeof( struct sockaddr_in ) ); sock.sin_family = host->h_addrtype; sock.sin_port = htons( port ); memcpy( &sock.sin_addr, host->h_addr, host->h_length ); if( ( n->socket = socket( host->h_addrtype, SOCK_STREAM, 0 ) ) < 0 ) { hb_log( "socket failed" ); free( n ); return NULL; } if( connect( n->socket, (struct sockaddr *) &sock, sizeof( struct sockaddr_in ) ) < 0 ) { hb_log( "connect failed" ); free( n ); return NULL; } return n; }
/*
 * Set or update the value stored for 'key'.
 *
 * An existing entry's value is freed and replaced; a new entry is
 * appended, growing the object array as needed.  NULL or empty values
 * are stored as NULL.  Keys and values are copied (strdup).
 */
void hb_dict_set( hb_dict_t ** dict_ptr, const char * key, const char * value )
{
    hb_dict_t * dict = *dict_ptr;

    if( !dict )
    {
        hb_log( "hb_dict_set: NULL dictionary" );
        return;
    }
    if( !key || !strlen( key ) )
        return;

    hb_dict_entry_t * entry = hb_dict_get( dict, key );
    if( entry )
    {
        if( entry->value )
        {
            // Nothing to do when the stored value is already identical.
            if( value && !strcmp( value, entry->value ) )
                return;
            free( entry->value );
            entry->value = NULL;
        }
        if( value && strlen( value ) )
            entry->value = strdup( value );
        return;
    }

    if( dict->alloc <= dict->count )
    {
        // Grow the storage.  Guard against alloc == 0 (possible when
        // hb_dict_init failed to allocate its object array): doubling
        // zero would never make room, and the append below would then
        // write past the end of the (empty) array.
        int new_alloc = dict->alloc ? 2 * dict->alloc : 8;
        hb_dict_entry_t * tmp =
            malloc( new_alloc * sizeof( hb_dict_entry_t ) );
        if( !tmp )
        {
            hb_log( "ERROR: could not realloc hb_dict_t objects" );
            return;
        }
        if( dict->objects )
        {
            if( dict->count )
                memcpy( tmp, dict->objects,
                        dict->count * sizeof( hb_dict_entry_t ) );
            free( dict->objects );
        }
        dict->objects = tmp;
        dict->alloc   = new_alloc;
    }

    dict->objects[dict->count].key = strdup( key );
    if( value && strlen( value ) )
        dict->objects[dict->count].value = strdup( value );
    else
        dict->objects[dict->count].value = NULL;
    dict->count++;
}
/***********************************************************************
 * Close Video
 ***********************************************************************
 *
 **********************************************************************/
// Tear down the video side of sync: wake any audio-sync thread still
// blocked on the shared condition, release buffered state, persist the
// frame count for pass 2, and free the shared common block when the
// last reference is dropped.
void syncVideoClose( hb_work_object_t * w )
{
    hb_work_private_t * pv   = w->private_data;
    hb_job_t          * job  = pv->job;
    hb_sync_video_t   * sync = &pv->type.video;

    // Wake up audio sync if it's still waiting on condition.
    pv->common->pts_offset  = 0;
    pv->common->start_found = 1;
    hb_cond_broadcast( pv->common->next_frame );

    if( sync->cur )
    {
        hb_buffer_close( &sync->cur );
    }

    hb_log( "sync: got %d frames, %d expected", pv->common->count_frames,
            sync->count_frames_max );

    /* save data for second pass */
    if( job->pass == 1 )
    {
        /* Preserve frame count for better accuracy in pass 2 */
        hb_interjob_t * interjob = hb_interjob_get( job->h );
        interjob->frame_count = pv->common->count_frames;
        interjob->last_job = job->sequence_id;
    }

    if (sync->drops || sync->dups )
    {
        hb_log( "sync: %d frames dropped, %d duplicated", sync->drops,
                sync->dups );
    }

    // Reference-counted teardown of the state shared with the audio sync
    // workers: only the last worker out destroys the condvar/mutex and
    // frees the common block.  The mutex must be released before
    // hb_lock_close destroys it.
    hb_lock( pv->common->mutex );
    if ( --pv->common->ref == 0 )
    {
        hb_unlock( pv->common->mutex );
        hb_cond_close( &pv->common->next_frame );
        hb_lock_close( &pv->common->mutex );
        free( pv->common->first_pts );
        free( pv->common );
    }
    else
    {
        hb_unlock( pv->common->mutex );
    }

    free( pv );
    w->private_data = NULL;
}
/*
 * Finalize the AVI mux: append the index, close the output file and
 * release the index buffer.  Always returns 0.
 */
static int AVIEnd( hb_mux_object_t * m )
{
    hb_job_t * job = m->job;

    hb_log( "muxavi: writing index" );
    AddIndex( m );

    hb_log( "muxavi: closing %s", job->file );
    // fclose flushes buffered writes; a failure here can mean the index
    // or the tail of the file never reached disk, so report it.
    if( fclose( m->file ) != 0 )
    {
        hb_log( "muxavi: fclose failed on %s", job->file );
    }

    hb_buffer_close( &m->index );

    return 0;
}
/*
 * Work loop for the UTF-8 subtitle decoder: take ownership of the input
 * packet, forward EOF untouched, otherwise convert SRT markup to SSA
 * in place and pass the buffer downstream as a subtitle frame.
 */
static int decutf8Work(hb_work_object_t * w,
                       hb_buffer_t **buf_in, hb_buffer_t **buf_out)
{
    hb_work_private_t * pv  = w->private_data;

    // Take ownership of the incoming buffer.
    hb_buffer_t * buf = *buf_in;
    *buf_in = NULL;

    // End of stream: hand the EOF buffer straight through and stop.
    if (buf->s.flags & HB_BUF_FLAG_EOF)
    {
        *buf_out = buf;
        return HB_WORK_DONE;
    }

    // Warn if the subtitle's duration has not been passed through by the
    // demuxer, which will prevent the subtitle from displaying at all
    if (buf->s.stop == 0)
    {
        hb_log("decutf8sub: subtitle packet lacks duration");
    }

    hb_srt_to_ssa(buf, ++pv->line);
    buf->s.frametype = HB_FRAME_SUBTITLE;
    *buf_out = buf;

    return HB_WORK_OK;
}
/***********************************************************************
 * hb_batch_title_scan
 **********************************************************************/
// Scan the t'th file of the batch and return its title information, or
// NULL if the index is out of range or the file cannot be opened as a
// stream.  Ownership of the returned title passes to the caller.
// NOTE(review): 't' appears to be 1-based (the list lookup uses t - 1)
// while the guard only rejects t < 0; t == 0 relies on hb_list_item
// returning NULL for index -1 — confirm against callers.
hb_title_t * hb_batch_title_scan( hb_batch_t * d, int t )
{
    hb_title_t   * title;
    char         * filename;
    hb_stream_t  * stream;

    if ( t < 0 )
        return NULL;

    filename = hb_list_item( d->list_file, t - 1 );
    if ( filename == NULL )
        return NULL;

    hb_log( "batch: scanning %s", filename );
    title = hb_title_init( filename, 0 );
    stream = hb_stream_open( filename, title, 1 );
    if ( stream == NULL )
    {
        // Could not open as a stream: release the half-built title.
        hb_title_close( &title );
        return NULL;
    }

    title = hb_stream_title_scan( stream, title );
    hb_stream_close( &stream );
    if ( title != NULL )
    {
        title->index = t;
    }

    return title;
}
// Scale 'in' into 'out' on the GPU via the "frame_scale" OpenCL kernel.
// Crop offsets and frame dimensions are marshalled into the kernel
// argument array as integers cast to void* (the registered kernel
// wrapper unpacks them).  Always returns 0, even when the kernel fails
// (the failure is only logged).
int hb_ocl_scale(hb_buffer_t *in, hb_buffer_t *out, int *crop, hb_oclscale_t *os)
{
    void *data[13];

    // Lazily register the kernel; bail out quietly if that failed.
    if (do_scale_init() == 0)
        return 0;

    // Argument layout expected by hb_ocl_scale_func: cl buffers first,
    // then crop[0..3], source w/h, destination w/h, then raw pointers.
    data[0]  = in->cl.buffer;
    data[1]  = out->cl.buffer;
    data[2]  = (void*)(crop[0]);
    data[3]  = (void*)(crop[1]);
    data[4]  = (void*)(crop[2]);
    data[5]  = (void*)(crop[3]);
    data[6]  = (void*)(in->f.width);
    data[7]  = (void*)(in->f.height);
    data[8]  = (void*)(out->f.width);
    data[9]  = (void*)(out->f.height);
    data[10] = os;
    data[11] = in;
    data[12] = out;

    if( !hb_run_kernel( "frame_scale", data ) )
        hb_log( "run kernel[%s] failed", "frame_scale" );

    return 0;
}
/*
 * Pass-through work loop for UTF-8 subtitles: forward each packet after
 * converting SRT markup to SSA in place.  A zero-size buffer marks end
 * of stream and is forwarded untouched.
 */
static int decutf8Work(hb_work_object_t * w,
                       hb_buffer_t **buf_in, hb_buffer_t **buf_out)
{
    hb_work_private_t * pv = w->private_data;

    // Pass the packets through without modification
    hb_buffer_t *out = *buf_in;
    *buf_in  = NULL;
    *buf_out = out;

    // End-of-stream marker.  Check this FIRST: the original code ran the
    // duration warning and hb_srt_to_ssa() on the empty EOF buffer
    // (and bumped the line counter) before noticing it was done.
    if (out->size == 0)
        return HB_WORK_DONE;

    out->s.frametype = HB_FRAME_SUBTITLE;

    // Warn if the subtitle's duration has not been passed through by the
    // demuxer, which will prevent the subtitle from displaying at all
    if (out->s.stop == 0)
    {
        hb_log("decutf8sub: subtitle packet lacks duration");
    }

    hb_srt_to_ssa(out, ++pv->line);

    return HB_WORK_OK;
}
/**
 * lock frame data form surface.
 * nv12 to yuv with opencl and with C reference
 * scale with opencl
 */
// Copy a decoded frame out of the D3D9 surface referenced by
// frame->data[3] into 'dst'.  Only the NV12 render format is handled;
// any other format leaves 'dst' untouched (still returns HB_WORK_OK).
// NOTE(review): job_w/job_h, crop, os and the use_* flags are unused in
// this variant — presumably consumed by an OpenCL path elsewhere;
// confirm before removing them from the signature.
int hb_va_extract( hb_va_dxva2_t *dxva2, uint8_t *dst, AVFrame *frame,
                   int job_w, int job_h, int *crop, hb_oclscale_t *os,
                   int use_opencl, int use_decomb, int use_detelecine )
{
    LPDIRECT3DSURFACE9 d3d = (LPDIRECT3DSURFACE9)(uintptr_t)frame->data[3];
    D3DLOCKED_RECT lock;

    // Lock the surface read-only to get a CPU-visible pointer.
    if( FAILED( IDirect3DSurface9_LockRect( d3d, &lock, NULL, D3DLOCK_READONLY )))
    {
        hb_log( "dxva2:Failed to lock surface" );
        return HB_WORK_ERROR;
    }

    if( dxva2->render == MAKEFOURCC( 'N', 'V', '1', '2' ))
    {
        // NV12 layout: luma plane, then interleaved chroma starting
        // surface_height rows down; both planes share the locked pitch.
        uint8_t *plane[2] =
        {
            lock.pBits,
            (uint8_t*)lock.pBits + lock.Pitch * dxva2->surface_height
        };
        size_t pitch[2] =
        {
            lock.Pitch,
            lock.Pitch,
        };
        hb_copy_from_nv12( dst, plane, pitch, dxva2->width, dxva2->height );
    }

    IDirect3DSurface9_UnlockRect( d3d );
    return HB_WORK_OK;
}
/*
 * VOBSUB "encoder": passes packets through unchanged.
 *
 * Rejects non-VOBSUB sources (shouldn't happen) by emitting an EOF
 * buffer, forwards EOF from upstream, and otherwise hands each input
 * buffer downstream as-is.
 */
int encsubWork( hb_work_object_t * w, hb_buffer_t ** buf_in,
                hb_buffer_t ** buf_out )
{
    hb_buffer_t * in = *buf_in;

    if (w->subtitle->source != VOBSUB)
    {
        // Invalid source, send EOF, this shouldn't ever happen
        hb_log("encvobsub: invalid subtitle source");
        hb_buffer_close( buf_in );
        *buf_out = hb_buffer_init(0);
        // Must stop here: hb_buffer_close() just freed the buffer that
        // 'in' points at, so falling through to the in->size check below
        // (as the original did) dereferenced a dangling pointer.
        return HB_WORK_DONE;
    }
    if ( in->size <= 0 )
    {
        /* EOF on input stream - send it downstream & say that we're done */
        *buf_out = in;
        *buf_in = NULL;
        return HB_WORK_DONE;
    }

    /*
     * Not much to do, just pass the buffer on.
     * Some day, we may re-encode bd subtitles here ;)
     */
    if (buf_out)
    {
        *buf_out = in;
        *buf_in = NULL;
    }

    return HB_WORK_OK;
}
// Initialize the PGS (Blu-ray) subtitle decoder: allocate a libavcodec
// context for AV_CODEC_ID_HDMV_PGS_SUBTITLE plus the private work state.
// Returns 0 on success, 1 if the codec could not be opened.
static int decsubInit( hb_work_object_t * w, hb_job_t * job )
{
    AVCodec *codec = avcodec_find_decoder( AV_CODEC_ID_HDMV_PGS_SUBTITLE );
    AVCodecContext *context = avcodec_alloc_context3( codec );
    context->codec = codec;

    hb_work_private_t * pv;
    pv = calloc( 1, sizeof( hb_work_private_t ) );
    w->private_data = pv;

    hb_buffer_list_clear(&pv->list);
    hb_buffer_list_clear(&pv->list_pass);
    // Start out discarding subtitles until the selection logic decides
    // which ones to keep.
    pv->discard_subtitle = 1;
    pv->seen_forced_sub  = 0;
    pv->last_pts         = AV_NOPTS_VALUE;
    pv->context          = context;
    pv->job              = job;

    // Set decoder opts...
    AVDictionary * av_opts = NULL;
    // e.g. av_dict_set( &av_opts, "refcounted_frames", "1", 0 );

    if (hb_avcodec_open(pv->context, codec, &av_opts, 0))
    {
        av_dict_free( &av_opts );
        hb_log("decsubInit: avcodec_open failed");
        return 1;
    }
    av_dict_free( &av_opts );

    return 0;
}
/* Called whenever necessary by AudioConverterFillComplexBuffer */
/*
 * Supply the converter with up to *npackets of input samples pulled from
 * the fifo, normalizing them to [-1.0, 1.0].  Sets *npackets to 0 when
 * no (or not enough) data is buffered.
 */
static OSStatus inInputDataProc( AudioConverterRef converter, UInt32 *npackets,
                                 AudioBufferList *buffers,
                                 AudioStreamPacketDescription** ignored,
                                 void *userdata )
{
    hb_work_private_t *pv = userdata;

    if( pv->ibytes == 0 )
    {
        // Nothing left to feed: report zero packets, no error.
        *npackets = 0;
        hb_log( "CoreAudio: no data to use in inInputDataProc" );
        return noErr;
    }

    // Free the scratch buffer from the previous callback before
    // allocating the next one (CoreAudio consumes it after we return).
    if( pv->buf != NULL )
        free( pv->buf );

    uint64_t pts, pos;
    buffers->mBuffers[0].mDataByteSize = MIN( *npackets * pv->isamplesiz,
                                              pv->ibytes );
    buffers->mBuffers[0].mData = pv->buf =
        calloc(1 , buffers->mBuffers[0].mDataByteSize );

    if( hb_list_bytes( pv->list ) >= buffers->mBuffers[0].mDataByteSize )
    {
        hb_list_getbytes( pv->list, buffers->mBuffers[0].mData,
                          buffers->mBuffers[0].mDataByteSize, &pts, &pos );
    }
    else
    {
        // Fifo underrun ("enought" typo fixed in the message).
        hb_log( "CoreAudio: Not enough data, exiting inInputDataProc" );
        *npackets = 0;
        return 1;
    }

    *npackets = buffers->mBuffers[0].mDataByteSize / pv->isamplesiz;

    /* transform data from [-32768,32767] to [-1.0,1.0] */
    float *fdata = buffers->mBuffers[0].mData;
    int i;

    for( i = 0; i < *npackets * pv->nchannels; i++ )
    {
        fdata[i] = fdata[i] / 32768.f;
    }

    pv->ibytes -= buffers->mBuffers[0].mDataByteSize;

    return noErr;
}
// Initialize the LAME MP3 encoder for this audio track.
//
// Uses ABR when a target bitrate is configured, otherwise VBR at the
// requested quality.  Channel mode is mono for one channel, plain STEREO
// at >= 128 kbps, and LAME's default (joint stereo) below that.  All
// lame_set_* calls must precede lame_init_params().  Always returns 0.
int enclameInit( hb_work_object_t * w, hb_job_t * job )
{
    hb_work_private_t * pv = calloc( 1, sizeof( hb_work_private_t ) );
    hb_audio_t * audio = w->audio;

    w->private_data = pv;
    pv->job   = job;

    hb_log( "enclame: opening libmp3lame" );

    pv->lame = lame_init();

    // Scale float samples (in [-1,1]) up to the 16-bit range LAME expects.
    // use ABR
    lame_set_scale( pv->lame, 32768.0 );
    if( audio->config.out.compression_level >= 0 )
    {
        lame_set_quality( pv->lame, audio->config.out.compression_level );
    }
    if( audio->config.out.bitrate > 0 )
    {
        lame_set_VBR( pv->lame, vbr_abr );
        lame_set_VBR_mean_bitrate_kbps( pv->lame, audio->config.out.bitrate );
    }
    else if( audio->config.out.quality >= 0 )
    {
        lame_set_brate( pv->lame, 0 );
        lame_set_VBR( pv->lame, vbr_default );
        lame_set_VBR_quality( pv->lame, audio->config.out.quality );
    }
    lame_set_in_samplerate( pv->lame, audio->config.out.samplerate );
    lame_set_out_samplerate( pv->lame, audio->config.out.samplerate );

    pv->out_discrete_channels =
        hb_mixdown_get_discrete_channel_count( audio->config.out.mixdown );
    // Lame's default encoding mode is JOINT_STEREO. This subtracts signal
    // that is "common" to left and right (within some threshold) and encodes
    // it separately.  This improves quality at low bitrates, but hurts
    // imaging (channel separation) at higher bitrates.  So if the bitrate
    // is suffeciently high, use regular STEREO mode.
    if ( pv->out_discrete_channels == 1 )
    {
        lame_set_mode( pv->lame, MONO );
        lame_set_num_channels( pv->lame, 1 );
    }
    else if ( audio->config.out.bitrate >= 128 )
    {
        lame_set_mode( pv->lame, STEREO );
    }
    lame_init_params( pv->lame );

    // 1152 samples per channel per MP3 frame.
    pv->input_samples = 1152 * pv->out_discrete_channels;
    pv->output_bytes = LAME_MAXMP3BUFFER;
    pv->buf = malloc( pv->input_samples * sizeof( float ) );
    audio->config.out.samples_per_frame = 1152;

    pv->list = hb_list_init();
    pv->pts  = -1;

    return 0;
}
/***********************************************************************
 * hb_bd_set_angle
 ***********************************************************************
 * Sets the angle to read
 **********************************************************************/
void hb_bd_set_angle( hb_bd_t * d, int angle )
{
    int selected = bd_select_angle( d->bd, angle );

    if( !selected )
    {
        hb_log("bd_select_angle failed");
    }
}
/*
 * Install x86-optimized NLMeans routines when the CPU supports them.
 * Currently only the SSE2 integral-image builder is provided.
 */
void nlmeans_init_x86(NLMeansFunctions *functions)
{
    const int cpu_flags = av_get_cpu_flags();

    if (cpu_flags & AV_CPU_FLAG_SSE2)
    {
        hb_log("NLMeans using SSE2 optimizations");
        functions->build_integral = build_integral_sse2;
    }
}
/* Called whenever necessary by AudioConverterFillComplexBuffer */
/*
 * Supply the converter with up to *npackets of interleaved float samples
 * pulled from the fifo, remapping channel order to the QuickTime layout
 * when the input map differs.  On empty fifo or underrun, sets *npackets
 * to 0 and returns a nonzero status so the converter stops asking.
 */
static OSStatus inInputDataProc( AudioConverterRef converter, UInt32 *npackets,
                                 AudioBufferList *buffers,
                                 AudioStreamPacketDescription** ignored,
                                 void *userdata )
{
    hb_work_private_t *pv = userdata;

    if( pv->ibytes == 0 )
    {
        *npackets = 0;
        hb_log( "CoreAudio: no data to use in inInputDataProc" );
        return 1;
    }

    // Free the scratch buffer from the previous callback before
    // allocating the next one (CoreAudio consumes it after we return).
    if( pv->buf != NULL )
        free( pv->buf );

    uint64_t pts, pos;
    buffers->mBuffers[0].mDataByteSize = MIN( *npackets * pv->isamplesiz,
                                              pv->ibytes );
    buffers->mBuffers[0].mData = pv->buf =
        calloc( 1, buffers->mBuffers[0].mDataByteSize );

    if( hb_list_bytes( pv->list ) >= buffers->mBuffers[0].mDataByteSize )
    {
        hb_list_getbytes( pv->list, buffers->mBuffers[0].mData,
                          buffers->mBuffers[0].mDataByteSize, &pts, &pos );
    }
    else
    {
        hb_log( "CoreAudio: Not enough data, exiting inInputDataProc" );
        *npackets = 0;
        return 1;
    }

    // Remap to the QuickTime channel order expected downstream.
    if( pv->ichanmap != &hb_qt_chan_map )
    {
        hb_layout_remap( pv->ichanmap, &hb_qt_chan_map, pv->layout,
                         (float*)buffers->mBuffers[0].mData,
                         buffers->mBuffers[0].mDataByteSize / pv->isamplesiz );
    }

    *npackets = buffers->mBuffers[0].mDataByteSize / pv->isamplesiz;

    pv->ibytes -= buffers->mBuffers[0].mDataByteSize;

    return noErr;
}
/*
 * Hand an option key/value pair to x265's own parser and log a
 * diagnostic for the two recognizable failure codes.  Returns x265's
 * result code unchanged.
 */
static int param_parse(hb_work_private_t *pv, x265_param *param,
                       const char *key, const char *value)
{
    // let x265 sanity check the options for us
    int ret = pv->api->param_parse(param, key, value);

    if (ret == X265_PARAM_BAD_NAME)
    {
        hb_log("encx265: unknown option '%s'", key);
    }
    else if (ret == X265_PARAM_BAD_VALUE)
    {
        hb_log("encx265: bad argument '%s=%s'", key, value ? value : "(null)");
    }

    return ret;
}
/*
 * Attach a free decoder surface to the AVFrame for hardware decoding.
 * Returns HB_WORK_OK on success, HB_WORK_ERROR when no surface could
 * be grabbed.
 */
int hb_va_get_frame_buf( hb_va_dxva2_t *dxva2, AVCodecContext *p_context,
                         AVFrame *frame )
{
    frame->type = FF_BUFFER_TYPE_USER;

    int grabbed = hb_va_get( dxva2, frame );
    if( grabbed == HB_WORK_ERROR )
    {
        hb_log( "VaGrabSurface failed" );
        return HB_WORK_ERROR;
    }

    return HB_WORK_OK;
}
// Finish dxva2 context setup once the codec's frame dimensions are known:
// run hb_va_setup, reset the PTS tracking slots and disable the unused
// rendering callback.
// NOTE(review): on setup failure only the LOCAL pointer is NULL'ed after
// hb_va_close — the caller's dxva2 still points at the closed context;
// confirm callers detect the failure some other way.
void hb_va_new_dxva2( hb_va_dxva2_t *dxva2, AVCodecContext *p_context )
{
    if( p_context->width > 0 && p_context->height > 0 )
    {
        if( hb_va_setup( dxva2, &p_context->hwaccel_context, p_context->width,
                         p_context->height ) == HB_WORK_ERROR )
        {
            hb_log( "dxva2:hb_va_Setup failed" );
            hb_va_close( dxva2 );
            dxva2 = NULL;
        }
    }
    if( dxva2 )
    {
        dxva2->input_pts[0] = 0;
        dxva2->input_pts[1] = 0;
        if( dxva2->description )
            hb_log( "dxva2:Using %s for hardware decoding", dxva2->description );
        // No display output: the decode-only path never draws.
        p_context->draw_horiz_band = NULL;
    }
}
/***********************************************************************
 * hb_bd_init
 ***********************************************************************
 *
 **********************************************************************/
// Try to open 'path' as a Blu-ray disc and collect info for every
// relevant title, sorted in playlist (mpls) order.  Returns NULL with
// everything cleaned up when the path is not a BD, so the caller can
// fall back to the generic stream reader.
hb_bd_t * hb_bd_init( hb_handle_t *h, char * path )
{
    hb_bd_t * d;
    int ii;

    d = calloc( sizeof( hb_bd_t ), 1 );
    d->h = h;

    /* Open device */
    d->bd = bd_open( path, NULL );
    if( d->bd == NULL )
    {
        /*
         * Not an error, may be a stream - which we'll try in a moment.
         */
        hb_log( "bd: not a bd - trying as a stream/file instead" );
        goto fail;
    }

    d->title_count = bd_get_titles( d->bd, TITLES_RELEVANT, 0 );
    if ( d->title_count == 0 )
    {
        hb_log( "bd: not a bd - trying as a stream/file instead" );
        goto fail;
    }

    d->title_info = calloc( sizeof( BLURAY_TITLE_INFO* ) , d->title_count );
    for ( ii = 0; ii < d->title_count; ii++ )
    {
        d->title_info[ii] = bd_get_title_info( d->bd, ii, 0 );
    }
    // Present titles in playlist order rather than raw disc order.
    qsort(d->title_info, d->title_count, sizeof( BLURAY_TITLE_INFO* ),
          title_info_compare_mpls );
    d->path = strdup( path );

    return d;

fail:
    if( d->bd ) bd_close( d->bd );
    free( d );
    return NULL;
}
/**
 * create dxva2 service
 * load library D3D9.dll
 *
 * Builds a complete DXVA2 decoding context for 'codec_id': loads
 * d3d9.dll and dxva2.dll, creates the D3D device, device manager and
 * video service, and finds an input/render format conversion.  Any
 * previously passed-in context is closed first.  Returns the new
 * context, or NULL (with partial state cleaned up) on failure.
 */
hb_va_dxva2_t * hb_va_create_dxva2( hb_va_dxva2_t *dxva2, int codec_id )
{
    // Dispose of any context the caller handed in; a fresh one is built.
    if( dxva2 )
    {
        hb_va_close( dxva2 );
        dxva2 = NULL;
    }

    hb_va_dxva2_t *dxva = calloc( 1, sizeof(*dxva) );
    if( !dxva ) return NULL;
    dxva->codec_id = codec_id;

    dxva->hd3d9_dll = LoadLibrary( TEXT( "D3D9.DLL" ) );
    if( !dxva->hd3d9_dll )
    {
        hb_log( "dxva2:cannot load d3d9.dll" );
        goto error;
    }
    dxva->hdxva2_dll = LoadLibrary( TEXT( "DXVA2.DLL" ) );
    if( !dxva->hdxva2_dll )
    {
        hb_log( "dxva2:cannot load DXVA2.dll" );
        goto error;
    }

    // The setup steps below must run in this order: device, then device
    // manager, then video service, then format negotiation.
    if( hb_d3d_create_device( dxva ) == HB_WORK_ERROR )
    {
        hb_log( "dxva2:Failed to create Direct3D device" );
        goto error;
    }
    if( hb_d3d_create_device_manager( dxva ) == HB_WORK_ERROR )
    {
        hb_log( "dxva2:D3dCreateDeviceManager failed" );
        goto error;
    }
    if( hb_dx_create_video_service( dxva ) == HB_WORK_ERROR )
    {
        hb_log( "dxva2:DxCreateVideoService failed" );
        goto error;
    }
    if( hb_dx_find_video_service_conversion( dxva, &dxva->input,
                                             &dxva->render ) == HB_WORK_ERROR )
    {
        hb_log( "dxva2:DxFindVideoServiceConversion failed" );
        goto error;
    }

    dxva->do_job = HB_WORK_OK;
    dxva->description = "DXVA2";

    return dxva;

error:
    // hb_va_close releases whatever subset of the above was created.
    hb_va_close( dxva );
    return NULL;
}
hb_dict_t * hb_dict_init( int alloc ) { hb_dict_t * dict = NULL; dict = malloc( sizeof( hb_dict_t ) ); if( !dict ) { hb_log( "ERROR: could not allocate hb_dict_t" ); return NULL; } dict->count = 0; dict->objects = malloc( alloc * sizeof( hb_dict_entry_t ) ); if( !dict->objects ) { hb_log( "ERROR: could not allocate hb_dict_t objects" ); dict->alloc = 0; } else { dict->alloc = alloc; } return dict; }
/*
 * Feed one input frame to the x265 encoder and return any encoded
 * output as an hb_buffer (NULL while the encoder is still buffering).
 * Tracks input-timestamp continuity and chapter-start frames.
 */
static hb_buffer_t* x265_encode(hb_work_object_t *w, hb_buffer_t *in)
{
    hb_work_private_t *pv = w->private_data;
    hb_job_t *job         = pv->job;
    x265_picture pic_in, pic_out;
    x265_nal *nal;
    uint32_t nnal;

    pv->api->picture_init(pv->param, &pic_in);

    // Hand x265 the planes directly; no copy is made here.
    pic_in.stride[0] = in->plane[0].stride;
    pic_in.stride[1] = in->plane[1].stride;
    pic_in.stride[2] = in->plane[2].stride;
    pic_in.planes[0] = in->plane[0].data;
    pic_in.planes[1] = in->plane[1].data;
    pic_in.planes[2] = in->plane[2].data;
    pic_in.poc       = pv->frames_in++;
    pic_in.pts       = in->s.start;
    pic_in.bitDepth  = 8;

    if (in->s.new_chap && job->chapter_markers)
    {
        /*
         * Chapters have to start with an IDR frame so request that this
         * frame be coded as IDR.  Since there may be up to 16 frames
         * currently buffered in the encoder, remember the timestamp so
         * when this frame finally pops out of the encoder we'll mark
         * its buffer as the start of a chapter.
         */
        pic_in.sliceType = X265_TYPE_IDR;
        hb_chapter_enqueue(pv->chapter_queue, in);
    }
    else
    {
        pic_in.sliceType = X265_TYPE_AUTO;
    }

    // Input timestamps should be contiguous: each frame's start should
    // equal the previous frame's stop.
    if (pv->last_stop != AV_NOPTS_VALUE && pv->last_stop != in->s.start)
    {
        hb_log("encx265 input continuity err: last stop %"PRId64"  start %"PRId64,
               pv->last_stop, in->s.start);
    }
    pv->last_stop = in->s.stop;
    // Stash per-frame metadata keyed by pts for when the frame emerges.
    save_frame_info(pv, in);

    if (pv->api->encoder_encode(pv->x265, &nal, &nnal, &pic_in, &pic_out) > 0)
    {
        return nal_encode(w, &pic_out, nal, nnal);
    }
    return NULL;
}
/***********************************************************************
 * Encode
 ***********************************************************************
 *
 **********************************************************************/
// Pull one 1152-sample frame per channel from the fifo, de-interleave
// into per-channel arrays and run it through LAME.  Returns an encoded
// buffer, recurses when LAME buffered the frame and produced no bytes,
// and returns NULL when there is not enough input or on encode error.
static hb_buffer_t * Encode( hb_work_object_t * w )
{
    hb_work_private_t * pv = w->private_data;
    hb_audio_t * audio = w->audio;
    hb_buffer_t * buf;
    float samples[2][1152];
    uint64_t pts, pos;
    int      i, j;

    // Need a full MP3 frame's worth of samples before encoding.
    if( hb_list_bytes( pv->list ) < pv->input_samples * sizeof( float ) )
    {
        return NULL;
    }

    hb_list_getbytes( pv->list, pv->buf, pv->input_samples * sizeof( float ),
                      &pts, &pos);

    // De-interleave: fifo holds channel-interleaved floats, LAME wants
    // one contiguous array per channel.
    for( i = 0; i < 1152; i++ )
    {
        for( j = 0; j < pv->out_discrete_channels; j++ )
        {
            samples[j][i] = ((float *) pv->buf)[(pv->out_discrete_channels * i + j)];
        }
    }

    buf = hb_buffer_init( pv->output_bytes );
    // Timestamp: fifo pts plus the offset of this frame within the fifo,
    // converted from samples to 90 kHz clock ticks.
    buf->s.start = pts + 90000 * pos / pv->out_discrete_channels /
                   sizeof( float ) / audio->config.out.samplerate;
    buf->s.stop  = buf->s.start + 90000 * 1152 / audio->config.out.samplerate;
    pv->pts = buf->s.stop;
    buf->size = lame_encode_buffer_float(
            pv->lame, samples[0], samples[1],
            1152, buf->data, LAME_MAXMP3BUFFER );

    buf->s.type = AUDIO_BUF;
    buf->s.frametype = HB_FRAME_AUDIO;

    if( !buf->size )
    {
        /* Encoding was successful but we got no data. Try to encode more */
        hb_buffer_close( &buf );
        return Encode( w );
    }
    else if( buf->size < 0 )
    {
        hb_log( "enclame: lame_encode_buffer failed" );
        hb_buffer_close( &buf );
        return NULL;
    }
    return buf;
}
/*
 * One-time registration of the "frame_scale" OpenCL kernel wrapper.
 * Returns 1 once the kernel is registered (including on later calls),
 * 0 if registration failed.
 */
int do_scale_init()
{
    if ( s_scale_init_flag != 0 )
    {
        // Already registered on a previous call.
        return 1;
    }

    if ( !hb_register_kernel_wrapper( "frame_scale", hb_ocl_scale_func ) )
    {
        hb_log( "register kernel[%s] failed", "frame_scale" );
        return 0;
    }

    s_scale_init_flag++;
    return 1;
}