value ffmpeg_close(value ctx)
{
  CAMLparam1(ctx);
  if (Context_val(ctx)->fmtCtx) {
    AVFormatContext* fmtCtx = Context_val(ctx)->fmtCtx;
    caml_enter_blocking_section();
    if (fmtCtx->pb) {
      av_write_trailer(fmtCtx);
    }
    //avcodec_close(Context_val(ctx)->avstream->codec); ??
    /* Close the output file (if any) before freeing the context that owns pb.
       AVFMT_NOFILE is an output-format flag, so test it on oformat->flags. */
    if (!(fmtCtx->oformat->flags & AVFMT_NOFILE)) {
      int ret = avio_close(fmtCtx->pb);
      raise_and_leave_blocking_section_if_not(ret >= 0, ExnFileIO, ret);
    }
    avformat_free_context(fmtCtx);
    caml_leave_blocking_section();
    Context_val(ctx)->fmtCtx = NULL;
    free(Context_val(ctx)->filename);
    Context_val(ctx)->filename = NULL;
  }
  CAMLreturn(Val_unit);
}
value ffmpeg_open_input(value filename_)
{
  CAMLparam1(filename_);
  CAMLlocal1(ctx);
  av_register_all(); // this is fast to redo
  ctx = caml_alloc_custom(&context_ops, sizeof(struct Context), 0, 1);
  Context_val(ctx)->filename = strdup(String_val(filename_));
  Context_val(ctx)->fmtCtx = NULL; /* so a close after a failed open sees no context */

  int ret;
  AVFormatContext* fmtCtx = NULL; /* must be NULL so avformat_open_input allocates it */
  char* filename = Context_val(ctx)->filename;
  caml_enter_blocking_section();
  ret = avformat_open_input(&fmtCtx, filename, NULL, NULL);
  raise_and_leave_blocking_section_if_not(ret >= 0, ExnOpen, ret);
  ret = avformat_find_stream_info(fmtCtx, NULL);
  raise_and_leave_blocking_section_if_not(ret >= 0, ExnStreamInfo, ret);
  caml_leave_blocking_section();

  Context_val(ctx)->fmtCtx = fmtCtx;
  CAMLreturn(ctx);
}
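/* The ffmpeg stubs above and below (ffmpeg_open_input, ffmpeg_create,
 * ffmpeg_open, ffmpeg_close, ffmpeg_stream_new*) rely on a `struct Context`
 * custom block, its `Context_val` accessor and the `context_ops` operations
 * table, none of which appear in this excerpt. The following is only a
 * minimal sketch of what those declarations might look like: the field names
 * `fmtCtx` and `filename` come from their uses above, everything else
 * (identifier string, finalizer) is an assumption. Note that the yices, zmq
 * and gphoto2 stubs below each define their own, unrelated `Context_val`. */
#include <stdlib.h>
#include <caml/mlvalues.h>
#include <caml/custom.h>
#include <libavformat/avformat.h>

struct Context {
  AVFormatContext *fmtCtx;   /* NULL once the context has been closed */
  char            *filename; /* strdup'ed copy, safe to read while the runtime lock is released */
};

#define Context_val(v) ((struct Context *) Data_custom_val(v))

/* Hypothetical finalizer: releases whatever ffmpeg_close did not. */
static void context_finalize(value v)
{
  free(Context_val(v)->filename);
}

static struct custom_operations context_ops = {
  "ffmpeg.context",            /* identifier string is an assumption */
  context_finalize,
  custom_compare_default,
  custom_hash_default,
  custom_serialize_default,
  custom_deserialize_default
};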
static value stub_wrap_new(void *wrappable, struct custom_operations *ops)
{
  CAMLparam0 ();
  CAMLlocal1 (wrap);
  wrap = caml_alloc_custom(ops, sizeof (struct wrap), 0, 1);
  Context_val(wrap)->wrapped = wrappable;
  Context_val(wrap)->terminated = 0;
  CAMLreturn (wrap);
}
value ffmpeg_stream_new_video(value ctx, value video_info_)
{
  CAMLparam2(ctx, video_info_);
  CAMLlocal1(stream);
  stream = caml_alloc_tuple(StreamSize);
  AVCodec* codec = avcodec_find_encoder(AV_CODEC_ID_H264);
  int ret;

  Stream_aux_direct_val(stream) = caml_alloc_custom(&streamaux_ops, sizeof(struct StreamAux), 0, 1);
  Stream_aux_val(stream)->type = Val_int(STREAM_VIDEO);
  Stream_context_direct_val(stream) = ctx;
  Stream_aux_val(stream)->avstream = avformat_new_stream(Context_val(ctx)->fmtCtx, codec);
  Stream_aux_val(stream)->avstream->codec->codec_id = AV_CODEC_ID_H264;
  /* Stream_aux_val(stream)->avstream->codec->rc_min_rate = 50000; */
  /* Stream_aux_val(stream)->avstream->codec->rc_max_rate = 200000; */
  /* Stream_aux_val(stream)->avstream->codec->bit_rate = 10000; */
  Stream_aux_val(stream)->avstream->codec->width = Int_val(Field(video_info_, 0));
  Stream_aux_val(stream)->avstream->codec->height = Int_val(Field(video_info_, 1));
  Stream_aux_val(stream)->avstream->codec->pix_fmt = AV_PIX_FMT_YUV420P;
  //Stream_aux_val(stream)->avstream->codec->gop_size = 30;
  if (Context_val(ctx)->fmtCtx->oformat->flags & AVFMT_GLOBALHEADER) {
    Stream_aux_val(stream)->avstream->codec->flags |= AV_CODEC_FLAG_GLOBAL_HEADER;
  }
  Stream_aux_val(stream)->avstream->time_base = (AVRational) {1, 10000};

  AVDictionary* codecOpts = NULL;
  /* av_dict_set(&codecOpts, "profile", "baseline", 0); */
  /* av_dict_set(&codecOpts, "crf", "3", 0); */
  /* av_dict_set(&codecOpts, "vbr", "1", 0); */
  //av_dict_set(&codecOpts, "x264-params", "bitrate=2", 0);
  //av_dict_set(&codecOpts, "x264-params", "crf=40:keyint=60:vbv_bufsize=40000:vbv_maxrate=150000", 0);
  av_dict_set(&codecOpts, "x264-params", "crf=36:keyint=60", 0);

  AVCodecContext* codecCtx = Stream_aux_val(stream)->avstream->codec;
  caml_enter_blocking_section();
  ret = avcodec_open2(codecCtx, codec, &codecOpts);
  raise_and_leave_blocking_section_if_not(ret >= 0, ExnOpen, ret);
  caml_leave_blocking_section();

  assert(Stream_aux_val(stream)->avstream->codec->pix_fmt == AV_PIX_FMT_YUV420P);

  Stream_aux_val(stream)->swsCtx =
    sws_getContext(Stream_aux_val(stream)->avstream->codec->width,
                   Stream_aux_val(stream)->avstream->codec->height,
                   USER_PIXFORMAT,
                   Stream_aux_val(stream)->avstream->codec->width,
                   Stream_aux_val(stream)->avstream->codec->height,
                   Stream_aux_val(stream)->avstream->codec->pix_fmt,
                   0, NULL, NULL, NULL);

  CAMLreturn(stream);
}
value ffmpeg_stream_new_audio(value ctx, value audio_info_)
{
  CAMLparam2(ctx, audio_info_);
  CAMLlocal1(stream);
  AVCodec* codec = avcodec_find_encoder(AV_CODEC_ID_AAC);
  stream = caml_alloc_tuple(StreamSize);
  int ret;

  Stream_aux_direct_val(stream) = caml_alloc_custom(&streamaux_ops, sizeof(struct StreamAux), 0, 1);
  Stream_aux_val(stream)->type = Val_int(STREAM_AUDIO);
  Stream_context_direct_val(stream) = ctx;
  Stream_aux_val(stream)->avstream = avformat_new_stream(Context_val(ctx)->fmtCtx, codec);
  Stream_aux_val(stream)->avstream->codec->codec_id = AV_CODEC_ID_AAC;
  Stream_aux_val(stream)->avstream->codec->sample_rate = Int_val(Field(audio_info_, 0));
  Stream_aux_val(stream)->avstream->codec->channels = Int_val(Field(audio_info_, 1));
  Stream_aux_val(stream)->avstream->codec->sample_fmt = codec->sample_fmts ? codec->sample_fmts[0] : AV_SAMPLE_FMT_FLTP;
  Stream_aux_val(stream)->avstream->codec->channel_layout = AV_CH_LAYOUT_STEREO;
  //Stream_aux_val(stream)->avstream->codec->channels = av_get_channel_layout_nb_channels(Stream_aux_val(stream)->avstream->codec->channel_layout);
  if (Context_val(ctx)->fmtCtx->oformat->flags & AVFMT_GLOBALHEADER) {
    Stream_aux_val(stream)->avstream->codec->flags |= AV_CODEC_FLAG_GLOBAL_HEADER;
  }
  Stream_aux_val(stream)->avstream->time_base = (AVRational) {1, 10000};

  AVDictionary* codecOpts = NULL;
  AVCodecContext* codecCtx = Stream_aux_val(stream)->avstream->codec;
  caml_enter_blocking_section();
  ret = avcodec_open2(codecCtx, codec, &codecOpts);
  raise_and_leave_blocking_section_if_not(ret >= 0, ExnOpen, ret);
  caml_leave_blocking_section();

  if (Stream_aux_val(stream)->avstream->codec->sample_fmt != AV_SAMPLE_FMT_S16) {
    Stream_aux_val(stream)->swrCtx = swr_alloc();
    assert(Stream_aux_val(stream)->swrCtx);
    av_opt_set_int       (Stream_aux_val(stream)->swrCtx, "in_channel_count",  Stream_aux_val(stream)->avstream->codec->channels, 0);
    av_opt_set_int       (Stream_aux_val(stream)->swrCtx, "in_sample_rate",    Stream_aux_val(stream)->avstream->codec->sample_rate, 0);
    av_opt_set_sample_fmt(Stream_aux_val(stream)->swrCtx, "in_sample_fmt",     AV_SAMPLE_FMT_S16, 0);
    av_opt_set_int       (Stream_aux_val(stream)->swrCtx, "out_channel_count", Stream_aux_val(stream)->avstream->codec->channels, 0);
    av_opt_set_int       (Stream_aux_val(stream)->swrCtx, "out_sample_rate",   Stream_aux_val(stream)->avstream->codec->sample_rate, 0);
    av_opt_set_sample_fmt(Stream_aux_val(stream)->swrCtx, "out_sample_fmt",    Stream_aux_val(stream)->avstream->codec->sample_fmt, 0);
  }

  CAMLreturn(stream);
}
static void _oy_context_finalize(value v_context)
{
  context_t *context = Context_val(v_context);
  if (context != NULL) {
    yices_free_context(context);
    Store_context_val(v_context, NULL);
  }
}
CAMLprim value caml_sha256_final(value ctx)
{
  CAMLparam1(ctx);
  CAMLlocal1(res);
  res = alloc_string(32);
  SHA256_finish(Context_val(ctx), &Byte_u(res, 0));
  CAMLreturn(res);
}
CAMLprim value caml_md5_final(value ctx)
{
  CAMLparam1(ctx);
  CAMLlocal1(res);
  res = alloc_string(16);
  caml_MD5Final(&Byte_u(res, 0), Context_val(ctx));
  CAMLreturn(res);
}
CAMLprim value caml_gp_camera_get_config(value camera_val, value context_val)
{
  CAMLparam2(camera_val, context_val);
  Camera *camera = Camera_val(camera_val);
  GPContext *context = Context_val(context_val);
  CameraWidget *widget;
  int ret = gp_camera_get_config(camera, &widget, context);
  CHECK_RESULT(ret);
  CAMLreturn(encapsulate_pointer(widget));
}
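/* The libgphoto2 stubs in this file depend on two helpers defined elsewhere:
 * CHECK_RESULT, which converts a negative gphoto2 return code into an OCaml
 * exception, and encapsulate_pointer, which boxes a C pointer for the OCaml
 * side. The definitions below are only a rough sketch of that contract, not
 * the bindings' actual implementation (which may raise a dedicated exception
 * and use a custom block with a finalizer instead). */
#include <caml/mlvalues.h>
#include <caml/alloc.h>
#include <caml/fail.h>
#include <gphoto2/gphoto2.h>

#define CHECK_RESULT(ret)                                                  \
  do {                                                                     \
    if ((ret) < GP_OK) caml_failwith(gp_result_as_string(ret));            \
  } while (0)

static value encapsulate_pointer(void *p)
{
  /* Abstract_tag blocks are not scanned by the GC, so a raw pointer can be
   * stored directly in the first field. */
  value v = caml_alloc_small(1, Abstract_tag);
  Field(v, 0) = (value) p;
  return v;
}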
CAMLprim value caml_gp_camera_capture_preview(value vcam, value vcon)
{
  CAMLparam2(vcam, vcon);
  Camera *camera = Camera_val(vcam);
  GPContext *context = Context_val(vcon);
  CameraFile *file;
  /* Check the return codes like the other stubs do instead of ignoring them. */
  int ret = gp_file_new(&file);
  CHECK_RESULT(ret);
  ret = gp_camera_capture_preview(camera, file, context);
  CHECK_RESULT(ret);
  CAMLreturn(encapsulate_pointer(file));
}
CAMLprim value ocamlyices_context_stop_search(value v_context)
{
  CAMLparam1(v_context);
  context_t *context = Context_val(v_context);
  COND_MT_START(MTFLAG_STOP_SEARCH);
  yices_stop_search(context);
  COND_MT_END(MTFLAG_STOP_SEARCH);
  CAMLreturn(Val_unit);
}
value ffmpeg_open(value ctx)
{
  CAMLparam1(ctx);
  int ret;
  char* filename = Context_val(ctx)->filename;
  AVFormatContext* fmtCtx = Context_val(ctx)->fmtCtx;
  caml_enter_blocking_section();
  /* AVFMT_NOFILE is an output-format flag, so test it on oformat->flags. */
  if (!(fmtCtx->oformat->flags & AVFMT_NOFILE)) {
    ret = avio_open(&fmtCtx->pb, filename, AVIO_FLAG_WRITE);
    raise_and_leave_blocking_section_if_not(ret >= 0, ExnFileIO, ret);
  }
  ret = avformat_write_header(fmtCtx, NULL);
  caml_leave_blocking_section();
  raise_if_not(ret >= 0, ExnWriteHeader, ret);
  CAMLreturn(Val_unit);
}
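/* Error handling in the ffmpeg stubs goes through raise_if_not and
 * raise_and_leave_blocking_section_if_not, whose definitions are not part of
 * this excerpt. The sketch below only illustrates the intended contract,
 * assuming ExnOpen, ExnFileIO, ExnWriteHeader, etc. are strings naming
 * exceptions registered with Callback.register_exception; the real macros
 * may use an enum and a lookup table instead. */
#include <caml/mlvalues.h>
#include <caml/callback.h>
#include <caml/fail.h>
#include <caml/signals.h>

#define raise_with_code(exn_name, err)                                     \
  do {                                                                     \
    const value *_e = caml_named_value(exn_name);                          \
    if (_e != NULL) caml_raise_with_arg(*_e, Val_int(err));                \
    else caml_failwith(exn_name);                                          \
  } while (0)

#define raise_if_not(cond, exn_name, err)                                  \
  do { if (!(cond)) raise_with_code(exn_name, err); } while (0)

/* Re-acquire the runtime lock before raising from inside a blocking section. */
#define raise_and_leave_blocking_section_if_not(cond, exn_name, err)       \
  do {                                                                     \
    if (!(cond)) {                                                         \
      caml_leave_blocking_section();                                       \
      raise_with_code(exn_name, err);                                      \
    }                                                                      \
  } while (0)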
CAMLprim value caml_gp_camera_set_config(value camera_val, value context_val, value widget_val)
{
  CAMLparam3(camera_val, context_val, widget_val);
  Camera *camera = Camera_val(camera_val);
  GPContext *context = Context_val(context_val);
  CameraWidget *widget = Widget_val(widget_val);
  int ret = gp_camera_set_config(camera, widget, context);
  CHECK_RESULT(ret);
  CAMLreturn(Val_unit);
}
CAMLprim value socket_stub(value ctx, value socket_kind)
{
  CAMLparam2 (ctx, socket_kind);
  struct wrap *context = Context_val(ctx);
  caml_release_runtime_system();
  void *socket = zmq_socket(context->wrapped, Int_val(socket_kind));
  caml_acquire_runtime_system();
  stub_raise_if (socket == NULL);
  CAMLreturn (stub_socket_new(socket));
}
CAMLprim value caml_gp_camera_file_delete(value camera_val, value folder_val,
                                          value fname_val, value context_val)
{
  CAMLparam4(camera_val, folder_val, fname_val, context_val);
  Camera *camera = Camera_val(camera_val);
  GPContext *context = Context_val(context_val);
  const char *folder = String_val(folder_val);
  const char *fname = String_val(fname_val);
  int ret = gp_camera_file_delete(camera, folder, fname, context);
  CHECK_RESULT(ret);
  CAMLreturn(Val_unit);
}
CAMLprim value ocamlyices_context_status(value v_context)
{
  CAMLparam1(v_context);
  context_t *context = Context_val(v_context);
  if (context == NULL) {
    _oy_freed_context_error();
  }
  smt_status_t status = yices_context_status(context);
  CAMLreturn(Val_int((int)status));
}
value ffmpeg_create(value filename_)
{
  CAMLparam1(filename_);
  CAMLlocal1(ctx);
  av_register_all(); // this is fast to redo
  ctx = caml_alloc_custom(&context_ops, sizeof(struct Context), 0, 1);
  Context_val(ctx)->filename = strdup(String_val(filename_));
  Context_val(ctx)->fmtCtx = NULL; /* so a close after a failed create sees no context */

  int ret;
  AVFormatContext* fmtCtx = NULL;
  /* Use the strdup'ed copy: the OCaml string must not be touched while the
     runtime lock is released. */
  char* filename = Context_val(ctx)->filename;
  caml_enter_blocking_section();
  ret = avformat_alloc_output_context2(&fmtCtx, NULL, NULL, filename);
  caml_leave_blocking_section();
  raise_if_not(ret >= 0, ExnContextAlloc, ret);

  Context_val(ctx)->fmtCtx = fmtCtx;
  CAMLreturn(ctx);
}
CAMLprim value ocamlyices_context_check(value v_param_opt, value v_context)
{
  CAMLparam2(v_param_opt, v_context);
  smt_status_t res;
  context_t *context = Context_val(v_context);
  param_t *param = (Is_block(v_param_opt) ? Params_val(Field(v_param_opt, 0)) : NULL);
  COND_MT_START(MTFLAG_CHECK);
  res = yices_check_context(context, param);
  COND_MT_END(MTFLAG_CHECK);
  CAMLreturn(Val_int((int)res));
}
CAMLprim value ocamlyices_context_assert_blocking_clause(value v_context)
{
  CAMLparam1(v_context);
  int32_t res;
  context_t *context = Context_val(v_context);
  COND_MT_START(MTFLAG_ASSERT);
  res = yices_assert_blocking_clause(context);
  COND_MT_END(MTFLAG_ASSERT);
  if (res != 0) {
    _oy_error();
  }
  CAMLreturn(Val_unit);
}
CAMLprim value ocamlyices_context_reset(value v_context)
{
  CAMLparam1(v_context);
  context_t *context = Context_val(v_context);
  if (context == NULL) {
    _oy_freed_context_error();
  }
  COND_MT_START(MTFLAG_STACKOP);
  yices_reset_context(context);
  COND_MT_END(MTFLAG_STACKOP);
  CAMLreturn(Val_unit);
}
CAMLprim value term_stub(value ctx)
{
  CAMLparam1 (ctx);
  struct wrap *context = Context_val(ctx);
  if (!context->terminated) {
    caml_release_runtime_system();
    int result = zmq_term(context->wrapped);
    caml_acquire_runtime_system();
    stub_raise_if (result == -1);
    /* If raised this doesn't get executed */
    context->terminated = 1;
  }
  CAMLreturn (Val_unit);
}
CAMLprim value caml_gp_camera_file_get(value camera_val, value folder_val,
                                       value fname_val, value type_val, value context_val)
{
  CAMLparam5(camera_val, folder_val, fname_val, type_val, context_val);
  Camera *camera = Camera_val(camera_val);
  GPContext *context = Context_val(context_val);
  const char *folder = String_val(folder_val);
  const char *fname = String_val(fname_val);
  CameraFileType ftype = (CameraFileType)Int_val(type_val);
  CameraFile *file;
  int ret = gp_file_new(&file); /* check the allocation like the other calls */
  CHECK_RESULT(ret);
  ret = gp_camera_file_get(camera, folder, fname, ftype, file, context);
  CHECK_RESULT(ret);
  CAMLreturn(encapsulate_pointer(file));
}
static inline value _oy_context_stackop(int32_t (*f)(context_t *), value v_context)
{
  CAMLparam1(v_context);
  int32_t res;
  context_t *context;
  context = Context_val(v_context);
  COND_MT_START(MTFLAG_STACKOP);
  res = (*f)(context);
  COND_MT_END(MTFLAG_STACKOP);
  if (res != 0) {
    _oy_error();
  }
  CAMLreturn(Val_unit);
}
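/* The Yices stubs use COND_MT_START/COND_MT_END and the _oy_* error helpers
 * defined elsewhere in the bindings. As an assumption about their role: the
 * COND_MT pair conditionally releases and reacquires the OCaml runtime lock
 * around potentially long Yices calls, and _oy_error raises an OCaml
 * exception for the current Yices error. The flag values, exception name and
 * payload below are hypothetical. */
#include <caml/mlvalues.h>
#include <caml/threads.h>
#include <caml/callback.h>
#include <caml/fail.h>
#include <yices.h>

/* Hypothetical flag bits; the real bindings define their own set. */
enum { MTFLAG_CHECK = 1, MTFLAG_ASSERT = 2, MTFLAG_STACKOP = 4, MTFLAG_STOP_SEARCH = 8 };
#define MTFLAGS (MTFLAG_CHECK | MTFLAG_ASSERT | MTFLAG_STACKOP | MTFLAG_STOP_SEARCH)

#define COND_MT_START(flag) \
  do { if (MTFLAGS & (flag)) caml_release_runtime_system(); } while (0)
#define COND_MT_END(flag) \
  do { if (MTFLAGS & (flag)) caml_acquire_runtime_system(); } while (0)

static void _oy_error(void)
{
  const value *exn = caml_named_value("ocamlyices2.error"); /* name is an assumption */
  if (exn != NULL) caml_raise_with_arg(*exn, Val_int(yices_error_code()));
  else caml_failwith("yices error");
}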
CAMLprim value caml_gp_camera_capture(value cam_val, value typ_val, value context_val)
{
  CAMLparam3(cam_val, typ_val, context_val);
  CAMLlocal1(res_val);
  Camera *cam = Camera_val(cam_val);
  GPContext *context = Context_val(context_val);
  CameraCaptureType typ = Int_val(typ_val);
  /* Stack allocation: a heap buffer would leak if CHECK_RESULT raised. */
  CameraFilePath path;
  int ret = gp_camera_capture(cam, typ, &path, context);
  CHECK_RESULT(ret);
  res_val = caml_alloc(2, 0);
  Store_field(res_val, 0, caml_copy_string(path.folder));
  Store_field(res_val, 1, caml_copy_string(path.name));
  CAMLreturn(res_val);
}
CAMLprim value ocamlyices_context_assert_formula(value v_context, value v_formula)
{
  // Hyp: v_formula is a non-block value
  CAMLparam1(v_context);
  int32_t res;
  term_t formula = Int_val(v_formula);
  context_t *context = Context_val(v_context);
  COND_MT_START(MTFLAG_ASSERT);
  res = yices_assert_formula(context, formula);
  COND_MT_END(MTFLAG_ASSERT);
  if (res != 0) {
    _oy_error();
  }
  CAMLreturn(Val_unit);
}
static inline value _oy_context_toggle_option(
  int32_t (*f)(context_t *, const char *),
  value v_context, value v_option
)
{
  CAMLparam2(v_context, v_option);
  int32_t res;
  context_t *context = Context_val(v_context);
  const char *option = String_val(v_option);
  res = (*f)(context, option);
  if (res != 0) {
    _oy_error();
  }
  CAMLreturn(Val_unit);
}
value ffmpeg_stream_new(value ctx, value media_kind_)
{
  CAMLparam2(ctx, media_kind_);
  CAMLlocal1(ret);
  if (Context_val(ctx)->fmtCtx) {
    switch (Tag_val(media_kind_)) {
    case 0: {
      ret = ffmpeg_stream_new_video(ctx, Field(media_kind_, 0));
    } break;
    case 1: {
      ret = ffmpeg_stream_new_audio(ctx, Field(media_kind_, 0));
    } break;
    }
  } else {
    raise(ExnClosed, 0);
  }
  CAMLreturn(ret);
}
CAMLprim value ocamlyices_context_assert_formulas(value v_context, value v_formulas)
{
  CAMLparam2(v_context, v_formulas);
  int32_t res;
  uint32_t n;
  term_t *formulas;

  n = check_Wosize_val(v_formulas);
  formulas = _oy_terms_from_values(v_formulas, n);
  if (formulas == NULL) {
    _oy_allocation_error();
  }

  COND_MT_START(MTFLAG_ASSERT);
  res = yices_assert_formulas(Context_val(v_context), n, formulas);
  COND_MT_END(MTFLAG_ASSERT);
  free(formulas);

  if (res != 0) {
    _oy_error();
  }
  CAMLreturn(Val_unit);
}
CAMLprim value caml_md5_update(value ctx, value src, value ofs, value len)
{
  caml_MD5Update(Context_val(ctx), &Byte_u(src, Long_val(ofs)), Long_val(len));
  return Val_unit;
}
CAMLprim value caml_md5_init(value unit)
{
  value ctx = alloc_string(sizeof(struct MD5Context));
  caml_MD5Init(Context_val(ctx));
  return ctx;
}