/* Resize the minor heap to [size] bytes (Xen-aware variant).
   [size] must be within [Minor_heap_min, Minor_heap_max] and a multiple
   of sizeof (value).  The current minor heap is emptied by a minor
   collection before being replaced, so no live young value is lost.
   Raises Out_of_memory if the new heap cannot be allocated or entered
   in the page table. */
void caml_set_minor_heap_size (asize_t size)
{
  char *new_heap;
  void *new_heap_base;

  Assert (size >= Minor_heap_min);
  Assert (size <= Minor_heap_max);
  Assert (size % sizeof (value) == 0);
  /* Empty the minor heap first; caml_young_ptr == caml_young_end is the
     "empty" state, as asserted below. */
  if (caml_young_ptr != caml_young_end) caml_minor_collection ();
  Assert (caml_young_ptr == caml_young_end);
  new_heap = caml_aligned_malloc(size, 0, &new_heap_base);
  if (new_heap == NULL) caml_raise_out_of_memory();
  /* NOTE(review): if caml_page_table_add fails, new_heap_base appears to
     be leaked before raising -- confirm against the allocator's contract. */
  if (caml_page_table_add(In_young, new_heap, new_heap + size) != 0)
    caml_raise_out_of_memory();
  if (caml_young_start != NULL){
#ifdef SYS_xen
    /* XXX temporary until memory allocator works properly */
    printk("caml_set_minor_heap_size: resize unsupported\n");
    caml_raise_out_of_memory();
#else
    /* Unregister and free the previous minor heap. */
    caml_page_table_remove(In_young, caml_young_start, caml_young_end);
    free (caml_young_base);
#endif
  }
  /* Install the new heap and reset all allocation pointers to the
     empty state. */
  caml_young_base = new_heap_base;
  caml_young_start = new_heap;
  caml_young_end = new_heap + size;
  caml_young_limit = caml_young_start;
  caml_young_ptr = caml_young_end;
  caml_minor_heap_size = size;
  /* The remembered sets referred to the old heap; start them afresh. */
  reset_table (&caml_ref_table);
  reset_table (&caml_weak_ref_table);
}
/* Resize the minor heap to [size] bytes.
   [size] must be within [Minor_heap_min, Minor_heap_max] and a multiple
   of sizeof (value).  The current minor heap is emptied by a minor
   collection before being replaced, so no live young value is lost.
   Raises Out_of_memory if the new heap cannot be allocated or entered
   in the page table. */
void caml_set_minor_heap_size (asize_t size)
{
  char *new_heap;
  void *new_heap_base;

  Assert (size >= Minor_heap_min);
  Assert (size <= Minor_heap_max);
  Assert (size % sizeof (value) == 0);
  /* Empty the minor heap first; caml_young_ptr == caml_young_end is the
     "empty" state, as asserted below. */
  if (caml_young_ptr != caml_young_end) caml_minor_collection ();
  Assert (caml_young_ptr == caml_young_end);
  new_heap = caml_aligned_malloc(size, 0, &new_heap_base);
  if (new_heap == NULL) caml_raise_out_of_memory();
  /* NOTE(review): if caml_page_table_add fails, new_heap_base appears to
     be leaked before raising -- confirm against the allocator's contract. */
  if (caml_page_table_add(In_young, new_heap, new_heap + size) != 0)
    caml_raise_out_of_memory();
  if (caml_young_start != NULL){
    /* Unregister and free the previous minor heap. */
    caml_page_table_remove(In_young, caml_young_start, caml_young_end);
    free (caml_young_base);
  }
  /* Install the new heap and reset all allocation pointers to the
     empty state. */
  caml_young_base = new_heap_base;
  caml_young_start = new_heap;
  caml_young_end = new_heap + size;
  caml_young_limit = caml_young_start;
  caml_young_ptr = caml_young_end;
  caml_minor_heap_size = size;
  /* The remembered sets referred to the old heap; start them afresh. */
  reset_table (&caml_ref_table);
  reset_table (&caml_weak_ref_table);
}
/* Build a SUNLinearSolver whose operations call back into OCaml.
   [vid] selects the solver type (0 = direct, otherwise iterative);
   [vops] is the OCaml record of operation closures, stored in
   ls->content and registered as a generational global root so the GC
   keeps it alive and can relocate it; [vhasops] is a record of booleans
   saying which optional operations are implemented (unimplemented ones
   get a NULL slot).  Raises Out_of_memory if either malloc fails; on
   SUNDIALS < 3.0.0 the stub compiles to a unit return. */
CAMLprim value sunml_lsolver_make_custom(value vid, value vops, value vhasops)
{
    CAMLparam3(vid, vops, vhasops);
#if SUNDIALS_LIB_VERSION >= 300
    SUNLinearSolver ls;
    SUNLinearSolver_Ops ops;

    ls = (SUNLinearSolver)malloc(sizeof *ls);
    if (ls == NULL) caml_raise_out_of_memory();
    ops = (SUNLinearSolver_Ops) malloc(
	    sizeof(struct _generic_SUNLinearSolver_Ops));
    if (ops == NULL) {
	/* Avoid leaking the half-built solver. */
	free(ls);
	caml_raise_out_of_memory();
    }

    /* Attach operations */
    ops->gettype = (Int_val(vid) == 0)
	? callml_custom_gettype_direct : callml_custom_gettype_iterative;
    ops->initialize = callml_custom_initialize;
    ops->setup = callml_custom_setup;
    ops->solve = callml_custom_solve;
    ops->lastflag = NULL;
    ops->free = callml_custom_free;
    /* Optional operations: only wired up when the OCaml side declares
       support for them in vhasops. */
    ops->setatimes
	= Bool_val(Field(vhasops, RECORD_LSOLVER_HASOPS_SET_ATIMES))
	? callml_custom_setatimes : NULL;
    ops->setpreconditioner
	= Bool_val(Field(vhasops, RECORD_LSOLVER_HASOPS_SET_PRECONDITIONER))
	? callml_custom_setpreconditioner : NULL;
    ops->setscalingvectors
	= Bool_val(Field(vhasops, RECORD_LSOLVER_HASOPS_SET_SCALING_VECTORS))
	? callml_custom_setscalingvectors : NULL;
    ops->numiters
	= Bool_val(Field(vhasops, RECORD_LSOLVER_HASOPS_GET_NUM_ITERS))
	? callml_custom_numiters : NULL;
    ops->resnorm
	= Bool_val(Field(vhasops, RECORD_LSOLVER_HASOPS_GET_RES_NORM))
	? callml_custom_resnorm : NULL;
    ops->resid
	= Bool_val(Field(vhasops, RECORD_LSOLVER_HASOPS_GET_RES_ID))
	? callml_custom_resid : NULL;
    ops->space
	= Bool_val(Field(vhasops, RECORD_LSOLVER_HASOPS_GET_WORK_SPACE))
	? callml_custom_space : NULL;

    ls->ops = ops;
    /* The OCaml closure record lives in ls->content; the generational
       root lets the GC move it while keeping this pointer valid. */
    ls->content = (void *)vops;
    caml_register_generational_global_root((void *)&(ls->content));
    CAMLreturn(alloc_lsolver(ls));
#else
    CAMLreturn(Val_unit);
#endif
}
/* [caml_ba_alloc] will allocate a new bigarray object in the heap.
   If [data] is NULL, the memory for the contents is also
   allocated (with [malloc]) by [caml_ba_alloc].
   [data] cannot point into the OCaml heap.
   [dim] may point into an object in the OCaml heap.
   (FreeBSD-kernel variant; this chunk of the function is truncated in
   the present view.) */
CAMLexport value
caml_ba_alloc(int flags, int num_dims, void * data, intnat * dim)
{
  uintnat num_elts, asize, size;
  int overflow, i;
  value res;
  struct caml_ba_array * b;
  intnat dimcopy[CAML_BA_MAX_NUM_DIMS];
#if defined(__FreeBSD__) && defined(_KERNEL)
  struct caml_ba_proxy *proxy;
#endif

  Assert(num_dims >= 1 && num_dims <= CAML_BA_MAX_NUM_DIMS);
  Assert((flags & CAML_BA_KIND_MASK) <= CAML_BA_COMPLEX64);
  /* Copy the dimensions: [dim] may point into the OCaml heap and must
     not be read after any allocation below. */
  for (i = 0; i < num_dims; i++) dimcopy[i] = dim[i];
  size = 0;
  if (data == NULL) {
    /* Compute the byte size with overflow detection before allocating. */
    overflow = 0;
    num_elts = 1;
    for (i = 0; i < num_dims; i++) {
      num_elts = caml_ba_multov(num_elts, dimcopy[i], &overflow);
    }
    size = caml_ba_multov(num_elts,
                          caml_ba_element_size[flags & CAML_BA_KIND_MASK],
                          &overflow);
    if (overflow) caml_raise_out_of_memory();
    data = __malloc(size);
    /* malloc(0) may validly return NULL, so only size != 0 is an error. */
    if (data == NULL && size != 0) caml_raise_out_of_memory();
    flags |= CAML_BA_MANAGED;
  }
  asize = SIZEOF_BA_ARRAY + num_dims * sizeof(intnat);
  res = caml_alloc_custom(&caml_ba_ops, asize, size, CAML_BA_MAX_MEMORY);
  b = Caml_ba_array_val(res);
#if defined(__FreeBSD__) && defined(_KERNEL)
  if ((flags & CAML_BA_MANAGED_MASK) != CAML_BA_MANAGED) {
    b->proxy = __malloc(sizeof(struct caml_ba_proxy));
    if (b->proxy == NULL) caml_raise_out_of_memory();
    proxy = b->proxy;
    /* NOTE(review): this sums the dimensions rather than multiplying
       them by the element size -- confirm proxy->size is meant to be a
       dimension total and not a byte count. */
    for (proxy->size = 0, i = 0; i < num_dims; i++)
      proxy->size += dim[i];
    proxy->refcount = 1;
    if ((flags & CAML_BA_MANAGED_MASK) == CAML_BA_FBSD_MBUF) {
      proxy->type = CAML_FREEBSD_MBUF;
      proxy->data = data;
      b->data = mtod((struct mbuf *) proxy->data, void *);
    }
/* Create a progress bar and wrap it in an OCaml custom block.
   [machine_readablev] selects the machine-readable output mode.
   Raises Out_of_memory if progress_bar_init fails. */
value
guestfs_int_mllib_progress_bar_init (value machine_readablev)
{
  CAMLparam1 (machine_readablev);
  CAMLlocal1 (barv);
  struct progress_bar *pb;
  const unsigned flags =
    Bool_val (machine_readablev) ? PROGRESS_BAR_MACHINE_READABLE : 0;

  /* XXX Have to do this to get nl_langinfo to work properly.  However
   * we should really only call this from main.
   */
  setlocale (LC_ALL, "");

  pb = progress_bar_init (flags);
  if (pb == NULL)
    caml_raise_out_of_memory ();

  barv = caml_alloc_custom (&progress_bar_custom_operations,
                            sizeof (struct progress_bar *), 0, 1);
  Bar_val (barv) = pb;
  CAMLreturn (barv);
}
/* Resize a block previously obtained from caml_stat_alloc.
   Returns the (possibly moved) block.  Raises Out_of_memory on
   failure; note that on that path the original block is not freed
   (realloc leaves it valid, but the caller loses the pointer via the
   exception). */
CAMLexport void * caml_stat_resize (void * blk, asize_t sz)
{
  void * result = realloc (blk, sz);

  /* Fix: realloc(blk, 0) may legitimately return NULL without being an
     out-of-memory condition; treat NULL as failure only when sz != 0,
     consistent with caml_stat_alloc. */
  if (result == NULL && sz != 0) caml_raise_out_of_memory ();
  return result;
}
/* Create a LAPACK banded linear solver for matrix [vbmat] and template
   vector [vnvec].  SUNLapackBand only reports failure by returning
   NULL, so on failure we re-check its documented preconditions one by
   one to raise a precise OCaml exception; if none of them explains the
   failure, it is assumed to be an allocation failure.  Compiled out
   (returns unit) when SUNDIALS < 3.0.0 or LAPACK support is absent. */
CAMLprim value sunml_lsolver_lapack_band(value vnvec, value vbmat)
{
    CAMLparam2(vnvec, vbmat);
#if SUNDIALS_LIB_VERSION >= 300 && defined SUNDIALS_ML_LAPACK
    SUNMatrix bmat = MAT_VAL(vbmat);
    SUNLinearSolver ls = SUNLapackBand(NVEC_VAL(vnvec), bmat);

    if (ls == NULL) {
	/* Diagnose which precondition was violated. */
	if (SUNBandMatrix_Rows(bmat) != SUNBandMatrix_Columns(bmat))
	    caml_raise_constant(LSOLVER_EXN(MatrixNotSquare));
	/* LU factorization needs extra upper-bandwidth storage. */
	if (SUNBandMatrix_StoredUpperBandwidth(bmat) <
		SUNMIN(SUNBandMatrix_Rows(bmat) - 1,
		       SUNBandMatrix_LowerBandwidth(bmat)
			   + SUNBandMatrix_UpperBandwidth(bmat)))
	    caml_raise_constant(LSOLVER_EXN(InsufficientStorageUpperBandwidth));
	if (SUNBandMatrix_Rows(bmat) != NV_LENGTH_S(NVEC_VAL(vnvec)))
	    caml_raise_constant(LSOLVER_EXN(MatrixVectorMismatch));
	/* No precondition violated: assume allocation failure. */
	caml_raise_out_of_memory();
    }

    CAMLreturn(alloc_lsolver(ls));
#else
    CAMLreturn(Val_unit);
#endif
}
/* Start a new OCaml thread running closure [clos].
   Returns the new thread's descriptor.  Raises Out_of_memory if the
   thread info block cannot be allocated, and Sys_error (via
   st_check_error) if OS thread creation fails. */
CAMLprim value caml_thread_new(value clos)          /* ML */
{
  caml_thread_t th;
  st_retcode err;

  /* Create a thread info block */
  th = caml_thread_new_info();
  if (th == NULL) caml_raise_out_of_memory();
  /* Equip it with a thread descriptor */
  th->descr = caml_thread_new_descriptor(clos);
  /* Add thread info block to the list of threads: splice [th] into the
     doubly-linked ring just after the current thread. */
  th->next = curr_thread->next;
  th->prev = curr_thread;
  curr_thread->next->prev = th;
  curr_thread->next = th;
  /* Create the new thread */
  err = st_thread_create(NULL, caml_thread_start, (void *) th);
  if (err != 0) {
    /* Creation failed, remove thread info block from list of threads */
    caml_thread_remove_info(th);
    st_check_error(err, "Thread.create");
  }
  /* Create the tick thread if not already done.
     Because of PR#4666, we start the tick thread late, only when we
     create the first additional thread in the current process */
  if (! caml_tick_thread_running) {
    err = st_thread_create(&caml_tick_thread_id, caml_thread_tick, NULL);
    st_check_error(err, "Thread.create");
    caml_tick_thread_running = 1;
  }
  return th->descr;
}
/* Abort a marshaling operation whose extern stack has overflowed:
   log the condition, undo the in-place modifications made to the
   values being marshaled (replay the trail), release the output
   buffers, and report the failure as Out_of_memory.  The cleanup
   order matters: the trail must be replayed before the output is
   freed and before control leaves via the raise. */
static void extern_stack_overflow(void)
{
  caml_gc_message (0x04, "Stack overflow in marshaling value\n", 0);
  extern_replay_trail();
  free_extern_output();
  caml_raise_out_of_memory();
}
/* Allocate a zeroed bigarray of length [n_pages] pages.  Since
   CAML_BA_MANAGED is set the bigarray C finaliser will call free()
   whenever all sub-bigarrays are unreachable.
   NOTE(review): plain heap allocation does NOT guarantee page
   alignment despite this function's name -- confirm callers do not
   rely on alignment.
   On failure this raises Out_of_memory (it does not return None); the
   OCaml layer is expected to catch it, trigger a full GC which just
   might run finalizers of unused bigarrays, and retry with
   did_gc = true. */
CAMLprim value
mirage_alloc_pages(value did_gc, value n_pages)
{
  CAMLparam2(did_gc, n_pages);
  size_t pages = Int_val(n_pages);
  size_t len = pages * PAGE_SIZE;
  /* calloc() both zeroes the block (replacing the former explicit
     memset) and checks pages * PAGE_SIZE for overflow, which the old
     unchecked multiplication into malloc(len) did not. */
  void* block = calloc(pages, PAGE_SIZE);
  if (block == NULL) {
    if (Bool_val(did_gc))
      /* Fix: the old message claimed "memalign" although this is plain
         heap allocation. */
      printf("ERROR: Io_page: calloc(%zu, %d) failed, even after GC.\n",
             pages, PAGE_SIZE);
    caml_raise_out_of_memory();
  }
  /* OCaml 4.02 introduced bigarray element type CAML_BA_CHAR, which
     needs to be used - otherwise type t in io_page.ml is different from
     the allocated bigarray and equality won't hold.
     Only since 4.02 there is a <caml/version.h>, thus we cannot include
     it in order to detect the version of the OCaml runtime.  Instead,
     we use definitions which were introduced by 4.02 - and cross
     fingers that they'll stay there in the future.  Once <4.02 support
     is removed, we should get rid of this hack.
     -- hannes, 16th Feb 2015 */
#ifdef Caml_ba_kind_val
  CAMLreturn(caml_ba_alloc_dims(CAML_BA_CHAR | CAML_BA_C_LAYOUT
                                | CAML_BA_MANAGED, 1, block, len));
#else
  CAMLreturn(caml_ba_alloc_dims(CAML_BA_UINT8 | CAML_BA_C_LAYOUT
                                | CAML_BA_MANAGED, 1, block, len));
#endif
}
/* Return (as an OCaml string) the name of the [n]-th link in group
   [group_name_v] under [loc_v], iterated by [index_field_v] in
   [order_v] order.  Uses the standard HDF5 two-call protocol: the
   first H5Lget_name_by_idx call with a NULL buffer yields the name
   length, then a buffer of that size (+1 for the terminating NUL) is
   filled by the second call.  Raises Out_of_memory if the buffer
   cannot be allocated; fail() on any HDF5 error. */
value hdf5_h5l_get_name_by_idx(value loc_v, value group_name_v,
  value index_field_v, value order_v, value lapl_v, value n_v)
{
  CAMLparam5(loc_v, group_name_v, index_field_v, order_v, lapl_v);
  CAMLxparam1(n_v);
  CAMLlocal1(name_v);
  hid_t loc_id = Hid_val(loc_v), lapl_id = H5P_opt_val(lapl_v);
  const char *group_name = String_val(group_name_v);
  H5_index_t index_field = H5_index_val(index_field_v);
  H5_iter_order_t order = H5_iter_order_val(order_v);
  hsize_t n = Int_val(n_v);
  char *name;
  ssize_t size;

  /* First call: NULL buffer, just query the length. */
  size = H5Lget_name_by_idx(loc_id, group_name, index_field, order, n, NULL,
    0, lapl_id);
  if (size < 0) fail();
  size++;  /* room for the terminating NUL */
  name = malloc(size);
  if (name == NULL) caml_raise_out_of_memory();
  /* Second call: actually retrieve the name. */
  size = H5Lget_name_by_idx(loc_id, group_name, index_field, order, n, name,
    size, lapl_id);
  if (size < 0)
  {
    free(name);
    fail();
  }
  name_v = caml_copy_string(name);
  free(name);
  CAMLreturn(name_v);
}
/* xentoollog vmessage hook: forward a log message to the OCaml
   callback registered under xtl->vmessage_cb.
   This is invoked from C code that runs outside the OCaml runtime
   lock, hence the leave/enter blocking-section pair bracketing the
   callback.  The message is formatted into a malloc'ed buffer with
   vasprintf, copied into an OCaml string, then freed before the
   callback runs.
   NOTE(review): the raises below (Sys_error / Out_of_memory) unwind
   through this C frame without re-entering the blocking section --
   confirm the surrounding xentoollog machinery tolerates that. */
static void stub_xtl_ocaml_vmessage(struct xentoollog_logger *logger,
	xentoollog_level level,
	int errnoval,
	const char *context,
	const char *format,
	va_list al)
{
	caml_leave_blocking_section();
	CAMLparam0();
	CAMLlocalN(args, 4);
	struct caml_xtl *xtl = (struct caml_xtl*)logger;
	value *func = caml_named_value(xtl->vmessage_cb) ;
	char *msg;

	if (func == NULL)
		caml_raise_sys_error(caml_copy_string("Unable to find callback"));
	if (vasprintf(&msg, format, al) < 0)
		caml_raise_out_of_memory();
	/* vmessage : level -> int option -> string option -> string -> unit; */
	args[0] = Val_level(level);
	args[1] = Val_errno(errnoval);
	args[2] = Val_context(context);
	args[3] = caml_copy_string(msg);
	free(msg);
	caml_callbackN(*func, 4, args);
	CAMLdone;
	caml_enter_blocking_section();
}
/* Return (dims, class, type_size) for dataset [dset_name_v] under
   [loc_id_v] as an OCaml triple.
   The dims buffer is sized from the dataset's rank (queried via
   H5LTget_dataset_ndims) and freed on every path.
   Raises Out_of_memory if the dims buffer cannot be allocated;
   raise_if_fail()/fail() on HDF5 errors. */
value hdf5_h5lt_get_dataset_info(value loc_id_v, value dset_name_v)
{
  CAMLparam2(loc_id_v, dset_name_v);
  CAMLlocal1(info);
  hid_t loc_id = Hid_val(loc_id_v);
  const char *dset_name = String_val(dset_name_v);
  int rank;
  hsize_t *dims;
  H5T_class_t class_id;
  size_t type_size;
  herr_t err;

  raise_if_fail(H5LTget_dataset_ndims(loc_id, dset_name, &rank));
  dims = calloc(rank, sizeof(hsize_t));
  if (dims == NULL) caml_raise_out_of_memory();
  err = H5LTget_dataset_info(loc_id, dset_name, dims, &class_id, &type_size);
  if (err < 0)
  {
    free(dims);
    fail();
  }
  info = caml_alloc_tuple(3);
  Store_field(info, 0, val_hsize_t_array(rank, dims));
  Store_field(info, 1, Val_h5t_class(class_id));
  Store_field(info, 2, Val_int(type_size));
  /* Fix: dims was leaked on the success path; the data has been copied
     into the OCaml array above, so the C buffer can be released. */
  free(dims);
  CAMLreturn(info);
}
/* Start a vmnet interface in mode [v_mode] (this chunk of the function
   is truncated in the present view: it ends inside the
   vmnet_start_interface completion block).
   The interface is created asynchronously on a private serial dispatch
   queue; a semaphore signals when the completion block has filled in
   the __block-captured status, MAC address, MTU and max packet size.
   Raises Out_of_memory if the 6-byte MAC buffer cannot be allocated. */
CAMLprim value caml_init_vmnet(value v_mode)
{
  CAMLparam1(v_mode);
  CAMLlocal3(v_iface_ref,v_res,v_mac);
  xpc_object_t interface_desc = xpc_dictionary_create(NULL, NULL, 0);
  xpc_dictionary_set_uint64(interface_desc, vmnet_operation_mode_key,
                            Int_val(v_mode));
  uuid_t uuid;
  uuid_generate_random(uuid);
  xpc_dictionary_set_uuid(interface_desc, vmnet_interface_id_key, uuid);
  /* __block: these are written by the completion block below. */
  __block interface_ref iface = NULL;
  __block vmnet_return_t iface_status = 0;
  __block unsigned char *mac = malloc(6);
  if (!mac) caml_raise_out_of_memory ();
  __block unsigned int mtu = 0;
  __block unsigned int max_packet_size = 0;
  dispatch_queue_t if_create_q =
    dispatch_queue_create("org.openmirage.vmnet.create",
                          DISPATCH_QUEUE_SERIAL);
  dispatch_semaphore_t iface_created = dispatch_semaphore_create(0);
  iface = vmnet_start_interface(interface_desc, if_create_q,
    ^(vmnet_return_t status, xpc_object_t interface_param) {
      iface_status = status;
      if (status != VMNET_SUCCESS || !interface_param) {
        dispatch_semaphore_signal(iface_created);
        return;
      }
      //printf("mac desc: %s\n", xpc_copy_description(xpc_dictionary_get_value(interface_param, vmnet_mac_address_key)));
      /* vmnet reports the MAC as a textual "xx:xx:xx:xx:xx:xx" string;
         parse it into raw bytes. */
      const char *macStr =
        xpc_dictionary_get_string(interface_param, vmnet_mac_address_key);
      unsigned char lmac[6];
      if (sscanf(macStr, "%hhx:%hhx:%hhx:%hhx:%hhx:%hhx",
                 &lmac[0], &lmac[1], &lmac[2], &lmac[3],
                 &lmac[4], &lmac[5]) != 6)
        errx(1, "Unexpected MAC address received from vmnet");
      memcpy(mac, lmac, 6);
      mtu = xpc_dictionary_get_uint64(interface_param, vmnet_mtu_key);
      max_packet_size =
        xpc_dictionary_get_uint64(interface_param, vmnet_max_packet_size_key);
      dispatch_semaphore_signal(iface_created);
    });
/* Allocate an [alignment]-aligned float-array block for the OCaml
   heap.  One extra [alignment]-sized slot is reserved in front of the
   data to hold the OCaml header, so the returned value (which points
   just past the header) is itself aligned.
   Raises Out_of_memory on ENOMEM and Failure on other posix_memalign
   errors. */
value caml_aligned_array_create(size_t alignment, value len)
{
  CAMLparam1 (len);
  void* block;
  char* bp;
  mlsize_t bosize;
  int result;

  bosize = (Int_val(len) + 1) * alignment;
  result = posix_memalign(&block, alignment, bosize);
  if (result != 0) {
    if (result == EINVAL)
      caml_failwith(
        "The alignment was not a power of two, or was not a multiple of sizeof(void *)");
    else if (result == ENOMEM)
      caml_raise_out_of_memory();
    else
      caml_failwith("Unrecognized error");
  }

  /* Leave space for the header.  Fix: the old code did `bp += alignment`
     on a void*, which is not valid ISO C (pointer arithmetic on void*
     is a GCC extension); do the arithmetic on char* instead. */
  bp = (char*)block + alignment;
  Hd_bp (bp) = Make_header (Wosize_bhsize(Bhsize_bosize(bosize - alignment)),
                            Double_array_tag, Caml_white);
  CAMLreturn (Val_bp(bp));
}
/* Map [size] anonymous private bytes at exactly address [mem] with
   protection [prot], replacing any existing mapping there (MAP_FIXED).
   Raises Out_of_memory if mmap fails. */
static void map_fixed(void* mem, uintnat size, int prot)
{
  void* mapped = mmap(mem, size, prot,
                      MAP_PRIVATE | MAP_ANONYMOUS | MAP_FIXED,
                      -1, 0);
  if (mapped == MAP_FAILED)
    caml_raise_out_of_memory();
}
/* Allocate a [wosize]-word block with tag [tag] directly in the major
   heap and return its value pointer.  Tries the free list first,
   expanding the heap once on failure.  Raises Out_of_memory for
   oversized requests or when expansion fails -- except during a minor
   collection, where raising is unsafe and the runtime aborts instead.
   The header color is chosen so the new block survives the current GC
   phase. */
CAMLexport value caml_alloc_shr (mlsize_t wosize, tag_t tag)
{
  header_t *hp;
  value *new_block;

  if (wosize > Max_wosize) caml_raise_out_of_memory ();
  hp = caml_fl_allocate (wosize);
  if (hp == NULL){
    /* Free list exhausted: grow the heap and retry once. */
    new_block = expand_heap (wosize);
    if (new_block == NULL) {
      if (caml_in_minor_collection)
        caml_fatal_error ("Fatal error: out of memory.\n");
      else
        caml_raise_out_of_memory ();
    }
    caml_fl_add_blocks ((value) new_block);
    hp = caml_fl_allocate (wosize);
  }
  Assert (Is_in_heap (Val_hp (hp)));
  /* Inline expansion of caml_allocation_color: blocks allocated during
     marking, or behind the sweep pointer, must be black so the current
     cycle does not reclaim them. */
  if (caml_gc_phase == Phase_mark
      || (caml_gc_phase == Phase_sweep
          && (addr)hp >= (addr)caml_gc_sweep_hp)){
    Hd_hp (hp) = Make_header (wosize, tag, Caml_black);
  }else{
    Assert (caml_gc_phase == Phase_idle
            || (caml_gc_phase == Phase_sweep
                && (addr)hp < (addr)caml_gc_sweep_hp));
    Hd_hp (hp) = Make_header (wosize, tag, Caml_white);
  }
  Assert (Hd_hp (hp) == Make_header (wosize, tag, caml_allocation_color (hp)));
  /* Account the allocation toward the next major slice. */
  caml_allocated_words += Whsize_wosize (wosize);
  if (caml_allocated_words > caml_minor_heap_wsz){
    caml_urge_major_slice ();
  }
#ifdef DEBUG
  {
    uintnat i;
    /* Poison the fields so reads of uninitialized words are visible. */
    for (i = 0; i < wosize; i++){
      Field (Val_hp (hp), i) = Debug_uninit_major;
    }
  }
#endif
  return Val_hp (hp);
}
/* Allocate [sz] bytes as a "large" block for heap [local], with a
   hidden large_alloc header in front.  The block is threaded onto
   local->swept_large and the byte count is recorded.  Returns a
   pointer just past the header.  Raises Out_of_memory on failure. */
static void* large_allocate(struct caml_heap_state* local, mlsize_t sz)
{
  large_alloc* hdr;

  hdr = malloc(LARGE_ALLOC_HEADER_SZ + sz);
  if (hdr == NULL)
    caml_raise_out_of_memory();

  local->large_bytes_allocated += sz;

  /* Push onto the swept-large list, tagged with the owning domain. */
  hdr->owner = local->owner;
  hdr->next = local->swept_large;
  local->swept_large = hdr;

  return (char*)hdr + LARGE_ALLOC_HEADER_SZ;
}
/* * Copyright (C) 2009-2010 Citrix Ltd. * Author Vincent Hanquez <*****@*****.**> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU Lesser General Public License as published * by the Free Software Foundation; version 2.1 only. with the special * exception on linking described in file LICENSE. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU Lesser General Public License for more details. */ #include <stdlib.h> #define CAML_NAME_SPACE #include <caml/alloc.h> #include <caml/memory.h> #include <caml/signals.h> #include <caml/fail.h> #include <caml/callback.h> #include <sys/mman.h> #include <stdint.h> #include <string.h> #include "libxl.h" struct caml_logger { struct xentoollog_logger logger; int log_offset; char log_buf[2048]; }; typedef struct caml_gc { int offset; void *ptrs[64]; } caml_gc; void log_vmessage(struct xentoollog_logger *logger, xentoollog_level level, int errnoval, const char *context, const char *format, va_list al) { struct caml_logger *ologger = (struct caml_logger *) logger; ologger->log_offset += vsnprintf(ologger->log_buf + ologger->log_offset, 2048 - ologger->log_offset, format, al); } void log_destroy(struct xentoollog_logger *logger) { } #define INIT_STRUCT() libxl_ctx ctx; struct caml_logger lg; struct caml_gc gc; gc.offset = 0; #define INIT_CTX() \ lg.logger.vmessage = log_vmessage; \ lg.logger.destroy = log_destroy; \ lg.logger.progress = NULL; \ caml_enter_blocking_section(); \ ret = libxl_ctx_init(&ctx, LIBXL_VERSION, (struct xentoollog_logger *) &lg); \ if (ret != 0) \ failwith_xl("cannot init context", &lg); #define FREE_CTX() \ gc_free(&gc); \ caml_leave_blocking_section(); \ libxl_ctx_free(&ctx) static char * dup_String_val(caml_gc *gc, value s) { int len; char *c; len = caml_string_length(s); c = calloc(len + 1, sizeof(char)); 
	/* (Continuation of dup_String_val from the previous chunk.) */
	if (!c)
		caml_raise_out_of_memory();
	/* Register the copy with the arena so gc_free releases it. */
	gc->ptrs[gc->offset++] = c;
	memcpy(c, String_val(s), len);
	return c;
}

/* Release every pointer registered with the arena. */
static void gc_free(caml_gc *gc)
{
	int i;
	for (i = 0; i < gc->offset; i++) {
		free(gc->ptrs[i]);
	}
}

/* Raise the registered "xl.error" OCaml exception carrying either the
   accumulated log buffer (when a logger is supplied) or [fname]. */
void failwith_xl(char *fname, struct caml_logger *lg)
{
	char *s;
	s = (lg) ? lg->log_buf : fname;
	caml_raise_with_string(*caml_named_value("xl.error"), s);
}

#if 0 /* TODO: wrap libxl_domain_create(), these functions will be needed then */
/* Arena-owned calloc: like calloc but the result is registered with
   [gc] for collective release.  Raises Out_of_memory on failure. */
static void *
gc_calloc(caml_gc *gc, size_t nmemb, size_t size)
{
	void *ptr;
	ptr = calloc(nmemb, size);
	if (!ptr)
		caml_raise_out_of_memory();
	gc->ptrs[gc->offset++] = ptr;
	return ptr;
}
/* Prepare marshaling to a malloc'ed chain of output blocks: clear the
   user-buffer mode flag, allocate the first block, and point the
   write cursor (extern_ptr / extern_limit) at its data area.
   Raises Out_of_memory if the first block cannot be allocated. */
static void init_extern_output(void)
{
  extern_userprovided_output = NULL;
  extern_output_first = malloc(sizeof(struct output_block));
  if (extern_output_first == NULL) caml_raise_out_of_memory();
  extern_output_block = extern_output_first;
  extern_output_block->next = NULL;
  extern_ptr = extern_output_block->data;
  extern_limit = extern_output_block->data + SIZE_EXTERN_OUTPUT_BLOCK;
}
/* [caml_ba_alloc] will allocate a new bigarray object in the heap.
   If [data] is NULL, the memory for the contents is also
   allocated (with [malloc]) by [caml_ba_alloc].
   [data] cannot point into the OCaml heap.
   [dim] may point into an object in the OCaml heap. */
CAMLexport value
caml_ba_alloc(int flags, int num_dims, void * data, intnat * dim)
{
  uintnat num_elts, asize, size;
  int overflow, i;
  value res;
  struct caml_ba_array * b;
  intnat dimcopy[CAML_BA_MAX_NUM_DIMS];

  Assert(num_dims >= 1 && num_dims <= CAML_BA_MAX_NUM_DIMS);
  Assert((flags & CAML_BA_KIND_MASK) <= CAML_BA_COMPLEX64);
  /* Copy the dimensions first: [dim] may point into the OCaml heap and
     must not be read after the allocation below. */
  for (i = 0; i < num_dims; i++) dimcopy[i] = dim[i];
  size = 0;
  if (data == NULL) {
    /* Compute the byte size with overflow detection before allocating. */
    overflow = 0;
    num_elts = 1;
    for (i = 0; i < num_dims; i++) {
      num_elts = caml_ba_multov(num_elts, dimcopy[i], &overflow);
    }
    size = caml_ba_multov(num_elts,
                          caml_ba_element_size[flags & CAML_BA_KIND_MASK],
                          &overflow);
    if (overflow) caml_raise_out_of_memory();
    data = malloc(size);
    /* malloc(0) may validly return NULL, so only size != 0 is an error. */
    if (data == NULL && size != 0) caml_raise_out_of_memory();
    flags |= CAML_BA_MANAGED;
  }
  /* PR#5516: use C99's flexible array types if possible */
#if (__STDC_VERSION__ >= 199901L)
  asize = sizeof(struct caml_ba_array) + num_dims * sizeof(intnat);
#else
  asize = sizeof(struct caml_ba_array) + (num_dims - 1) * sizeof(intnat);
#endif
  res = caml_alloc_custom(&caml_ba_ops, asize, size, CAML_BA_MAX_MEMORY);
  b = Caml_ba_array_val(res);
  b->data = data;
  b->num_dims = num_dims;
  b->flags = flags;
  b->proxy = NULL;
  for (i = 0; i < num_dims; i++) b->dim[i] = dimcopy[i];
  return res;
}
/* Duplicate OCaml string [s] into a freshly allocated, NUL-terminated
   C string.  The caller owns the result and must free() it.
   Note: OCaml strings may contain embedded NULs; the returned C string
   is truncated at the first one by any libc string function, although
   all [len] bytes are copied.  Raises Out_of_memory on allocation
   failure. */
static char * dup_String_val(value s)
{
	size_t len;  /* Fix: was int, truncating caml_string_length on
	                strings longer than INT_MAX */
	char *c;

	len = caml_string_length(s);
	c = calloc(len + 1, sizeof(char));  /* +1 and zeroed: NUL-terminated */
	if (!c)
		caml_raise_out_of_memory();
	memcpy(c, String_val(s), len);
	return c;
}
/* Allocate [sz] bytes outside the OCaml heap.
   Returns the block (NULL is a legal result when sz == 0).
   Raises Out_of_memory on failure. */
CAMLexport void * caml_stat_alloc (asize_t sz)
{
  void * result = malloc (sz);

  /* malloc() may return NULL if size is 0 */
  if (result == NULL && sz != 0) caml_raise_out_of_memory ();
#ifdef DEBUG
  /* Fix: when sz == 0, result may still be NULL here, and
     memset(NULL, ..., 0) is undefined behavior -- guard it. */
  if (result != NULL) memset (result, Debug_uninit_stat, sz);
#endif
  return result;
}
/* Allocate [sz] bytes outside the OCaml heap, prefixed with a fake
   block header (STAT_ALLOC_MAGIC / Abstract_tag / NOT_MARKABLE) so the
   block can be treated uniformly with heap values by the runtime's
   accounting; the pointer returned to the caller points past that
   header.  Raises Out_of_memory on failure. */
CAMLexport void * caml_stat_alloc (asize_t sz)
{
  /* Room for the header word plus the payload. */
  void* result = malloc (sizeof(value) + sz);

  if (result == NULL)
    caml_raise_out_of_memory();
  Hd_hp(result) = Make_header(STAT_ALLOC_MAGIC, Abstract_tag, NOT_MARKABLE);
#ifdef DEBUG
  /* Poison the payload so uninitialized reads are visible. */
  memset ((void*)Val_hp(result), Debug_uninit_stat, sz);
#endif
  return (void*)Val_hp(result);
}
/* Allocate [sz] bytes as a "large" block for heap [local], with a
   hidden large_alloc header in front.  Updates the heap's large-block
   statistics (current words, high-water mark, block count) and threads
   the block onto local->swept_large.  Returns a pointer just past the
   header.  Raises Out_of_memory on failure. */
static void* large_allocate(struct caml_heap_state* local, mlsize_t sz)
{
  large_alloc* hdr = malloc(LARGE_ALLOC_HEADER_SZ + sz);
  if (hdr == NULL)
    caml_raise_out_of_memory();

  /* Statistics: track current and peak large-block usage. */
  local->stats.large_words += Wsize_bsize(sz + LARGE_ALLOC_HEADER_SZ);
  if (local->stats.large_max_words < local->stats.large_words)
    local->stats.large_max_words = local->stats.large_words;
  local->stats.large_blocks++;

  /* Push onto the swept-large list, tagged with the owning domain. */
  hdr->owner = local->owner;
  hdr->next = local->swept_large;
  local->swept_large = hdr;

  return (char*)hdr + LARGE_ALLOC_HEADER_SZ;
}
/* Create an unpreconditioned PCG linear solver with at most [vmaxl]
   iterations, using [vnvec] as the template vector.
   Raises Out_of_memory if SUNPCG returns NULL.  Compiled out (returns
   unit) on SUNDIALS < 3.0.0. */
CAMLprim value sunml_lsolver_pcg(value vmaxl, value vnvec)
{
    CAMLparam2(vmaxl, vnvec);
#if SUNDIALS_LIB_VERSION >= 300
    SUNLinearSolver solver;

    solver = SUNPCG(NVEC_VAL(vnvec), PREC_NONE, Int_val(vmaxl));
    if (solver == NULL)
	caml_raise_out_of_memory();

    CAMLreturn(alloc_lsolver(solver));
#else
    CAMLreturn(Val_unit);
#endif
}
/* Duplicate OCaml string [s] into a NUL-terminated C string owned by
   the arena [gc] (released collectively by gc_free).
   NOTE(review): gc->ptrs has a fixed capacity with no bounds check
   here -- confirm callers cannot exceed it.
   Raises Out_of_memory on allocation failure. */
static char * dup_String_val(caml_gc *gc, value s)
{
	size_t len;  /* Fix: was int, truncating caml_string_length on
	                strings longer than INT_MAX */
	char *c;

	len = caml_string_length(s);
	c = calloc(len + 1, sizeof(char));  /* +1 and zeroed: NUL-terminated */
	if (!c)
		caml_raise_out_of_memory();
	gc->ptrs[gc->offset++] = c;
	memcpy(c, String_val(s), len);
	return c;
}
/* Elementary genlist/gengrid text_get callback bridging to OCaml:
   invokes the OCaml closure stored at Field(*v_class, 1) with the
   wrapped object and part name, and returns a strdup'ed copy of the
   resulting string (EFL frees the returned buffer).
   NOTE(review): raising Out_of_memory here unwinds through the EFL C
   caller -- confirm the toolkit tolerates a longjmp out of this
   callback. */
char* ml_Elm_Gen_Item_Text_Get_Cb(
	void* data, Evas_Object* obj, const char* part)
{
	CAMLparam0();
	CAMLlocal3(v_obj, v_part, v);
	value* v_class = data;
	v_obj = copy_Evas_Object(obj);
	v_part = copy_string(part);
	v = caml_callback2(Field(*v_class, 1), v_obj, v_part);
	/* The result must be heap-allocated C memory: EFL takes ownership. */
	char* r = strdup(String_val(v));
	if(r == NULL) caml_raise_out_of_memory();
	CAMLreturnT(char*, r);
}
/* Wrap interface reference [i] in a fully initialized vmnet_state and
   return it boxed in an OCaml custom block.
   Fix: the C state is now allocated and initialized BEFORE the custom
   block.  The old order allocated the custom block first, so a failed
   malloc raised Out_of_memory while a custom block with an
   uninitialized payload pointer was already live on the OCaml heap,
   where its finalizer could later dereference/free garbage.
   Raises Out_of_memory if the state cannot be allocated. */
static value alloc_vmnet_state(interface_ref i)
{
  struct vmnet_state *vms = malloc(sizeof(struct vmnet_state));
  if (!vms)
    caml_raise_out_of_memory();
  vms->iref = i;
  pthread_mutex_init(&vms->vmm, NULL);
  pthread_cond_init(&vms->vmc, NULL);
  /* Only now create the custom block and install the payload. */
  value v = alloc_custom(&vmnet_state_ops,
                         sizeof(struct vmnet_state *), 0, 1);
  Vmnet_state_val(v) = vms;
  return v;
}
/* Allocate a page-aligned bigarray of length [n_pages] pages.
   Since CAML_BA_MANAGED is set the bigarray C finaliser will
   call free() whenever all sub-bigarrays are unreachable.
   On failure this raises Out_of_memory; the OCaml layer is expected to
   catch it, trigger a full GC which just might run finalizers of
   unused bigarrays freeing some memory, and retry with did_gc = true. */
CAMLprim value
caml_alloc_pages(value did_gc, value n_pages)
{
  CAMLparam2(did_gc, n_pages);
  size_t len = Int_val(n_pages) * PAGE_SIZE;

#ifdef __MINIOS__
  void* block = _xmalloc(len, PAGE_SIZE);
  if (block == NULL) {
#elif _WIN32
  /* NB we can't use _aligned_malloc because we can't get OCaml to
     finalize with _aligned_free. Regular free() will not work. */
  static int printed_warning = 0;
  if (!printed_warning) {
    printed_warning = 1;
    printk("WARNING: Io_page on Windows doesn't guarantee alignment\n");
  }
  void *block = malloc(len);
  if (block == NULL) {
#else
  void* block = NULL;
  int ret = posix_memalign(&block, PAGE_SIZE, len);
  /* Fix: posix_memalign() reports failure by RETURNING a positive error
     number (EINVAL/ENOMEM); it does not return -1 or set errno.  The
     old test `ret < 0` could never be true, so a failed allocation fell
     through to memset(NULL, ...) below and crashed instead of raising
     Out_of_memory. */
  if (ret != 0) {
#endif
    if (Bool_val(did_gc))
      printk("Io_page: memalign(%d, %zu) failed, even after GC.\n",
             PAGE_SIZE, len);
    caml_raise_out_of_memory();
  }
  /* Explicitly zero the page before returning it */
  memset(block, 0, len);
  /* OCaml 4.02 introduced bigarray element type CAML_BA_CHAR, which
     needs to be used - otherwise type t in io_page.ml is different from
     the allocated bigarray and equality won't hold.
     Only since 4.02 there is a <caml/version.h>, thus we cannot include
     it in order to detect the version of the OCaml runtime.  Instead,
     we use definitions which were introduced by 4.02 - and cross
     fingers that they'll stay there in the future.  Once <4.02 support
     is removed, we should get rid of this hack.
     -- hannes, 16th Feb 2015 */
#ifdef Caml_ba_kind_val
  CAMLreturn(caml_ba_alloc_dims(CAML_BA_CHAR | CAML_BA_C_LAYOUT
                                | CAML_BA_MANAGED, 1, block, len));
#else
  CAMLreturn(caml_ba_alloc_dims(CAML_BA_UINT8 | CAML_BA_C_LAYOUT
                                | CAML_BA_MANAGED, 1, block, len));
#endif
}