/* Wrapper for pvm_getmboxinfo: return the PVM mailbox database matching
   pattern [s1] as an OCaml array of (name, indices, owners) triples.
   [r] is a 4-field scratch block rooted via CAMLlocal1: field 0 holds
   the result array, fields 1-3 hold the components of the triple
   currently being built, so they survive any GC triggered by the
   allocations below. */
value Pvm_getmboxinfo (value s1)
{
  CAMLparam1(s1);
  int res,bytes,nb,n;
  int nclasses;
  int i,j,k;
  struct pvmmboxinfo *p;
  value v;
  CAMLlocal1(r);
  r = alloc(4, 0);
  res=pvm_getmboxinfo(String_val(s1),&nclasses,&p);
  if (res<0) TreatError(res);
  /* Result array, one slot per mailbox class.  It lives in the major
     heap (alloc_shr), so its slots are set with initialize first and
     modify afterwards.
     NOTE(review): alloc_shr with a size of 0 is not a valid allocation
     in the OCaml runtime -- confirm nclasses > 0 always holds here. */
  Store_field (r, 0, alloc_shr(nclasses,0));
  for (i=0;i<nclasses;i++) initialize(&Field(Field(r, 0),i),Val_int(0));
  for (i=0;i<nclasses;i++) {
    /* Copy the class name into a fresh OCaml string (scratch field 1). */
    bytes=strlen(p[i].mi_name);
    Store_field (r, 1, alloc_string(bytes));
    for (j=0;j<bytes;j++) Byte(Field(r, 1),j)=p[i].mi_name[j];
    nb=p[i].mi_nentries;
    /* printf("nb=%d\n",nb);*/
    /* Index and owner arrays (scratch fields 2 and 3); their elements
       are immediates, stored with initialize as required for major-heap
       blocks. */
    Store_field (r, 2, alloc_shr(nb,0));
    for (j=0;j<nb;j++) initialize(&Field(Field(r, 2),j),Val_int(p[i].mi_indices[j]));
    Store_field (r, 3, alloc_shr(nb,0));
    for (j=0;j<nb;j++) initialize(&Field(Field(r, 3),j),Val_int(p[i].mi_owners[j]));
    /* Pack the (name, indices, owners) triple and store it into the
       result array with modify (the slot was already initialized). */
    v=alloc_tuple(3);
    Store_field(v,0, Field(r, 1));
    /* Field(v,1)=Val_int(nb);*/
    Store_field(v,1, Field(r, 2));
    Store_field(v,2, Field(r, 3));
    modify(&Field(Field(r, 0),i),v);
  }
  CAMLreturn(Field(r, 0));
}
/* UNUSED */ value copy_outgoing_hack(struct outgoing_array * _c1) { CAMLparam0 (); CAMLlocal1(_v2); mlsize_t _c3; if ((*_c1).length == 0) { _v2 = Val_int(0); // the empty array } else { // assert((*_c1).length > 0); if ((*_c1).length < Max_young_wosize) { _v2 = alloc_small((*_c1).length,0); for (_c3 = 0; _c3 < (*_c1).length; _c3++) { Field(_v2,_c3) = Val_int((unsigned char)((*_c1).contents[_c3])); } } else { _v2 = alloc_shr((*_c1).length,0); for (_c3 = 0; _c3 < (*_c1).length; _c3++) { initialize(&Field(_v2,_c3), Val_int((unsigned char)((*_c1).contents[_c3]))); } } } CAMLreturn(_v2); }
/* I don't want to adjust the GC so I've made my own alloc_final,
   stolen from alloc.c */
/* Allocate a finalized block of [len] words directly in the major heap
   and record [fun] as its finalization function. */
value mlbdd_alloc_final(mlsize_t len, final_fun fun)
{
  value block = alloc_shr(len, Final_tag);
  Final_fun(block) = fun;
  return block;
}
/* Build an exception-name value: a one-field Reference_tag block in the
   major heap holding a fresh copy of the C string [name].  The copied
   string is registered as a GC root (Push_roots/Pop_roots) across the
   alloc_shr call, which may trigger a collection.
   NOTE(review): the field of a freshly alloc_shr'd block is
   uninitialized; the OCaml runtime normally requires initialize()
   rather than modify() there -- confirm against this runtime's
   write-barrier rules. */
value mkexnname(char* name)
{
  value ref;
  Push_roots(r, 1);
  r[0] = copy_string(name);           /* rooted: survives GC in alloc_shr */
  ref = alloc_shr(1, Reference_tag);  /* ref cell in the major heap */
  modify(&Field(ref, 0), r[0]);
  Pop_roots();
  return ref;
}
/* Promote the minor-heap value [v] to the major heap, storing the
   promoted value through [p].  This is the classic minor-GC "oldify":
   - immediates and major-heap values are stored through [p] unchanged;
   - an already-forwarded block (blue header) yields its forward
     pointer, kept in field 0;
   - a no-scan block is byte-copied;
   - a scannable block is copied field by field, recursing on all
     fields but the last, which is handled by the tail_call loop to
     bound C-stack growth on long right-leaning structures.
   Forwarding works by painting the old header blue and overwriting
   field 0 with the new address, so sharing and cycles are preserved. */
static void oldify (value *p, value v)
{
  value result;
  mlsize_t i;

 tail_call:
  if (IS_BLOCK(v) && Is_young (v)){
    assert (Hp_val (v) < young_ptr);
    if (Is_blue_val (v)){    /* Already forwarded ? */
      *p = Field (v, 0);     /* Then the forward pointer is the first field. */
    }else if (Tag_val (v) >= No_scan_tag){
      /* No-scan block (string, float, abstract...): raw byte copy. */
      result = alloc_shr (Wosize_val (v), Tag_val (v));
      bcopy (Bp_val (v), Bp_val (result), Bosize_val (v));
      Hd_val (v) = Bluehd_hd (Hd_val (v)); /* Put the forward flag. */
      Field (v, 0) = result;               /* And the forward pointer. */
      *p = result;
    }else{
      /* We can do recursive calls before all the fields are filled, because
         we will not be calling the major GC. */
      value field0 = Field (v, 0);  /* saved before it becomes the forward ptr */
      mlsize_t sz = Wosize_val (v);
      result = alloc_shr (sz, Tag_val (v));
      *p = result;
      Hd_val (v) = Bluehd_hd (Hd_val (v)); /* Put the forward flag. */
      Field (v, 0) = result;               /* And the forward pointer. */
      if (sz == 1){
        /* Single field: loop instead of recursing. */
        p = &Field (result, 0);
        v = field0;
        goto tail_call;
      }else{
        /* Recurse on all fields but the last; loop on the last one. */
        oldify (&Field (result, 0), field0);
        for (i = 1; i < sz - 1; i++){
          oldify (&Field (result, i), Field (v, i));
        }
        p = &Field (result, i);
        v = Field (v, i);
        goto tail_call;
      }
    }
  }else{
    *p = v;  /* immediate, or already in the major heap */
  }
}
/* Copy [size] bytes starting at [src] into a fresh Abstract_tag block
   in the major heap.  The block carries 2 extra leading words: word 1
   stores the payload offset in words; word 0 is left untouched here
   (presumably reserved for a finalization/type slot set by the caller
   -- confirm against the rest of this runtime).  Raises via
   ml_raise_null_pointer when [src] is NULL. */
CAMLexport value copy_memblock_indirected (void *src, asize_t size)
{
  mlsize_t wosize = Wosize_asize(size);  /* payload size in words */
  value ret;
  if (!src) ml_raise_null_pointer ();
  ret = alloc_shr (wosize+2, Abstract_tag);
  Field(ret,1) = (value)2;               /* payload begins at word 2 */
  memcpy ((value *) ret + 2, src, size);
  return ret;
}
/* Build an exception-name value: a one-field Reference_tag block in the
   major heap holding a fresh copy of the C string [name].  Identical in
   shape to the Push_roots/Pop_roots variant elsewhere in this file, but
   spelled with the PUSH_ROOTS/POP_ROOTS macros.
   NOTE(review): the field of a freshly alloc_shr'd block is
   uninitialized; initialize() is normally required instead of modify()
   -- confirm against this runtime's write-barrier rules. */
value mkexnname(char* name)
{
  value ref;
  PUSH_ROOTS(r, 1);
  r[0] = copy_string(name);           /* rooted: survives GC in alloc_shr */
  ref = alloc_shr(1, Reference_tag);  /* ref cell in the major heap */
  modify(&Field(ref, 0), r[0]);
  POP_ROOTS();
  return ref;
}
/* Allocate a weak array with [size] slots.  Weak blocks must live in
   the major heap; every slot starts out empty ((value) NULL).  The
   zero-length case returns the shared Weak_tag atom. */
value weak_arr(value size)
{
  mlsize_t n = VAL_TO_LONG(size);
  mlsize_t k;
  value arr;

  if (n == 0)
    return Atom(Weak_tag);

  arr = alloc_shr(n, Weak_tag);    /* must go in the major heap */
  for (k = 0; k < n; k++)
    Field(arr, k) = (value) NULL;  /* empty weak slot */
  return arr;
}
/* If [v] is a block in the minor heap, return a copy of it allocated
   in the major heap; otherwise return [v] unchanged.  Only no-scan
   blocks (tag >= No_scan_tag) may be copied: their fields are raw
   data, so plain word stores need no write barrier. */
CAMLprim value ml_stable_copy (value v)
{
  if (!(Is_block(v) && (char*)(v) < young_end && (char*)(v) > young_start))
    return v;  /* immediate, or already outside the minor heap */
  {
    CAMLparam1(v);
    mlsize_t pos, len = Wosize_val(v);
    int tag = Tag_val(v);
    value copy;

    if (tag < No_scan_tag) invalid_argument("ml_stable_copy");
    copy = alloc_shr (len, tag);  /* may run the GC; v is rooted above */
    for (pos = 0; pos < len; pos++)
      Field(copy, pos) = Field(v, pos);
    CAMLreturn(copy);
  }
}
/* ML type: fddvar -> varnum vector */
/* Return the BDD variable numbers of FDD variable [var] as an OCaml
   int vector.  The C array produced by fdd_vars is copied into the
   vector and then freed. */
EXTERNML value mlfdd_vars(value var) /* ML */
{
  int *nums;
  int count, k;
  value vect;

  count = fdd_varnum(Int_val(var));
  nums  = fdd_vars(Int_val(var));
  if (count == 0)
    return Atom(0);  /* The empty vector */

  /* Small vectors fit in the minor heap; larger ones must be shared. */
  vect = count < Max_young_wosize ? alloc(count, 0) : alloc_shr(count, 0);
  for (k = 0; k < count; k++)
    Field(vect, k) = Val_long(nums[k]);
  free(nums);
  return vect;
}
/* ML type: varSet -> varnum vector */
/* Extract the variable numbers of [varset] into an OCaml int vector.
   Raises Domain (via RAISE_DOMAIN) when bdd_scanset reports failure.
   The C array produced by bdd_scanset is copied and then freed. */
EXTERNML value mlbdd_bdd_scanset(value varset)
{
  int *nums;
  int count, k;
  value vect;

  if (bdd_scanset(Bdd_val(varset), &nums, &count)) {
    RAISE_DOMAIN;
    return Val_unit; /* unreachable, here to prevent warnings */
  }
  if (count == 0)
    return Atom(0);  /* The empty vector */

  /* Small vectors fit in the minor heap; larger ones must be shared. */
  vect = count < Max_young_wosize ? alloc(count, 0) : alloc_shr(count, 0);
  for (k = 0; k < count; k++)
    Field(vect, k) = Val_long(nums[k]);
  free(nums);
  return vect;
}
/* Wrapper for pvm_config: return the PVM virtual-machine configuration
   as an OCaml array of (hostname, architecture, speed) triples.
   [r] is a 3-field scratch block rooted via CAMLlocal1: field 2 holds
   the result array, fields 0-1 hold the strings of the triple being
   built, so they survive any GC triggered by the allocations below. */
value Pvm_config (void)
{
  CAMLparam0();
  int res,bytes;
  int nhost,narch;
  int i,j;
  struct pvmhostinfo *p;
  value v;
  CAMLlocal1(r);
  r = alloc(3, 0);
  res=pvm_config(&nhost,&narch,&p);
  if (res<0) TreatError(res);
  /* Result array, one slot per host.  It lives in the major heap
     (alloc_shr), so slots are set with initialize first, modify later.
     NOTE(review): alloc_shr with a size of 0 is not a valid allocation
     in the OCaml runtime -- confirm nhost > 0 always holds here. */
  Store_field (r, 2, alloc_shr(nhost,0));
  for (i=0;i<nhost;i++) initialize(&Field(Field(r, 2),i),Val_int(0));
  for (i=0;i<nhost;i++) {
    /* Copy the host name into a fresh OCaml string (scratch field 0). */
    bytes=strlen(p[i].hi_name);
    Store_field (r, 0, alloc_string(bytes));
    for (j=0;j<bytes;j++) Byte(Field(r, 0),j)=p[i].hi_name[j];
    /* Copy the architecture name (scratch field 1). */
    bytes=strlen(p[i].hi_arch);
    Store_field (r, 1, alloc_string(bytes));
    for (j=0;j<bytes;j++) Byte(Field(r, 1),j)=p[i].hi_arch[j];
    /* Pack the (name, arch, speed) triple and store it into the
       result array with modify (the slot was already initialized). */
    v=alloc_tuple(3);
    Store_field(v, 0, Field(r, 0));
    Store_field(v, 1, Field(r, 1));
    Store_field(v, 2, Val_int(p[i].hi_speed));
    modify(&Field(Field(r, 2),i),v);
  }
  CAMLreturn(Field(r, 2));
}
/* Capture the fragment of the OCaml bytecode stack between the two
   trap pointers encoded by [vek1] (older, tp1) and [vek2] (more
   recent, tp2) into a fresh heap block, relativize the Trap_link
   chain inside the copy, and unwind caml_trapsp to tp1.

   vek1/vek2 are offsets from caml_stack_high (the stack grows down,
   so tp2 < tp1 and the fragment is tp1 - tp2 words).  In the copied
   block every Trap_link is rewritten as an offset from tp2, with
   Val_long(0) marking the end of the chain (the link that pointed at
   tp1).

   BUG FIX: the sanity-check loop walking the Trap_link chain used
   "p == tp1" as its continuation condition; since the asserts above it
   establish caml_trapsp == tp2 and tp2 < tp1, the loop body could
   never run and the check was dead.  The condition must be "p != tp1":
   walk the chain until tp1 is reached, failing loudly if we run off
   the top of the stack first. */
value pop_stack_fragment(value vek1, value vek2)
{
  const ptrdiff_t ek1 = Long_val(vek1);
  const ptrdiff_t ek2 = Long_val(vek2);
  value * const tp1 = caml_stack_high - ek1;
  value * const tp2 = caml_stack_high - ek2;
  value *p, *q;
  mlsize_t size, i;
  value block;

  myassert(tp2 < tp1);          /* stack grows downwards */
  size = tp1 - tp2;             /* tp2 is more recent ptr */

  /* Copy the fragment: small blocks go to the minor heap (memcpy is
     fine there), large ones to the major heap (initialize required). */
  if (size < Max_young_wosize) {
    block = alloc(size, 0);
    memcpy(&Field(block, 0), tp2, size * sizeof(value));
  } else {
    block = alloc_shr(size, 0);
    for (i = 0; i < size; i++)
      initialize(&Field(block, i), tp2[i]);
  }

  /* We check the invariants after the allocation of block, which may
     cause a GC run.  Stack should not be moved though. */
  myassert(caml_extern_sp >= caml_stack_low);
  myassert(caml_extern_sp <= caml_stack_high);
  myassert(caml_trapsp < caml_stack_high);
  myassert(tp1 < caml_stack_high);
  myassert(caml_trapsp == tp2);
  myassert(caml_extern_sp < tp2);

  /* Check the invariant that tp1 must occur somewhere in the
     Trap_link chain (was "p == tp1", which made this check dead). */
  for (p = caml_trapsp; p != tp1; p = Trap_link(p))
    if (!(p < caml_stack_high)) {
      print_gl_stack("ERROR: tp1 is not found in the Trap_link chain!!!");
      print_exc_trace("ERROR: tp1 is not found...");
      myassert(0);
    }

  /* Adjust the links in the copied code: make them relative to tp2,
     the bottom of the copied stack. */
  p = tp2;
  while (1) {
    myassert(p < caml_stack_high);
    q = Trap_link(p);
    if (q == tp1) {
      /* end of the chain */
      Field(block, (value*)(&(Trap_link(p))) - tp2) = Val_long(0);
      break;
    }
    Field(block, (value*)(&(Trap_link(p))) - tp2) = Val_long(q - tp2);
    p = q;
  }
  caml_trapsp = tp1;            /* Reset the chain */
  return block;
}
/* Allocate an (uninitialized) indirected memory block: an Abstract_tag
   block with 2 extra leading words in front of a payload of [size]
   bytes.  Word 1 records the payload offset (2 words), matching the
   layout used by copy_memblock_indirected. */
value alloc_memblock_indirected (asize_t size)
{
  mlsize_t words = Wosize_asize(size) + 2;
  value blk = alloc_shr(words, Abstract_tag);
  Field(blk, 1) = (value) 2;   /* payload begins at word 2 */
  return blk;
}