/*! Constructor

\param name
name used for error reporting

The constructor registers this atomic function object in the global
class_object() / class_name() tables; index_ records the slot so the
destructor can later null it out. Registration mutates shared static
state, hence the not-in-parallel requirement.
*/
atomic_base( const std::string& name) :
index_( class_object().size() ) ,
sparsity_( set_sparsity_enum )
{ CPPAD_ASSERT_KNOWN(
    ! thread_alloc::in_parallel() ,
    "atomic_base: constructor cannot be called in parallel mode."
  );
  // object and name are pushed together so the two tables stay in sync
  class_object().push_back(this);
  class_name().push_back(name);
  CPPAD_ASSERT_UNKNOWN( class_object().size() == class_name().size() );
}
String* String::substring(STATE, Fixnum* start_f, Fixnum* count_f) {
  // Extract the byte range [start, start+count) as a new String of the
  // same (possibly subclassed) class, propagating taint from the source.
  native_int begin = start_f->to_native();
  native_int length = count_f->to_native();
  native_int bytes = num_bytes_->to_native();

  // A negative requested length never yields a string.
  if(length < 0) return (String*)Qnil;

  // A negative start counts backwards from the end of the string.
  if(begin < 0) {
    begin += bytes;
    if(begin < 0) return (String*)Qnil;
  }

  // Starting past the end is also nil (start == bytes gives "").
  if(begin > bytes) return (String*)Qnil;

  // Clamp the range so it stays inside the string.
  if(begin + length > bytes) {
    length = bytes - begin;
  }
  if(length < 0) length = 0;

  String* sub = String::create(state, Fixnum::from(length));
  sub->klass(state, class_object(state)); // preserve subclass identity

  memcpy(sub->byte_address(), byte_address() + begin, length);

  if(tainted_p(state) == Qtrue) sub->taint(state);
  return sub;
}
String* String::find_character(STATE, Fixnum* offset) {
  // Return the character (possibly multi-byte under the current kcode
  // table) beginning at the given byte offset, or nil past the end.
  size_t idx = (size_t)offset->to_native();
  if(idx >= size()) return (String*)Qnil;

  uint8_t* here = byte_address() + idx;
  kcode::table* tbl = state->shared.kcode_table();

  String* result = 0;
  if(kcode::mbchar_p(tbl, *here)) {
    size_t width = kcode::mbclen(tbl, *here);
    // Honor the multi-byte width only when it fits inside the string.
    if(idx + width <= size()) {
      result = String::create(state, reinterpret_cast<const char*>(here), width);
    }
  }

  // Plain byte, or a multi-byte sequence truncated at end of string.
  if(!result) {
    result = String::create(state, reinterpret_cast<const char*>(here), 1);
  }

  result->klass(state, class_object(state)); // preserve subclass identity
  if(RTEST(tainted_p(state))) result->taint(state);
  return result;
}
MethodTable* MethodTable::duplicate(STATE) {
  // Copy every name -> (method, visibility) entry into a fresh table of
  // the same size and class. The spin lock is held for the whole walk so
  // concurrent mutation cannot corrupt the bucket chains.
  utilities::thread::SpinLock::LockGuard lg(lock_);

  // bins_ was previously read twice (once for create, once for the loop
  // bound); a single read under the lock is sufficient and clearer.
  size_t num = bins_->to_native();
  MethodTable* dup = MethodTable::create(state, num);

  // Allow for subclassing.
  dup->klass(state, class_object(state));

  for(size_t i = 0; i < num; i++) {
    // Each bin holds a chain of buckets; copy the whole chain.
    MethodTableBucket* entry = try_as<MethodTableBucket>(values_->at(state, i));
    while(entry) {
      dup->store(state, entry->name(), entry->method(), entry->visibility());
      entry = try_as<MethodTableBucket>(entry->next());
    }
  }
  return dup;
}
Time* Time::dup(STATE) {
  // Shallow copy: allocate a new Time of the same class and carry over
  // the stored time fields.
  Time* copy = state->new_object<Time>(class_object(state));
  copy->seconds_ = seconds_;
  copy->microseconds_ = microseconds_;
  copy->is_gmt(state, is_gmt_);
  return copy;
}
// Split this string on runs of whitespace, awk-style: leading
// whitespace is skipped, consecutive separators collapse, and at most
// `limit` fields are produced when limit > 0. Fields inherit this
// string's class and taint. Returns a new Array of Strings.
Array* String::awk_split(STATE, Fixnum* f_limit) {
  native_int limit = f_limit->to_native();
  native_int sz = size();
  uint8_t* start = byte_address();
  int end = 0;     // one past the last byte of the current field
  int begin = 0;   // first byte of the current field
  native_int i = 0;
  // When limited, `i` counts fields already emitted plus the final one.
  if(limit > 0) i = 1;
  bool skip = true; // true while consuming separator whitespace
  Array* ary = Array::create(state, 3);
  Class* out_class = class_object(state); // preserve subclass on fields
  int taint = (is_tainted_p() ? 1 : 0);
  // Algorithm ported from MRI
  for(uint8_t* ptr = start; ptr < start+sz; ptr++) {
    if(skip) {
      if(ISSPACE(*ptr)) {
        // still in a separator run: advance the field start
        begin++;
      } else {
        // field begins here
        end = begin + 1;
        skip = false;
        // limit reached: the remainder becomes the final field below
        if(limit > 0 && limit <= i) break;
      }
    } else {
      if(ISSPACE(*ptr)) {
        // field ended: emit [begin, end)
        String* str = String::create(state, (const char*)start+begin, end-begin);
        str->klass(state, out_class);
        str->set_tainted(taint);
        ary->append(state, str);
        skip = true;
        begin = end + 1;
        if(limit > 0) i++;
      } else {
        end++;
      }
    }
  }
  // Emit the trailing field: non-empty remainder, or (under a limit /
  // negative limit) possibly an empty final field.
  int fin_sz = sz-begin;
  if(fin_sz > 0 || (limit > 0 && i <= limit) || limit < 0) {
    String* str = String::create(state, (const char*)start+begin, fin_sz);
    str->klass(state, out_class);
    str->set_tainted(taint);
    ary->append(state, str);
  }
  return ary;
}
Array* Array::new_reserved(STATE, Fixnum* count) {
  // Build an empty Array (same class as this one) backed by a tuple big
  // enough for `count` elements, so early appends avoid reallocation.
  Array* ary = state->new_object_dirty<Array>(class_object(state));
  ary->start(state, Fixnum::from(0));
  ary->total(state, Fixnum::from(0));

  native_int reserve = count->to_native();
  // Always reserve at least one slot.
  if(reserve <= 0) reserve = 1;
  ary->tuple(state, Tuple::create(state, reserve));
  return ary;
}
/// Free vector memory used by this class (work space) static void clear(void) { CPPAD_ASSERT_KNOWN( ! thread_alloc::in_parallel() , "cannot use atomic_base clear during parallel execution" ); size_t i = class_object().size(); while(i--) { size_t thread = CPPAD_MAX_NUM_THREADS; while(thread--) { atomic_base* op = class_object()[i]; if( op != CPPAD_NULL ) { op->afun_vx_[thread].clear(); op->afun_vy_[thread].clear(); op->afun_tx_[thread].clear(); op->afun_ty_[thread].clear(); } } } return; }
String* String::string_dup(STATE) {
  // Duplicate this string. Both copies now reference the same byte
  // storage, so each side is flagged as shared (copy-on-write style).
  String* copy = as<String>(duplicate(state));
  copy->shared(state, Qtrue);
  shared(state, Qtrue);

  // Fix for subclassing
  copy->klass(state, class_object(state));
  return copy;
}
LookupTable* LookupTable::duplicate(STATE) {
  // Create a fresh table with the same bin count and copy every
  // key/value association into it.
  size_t bins = bins_->to_native();
  LookupTable* copy = LookupTable::create(state, bins);

  // Allow for subclassing.
  copy->klass(state, class_object(state));

  size_t count = entries_->to_native();
  Array* entries = all_entries(state);
  for(size_t idx = 0; idx < count; idx++) {
    LookupTableBucket* bucket = as<LookupTableBucket>(entries->get(state, idx));
    copy->store(state, bucket->key(), bucket->value());
  }
  return copy;
}
MethodTable* MethodTable::duplicate(STATE) {
  // Create a fresh table with the same bin count and copy every
  // name -> (method, visibility) entry into it.
  size_t bins = bins_->to_native();
  MethodTable* copy = MethodTable::create(state, bins);

  // Allow for subclassing.
  copy->klass(state, class_object(state));

  size_t count = entries_->to_native();
  Array* entries = all_entries(state);
  for(size_t idx = 0; idx < count; idx++) {
    MethodTableBucket* bucket = as<MethodTableBucket>(entries->get(state, idx));
    copy->store(state, bucket->name(), bucket->method(), bucket->visibility());
  }
  return copy;
}
LookupTable* LookupTable::dup(STATE) {
  // Clone this table: same bin count, same class, and a copy of every
  // key/value association.
  size_t bins = bins_->to_native();
  LookupTable* copy = LookupTable::create(state, bins);
  state->om->set_class(copy, class_object(state));

  size_t count = entries_->to_native();
  Array* entries = all_entries(state);
  for(size_t idx = 0; idx < count; idx++) {
    // Each entry is a (key, value) tuple.
    Tuple* entry = as<Tuple>(entries->get(state, idx));
    Object* key = entry->at(state, 0);
    Object* value = entry->at(state, 1);
    copy->store(state, key, value);
  }
  return copy;
}
Array* Array::new_range(STATE, Fixnum* start, Fixnum* count) {
  // Build a new Array (same class as this one) containing the `count`
  // elements of this array beginning at index `start`.
  Array* result = state->new_object<Array>(class_object(state));
  result->total(state, count);
  result->start(state, Fixnum::from(0));

  native_int len = count->to_native();
  if(len <= 0) {
    // Empty (or negative) range: back it with an empty tuple.
    result->tuple(state, Tuple::create(state, 0));
    return result;
  }

  Tuple* dest = Tuple::create(state, len);
  Tuple* src = tuple_;
  native_int from = start->to_native();
  for(native_int i = 0; i < len; i++) {
    dest->put(state, i, src->at(state, from + i));
  }
  result->tuple(state, dest);
  return result;
}
// Extract the byte range [start, start+count) as a new String of the
// same (possibly subclassed) class, propagating taint. Returns nil for
// a negative count, or a start position outside the string.
String* String::substring(STATE, Fixnum* start_f, Fixnum* count_f) {
  native_int start = start_f->to_native();
  native_int count = count_f->to_native();
  native_int total = num_bytes_->to_native();
  native_int data_size = as<CharArray>(data_)->size();

  // Clamp the string size to the maximum underlying byte array size
  if(unlikely(total > data_size)) {
    total = data_size;
  }

  if(count < 0) return nil<String>();

  // A negative start counts backwards from the end of the string.
  if(start < 0) {
    start += total;
    if(start < 0) return nil<String>();
  }

  if(start > total) return nil<String>();

  // Clamp the range so it stays inside the string.
  if(start + count > total) {
    count = total - start;
  }
  if(count < 0) count = 0;

  String* sub = String::create(state, Fixnum::from(count));
  sub->klass(state, class_object(state)); // preserve subclass identity
  uint8_t* buf = byte_address() + start;
  memcpy(sub->byte_address(), buf, count);
  if(tainted_p(state) == Qtrue) sub->taint(state);
  return sub;
}
/* Compute (and cache in type_uid(t)) the unique type identifier for
 * type t.
 *   - A cached uid is returned immediately.
 *   - Aliases and imported types delegate to their underlying type.
 *   - Object types carrying a CORBA repository id use that id directly.
 *   - Otherwise a canonical textual description of the type is built
 *     (form_typedesc), hashed with SHS/SHA-1, and the digest is encoded
 *     (convbase) after an "ilut:" prefix into a freshly allocated
 *     40-byte buffer (5-byte prefix + encoded 20-byte digest + NUL).
 * Also opportunistically records the ilu.CString type in the global
 * iluparser_CString_Type the first time it is seen.
 * NOTE(review): local `t2` is declared but never used. The `verbose`
 * flag is a file-scope variable initialized once from the
 * ILU_TYPE_UID_VERBOSE environment variable. */
char *FigureTypeUID (Type t) { struct buffer_s buffer; unsigned char hash[20]; SHS_CTX ctx; static boolean initialized = FALSE; Type t2; if (! initialized) { verbose = (getenv ("ILU_TYPE_UID_VERBOSE") != NULL); initialized = TRUE; } if (type_uid(t) != NULL) return (type_uid(t)); assert((t->importInterfaceName == NULL) || (type_kind(t) == alias_Type)); if (type_kind(t) == alias_Type || t->importInterfaceName != NULL) return (type_uid(t) = ilu_strdup(FigureTypeUID(under_type(t)))); if (type_kind(t) == object_Type && class_object(t) != NULL && class_object(t)->corba_rep_id != NULL) return (type_uid(t) = class_object(t)->corba_rep_id); if (verbose && !t->builtIn) fprintf(stderr, "figuring 'ilut:' uid for <%s> (addr %p, ifc addr %p) from %s\n", full_type_name(t), t, t->interface, ((t->importInterfaceName != NULL) ? t->importInterfaceName : "(current ifc)")); buffer.data = (unsigned char *) iluparser_Malloc(buffer.size = 1024); buffer.used = 0; form_typedesc (t, &buffer); buffer.data[buffer.used] = '\0'; if (verbose && ! t->builtIn) fprintf (stderr, " buffer is <%*.*s>\n", buffer.used, buffer.used, buffer.data); SHSInit(&ctx); SHSUpdate (&ctx, buffer.data, buffer.used); SHSFinal (hash, &ctx); /* { int i; fprintf (stderr, " hash is "); for (i = 0; i < 20; i += 1) fprintf (stderr, "%u ", hash[i]); fprintf (stderr, "\n"); } */ type_uid(t) = (char *) iluparser_Malloc(40); strcpy (type_uid(t), "ilut:"); /* convert to base 64 */ convbase(hash,20,type_uid(t) + strlen(type_uid(t))); iluparser_Free(buffer.data); if (verbose && !t->builtIn) fprintf (stderr, " uid for %s is %s\n", type_name(t), type_uid(t)); if (iluparser_CString_Type == NULL && t->importInterfaceName == NULL && type_interface(t) != NULL && strcmp(interface_name(type_interface(t)), "ilu") == 0 && strcmp(type_name(t), "CString") == 0) iluparser_CString_Type = t; return (type_uid(t)); }
/// destructor informs CppAD that this atomic function with this index /// has dropped out of scope by setting its pointer to null virtual ~atomic_base(void) { CPPAD_ASSERT_UNKNOWN( class_object().size() > index_ ); // change object pointer to null, but leave name for error reporting class_object()[index_] = CPPAD_NULL; }
/// atomic_base function object corresponding to a certain index static atomic_base* class_object(size_t index) { CPPAD_ASSERT_UNKNOWN( class_object().size() > index ); return class_object()[index]; }
/* Append the canonical textual description of type t to the output
 * buffer `data`, in the form "(type <interface> <name> <brand> ...)".
 * The switch emits one clause per type kind (array, sequence, record,
 * optional, reference, union, enumeration, fixedpoint, string, object,
 * alias); object types additionally serialize their superclasses and
 * full method signatures. Types referenced from within t are written
 * via add_typeref, which also appends them to the `referenced` list
 * (OUT parameter) so callers can process them transitively. This text
 * is what gets hashed to form the type's uid, so the emitted format
 * must remain byte-stable across versions. */
static void add_typedesc (Type t, struct buffer_s *data, /* OUT */ list referenced) { unsigned int i, j; Argument arg; EnumField ef; Exception exn; Class obj; Procedure meth; struct ilu_integerLiteral_s lit; printmToBuffer (data, "(type %s %s ", t->builtIn ? "ilu" : interface_name(type_interface(t)), type_name(t)); add_quoted_string(t->brand, data); switch (type_basic_type(t)) { case array_Type: print0ToBuffer (data, " (array "); add_typeref (type_description(t)->structuredDes.array.type, data, referenced); for (i = 0; i < list_size(type_description(t)->structuredDes.array.dimensions); i++) { printmToBuffer (data, " (fixed %lu)", (unsigned) list_ref(type_description(t)->structuredDes.array.dimensions, i)); } print0ToBuffer (data, ")"); break; case sequence_Type: print0ToBuffer (data, " (sequence "); add_typeref (type_description(t)->structuredDes.sequence.type, data, referenced); printmToBuffer (data, " (variable %lu))", (type_description(t)->structuredDes.sequence.limit == 0) ? 0xFFFFFFFF : ((unsigned long) type_description(t)->structuredDes.sequence.limit)); break; case record_Type: print0ToBuffer (data, " (record"); for (i = 0; i < list_size(type_description(t)->structuredDes.record.fields); i++) { printmToBuffer (data, " (field %s ", argument_name((Argument) list_ref(type_description(t)->structuredDes.record.fields, i))); add_typeref (argument_type((Argument) list_ref(type_description(t)->structuredDes.record.fields, i)), data, referenced); print0ToBuffer (data, ")"); }; print0ToBuffer (data, ")"); break; case optional_Type: print0ToBuffer (data, " (optional "); add_typeref (type_description(t)->structuredDes.optional, data, referenced); print0ToBuffer (data, ")"); break; case reference_Type: print0ToBuffer (data, " (reference "); add_typeref (type_description(t)->structuredDes.reference.base_type, data, referenced); printmToBuffer (data, " (optional %s)", type_description(t)->structuredDes.reference.optional ? 
"true" : "false"); printmToBuffer (data, " (aliased %s)", type_description(t)->structuredDes.reference.aliased ? "true" : "false"); print0ToBuffer (data, ")"); break; case union_Type: print0ToBuffer (data, " (union "); add_typeref (type_description(t)->structuredDes.uniond.discriminator_type, data, referenced); for (i = 0; i < list_size(type_description(t)->structuredDes.uniond.types); i++) { arg = (Argument) list_ref(type_description(t)->structuredDes.uniond.types, i); print0ToBuffer (data, " (arm "); add_typeref (argument_type(arg), data, referenced); if (argument_name(arg) != NULL) printmToBuffer (data, "(name %s) ", argument_name(arg)); printmToBuffer (data, "(%s)", (arg == type_description(t)->structuredDes.uniond.default_arm) ? "default" : ""); for (j = 0; j < list_size(arg->values); j++) { print0ToBuffer(data, "(val"); add_constant (list_ref(arg->values, j), data); print0ToBuffer(data, ")"); } print0ToBuffer (data, ")"); } if (type_description(t)->structuredDes.uniond.others_allowed) print0ToBuffer (data, " ((default) void)"); print0ToBuffer (data, ")"); break; case enumeration_Type: print0ToBuffer (data, " (enumeration"); for (i = 0; i < list_size(type_description(t)->structuredDes.enumeration); i++) { ef = (EnumField) list_ref(type_description(t)->structuredDes.enumeration, i); printmToBuffer (data, " (element %s %d)", ef->name, ef->id); } print0ToBuffer (data, ")"); break; case fixedpoint_Type: print0ToBuffer (data, " (fixedpoint "); add_integer_literal (type_description(t)->structuredDes.fixed.min_numerator, data); print0ToBuffer (data, " "); add_integer_literal (type_description(t)->structuredDes.fixed.max_numerator, data); print0ToBuffer (data, " "); lit = *type_description(t)->structuredDes.fixed.denominator; if (lit.negative) { lit.negative = FALSE; print0ToBuffer (data, "1/"); } add_integer_literal (&lit, data); print0ToBuffer (data, ")"); break; case string_Type: printmToBuffer (data, " (string %lu \"%s\" %u)", 
type_description(t)->structuredDes.string.max_length, ((type_description(t)->structuredDes.string.language != 0) ? type_description(t)->structuredDes.string.language : ""), type_description(t)->structuredDes.string.charset); break; case object_Type: obj = class_object(t); print0ToBuffer (data, " (object"); if (obj->singleton != NULL) printmToBuffer (data, " (singleton \"%s\")", obj->singleton); if (obj->optional) print0ToBuffer (data, " optional"); if (obj->collectible) print0ToBuffer (data, " collectible"); for (i = 0; i < list_size(obj->superclasses); i++) { print0ToBuffer (data, " (supertype "); add_typeref ((Type) list_ref(obj->superclasses, i), data, referenced); print0ToBuffer (data, ")"); } for (i = 0; i < list_size(obj->methods); i++) { meth = (Procedure) list_ref(obj->methods, i); printmToBuffer (data, " (method %s %s%s(returns", name_base_name(meth->name), meth->asynch ? "asynchronous " : "", meth->functional ? "functional " : ""); if (meth->returnType != NULL && (type_ur_kind(meth->returnType) != void_Type)) { print0ToBuffer (data, " "); add_typeref (meth->returnType, data, referenced); } else { print0ToBuffer (data, " void"); } for (j = 0; j < list_size(meth->exceptions); j++) { print0ToBuffer (data, " "); exn = (Exception) list_ref(meth->exceptions, j); add_exnref (exn, data, referenced); } print0ToBuffer (data, ")"); for (j = 0; j < list_size(meth->arguments); j++) { arg = (Argument) list_ref(meth->arguments, j); printmToBuffer (data, " (parameter %s %s ", argument_name(arg), argument_direction(arg)); add_typeref (argument_type(arg), data, referenced); if (arg->sibling) print0ToBuffer (data, " sibling"); print0ToBuffer(data, ")"); } print0ToBuffer (data, ")"); } print0ToBuffer (data, ")"); break; case alias_Type: print0ToBuffer (data, " (redef "); add_typeref(under_type(t), data, referenced); printmToBuffer (data, " \"%s\")", type_uid(t)); break; default: break; } print0ToBuffer (data, ")"); }