// Deserialize a map field from the buffer into a v8::Map.
// Wire format: uint16 entry count, then the entries.  For struct-valued maps
// each entry is one serialized struct whose `key_name` member is used as the
// map key; for scalar-valued maps each entry is a (key, value) pair.
v8::Local<v8::Map> Msg_Struct::build_object_map(const Field_Info &field_info, Block_Buffer &buffer, Isolate* isolate) {
	EscapableHandleScope handle_scope(isolate);
	uint16_t vec_size = 0;
	buffer.read_uint16(vec_size);
	Local<Map> map = Map::New(isolate);
	if (is_struct(field_info.field_type)) {
		for (uint16_t i = 0; i < vec_size; ++i) {
			Local<Object> object = build_object_struct(field_info, buffer, isolate);
			// The key is read back out of the freshly built struct object.
			Local<Value> key = object->Get(isolate->GetCurrentContext(),
					String::NewFromUtf8(isolate, field_info.key_name.c_str(),
							NewStringType::kNormal).ToLocalChecked()).ToLocalChecked();
			map->Set(isolate->GetCurrentContext(), key, object).ToLocalChecked();
		}
	} else {
		Field_Info key_info;
		key_info.field_label = "args";
		key_info.field_type = field_info.key_type;
		key_info.field_name = field_info.key_name;
		// FIX: the original read a single (key, value) pair regardless of
		// vec_size, dropping every entry after the first and leaving the
		// remaining pairs unconsumed in the buffer (which corrupts all
		// subsequent field reads).  Loop over all vec_size pairs, mirroring
		// the struct branch above and the writer build_buffer_map.
		for (uint16_t i = 0; i < vec_size; ++i) {
			Local<Value> key = build_object_arg(key_info, buffer, isolate);
			Local<Value> value = build_object_arg(field_info, buffer, isolate);
			map->Set(isolate->GetCurrentContext(), key, value).ToLocalChecked();
		}
	}
	return handle_scope.Escape(map);
}
void Msg_Struct::build_buffer_map(const Field_Info &field_info, Block_Buffer &buffer, Isolate* isolate, v8::Local<v8::Value> value) { if (!value->IsMap()) { LOG_ERROR("field_name:%s is not map, struct_name:%s", field_info.field_name.c_str(), struct_name().c_str()); buffer.write_uint16(0); return; } Local<Map> map = Local<Map>::Cast(value); int16_t len = map->Size(); buffer.write_uint16(len); Local<Array> array = map->AsArray(); //index N is the Nth key and index N + 1 is the Nth value. if(is_struct(field_info.field_type)) { for (int i = 0; i < len * 2; i = i + 2) { Local<Value> element = array->Get(isolate->GetCurrentContext(), i + 1).ToLocalChecked(); build_buffer_struct(field_info, buffer, isolate, element); } } else { Field_Info key_info; key_info.field_label = "args"; key_info.field_type = field_info.key_type; key_info.field_name = field_info.key_name; for (int i = 0; i < len * 2; i = i + 2) { Local<Value> key = array->Get(isolate->GetCurrentContext(), i).ToLocalChecked(); Local<Value> element = array->Get(isolate->GetCurrentContext(), i + 1).ToLocalChecked(); build_buffer_struct(key_info, buffer, isolate, key); build_buffer_struct(field_info, buffer, isolate, element); } } }
def_t * emit_structure (const char *name, int su, struct_def_t *defs, type_t *type, void *data, storage_class_t storage) { int i, j; int saw_null = 0; int saw_func = 0; symbol_t *struct_sym; symbol_t *field_sym; def_t *struct_def; def_t field_def; name = save_string (name); if (!type) type = make_structure (0, su, defs, 0)->type; if (!is_struct (type) || (su == 's' && type->meta != ty_struct) || (su == 'u' && type->meta != ty_union)) internal_error (0, "structure %s type mismatch", name); for (i = 0, field_sym = type->t.symtab->symbols; field_sym; i++, field_sym = field_sym->next) { if (!defs[i].name) internal_error (0, "structure %s unexpected end of defs", name); if (field_sym->type != defs[i].type) internal_error (0, "structure %s.%s field type mismatch", name, defs[i].name); if ((!defs[i].emit && saw_func) || (defs[i].emit && saw_null)) internal_error (0, "structure %s mixed emit/copy", name); if (!defs[i].emit) saw_null = 1; if (defs[i].emit) saw_func = 1; } if (defs[i].name) internal_error (0, "structure %s too many defs", name); if (storage != sc_global && storage != sc_static) internal_error (0, "structure %s must be global or static", name); struct_sym = make_symbol (name, type, pr.far_data, storage); struct_def = struct_sym->s.def; if (struct_def->initialized) internal_error (0, "structure %s already initialized", name); struct_def->initialized = struct_def->constant = 1; struct_def->nosave = 1; for (i = 0, field_sym = type->t.symtab->symbols; field_sym; i++, field_sym = field_sym->next) { field_def.type = field_sym->type; field_def.name = save_string (va ("%s.%s", name, field_sym->name)); field_def.space = struct_def->space; field_def.offset = struct_def->offset + field_sym->s.offset; if (!defs[i].emit) { //FIXME relocs? arrays? structs? pr_type_t *val = (pr_type_t *) data; memcpy (D_POINTER (void, &field_def), val, type_size (field_def.type) * sizeof (pr_type_t)); data = &val[type_size (field_def.type)]; } else { if (is_array (field_def.type)) {
// Consume a run of struct declarations and/or preprocessor statements;
// report whether at least one was matched.
bool binspector_parser_t::is_struct_set() {
    bool matched_any = false;

    for (;;) {
        // Note short-circuit: is_pp_statement() only runs when is_struct()
        // did not match, same as the original while-condition.
        if (is_struct() || is_pp_statement())
            matched_any = true;
        else
            break;
    }
    return matched_any;
}
/* Map a high-level type to the low-level engine type used for storage.
 * Scalars map to themselves, enums degrade to the default scalar type,
 * and aggregates (structs/arrays) have no scalar form so they map to
 * ev_void.  Any other input is an internal error.
 *
 * FIX: the original tested `type->type >= ev_type_count` first, which made
 * the dedicated "found 'type count' type" diagnostic for the exact sentinel
 * value unreachable.  Test the sentinel first, then strictly-greater. */
etype_t low_level_type (type_t *type)
{
	if (type->type == ev_type_count)
		internal_error (0, "found 'type count' type");
	if (type->type > ev_type_count)
		internal_error (0, "invalid type");
	if (type->type < ev_invalid)
		return type->type;
	if (is_enum (type))
		return type_default->type;
	if (is_struct (type))
		return ev_void;
	if (is_array (type))
		return ev_void;
	internal_error (0, "invalid complex type");
}
void Msg_Struct::build_buffer_vector(const Field_Info &field_info, Block_Buffer &buffer, Isolate* isolate, v8::Local<v8::Value> value) { if (!value->IsArray()) { LOG_ERROR("field_name:%s is not array, struct_name:%s", field_info.field_name.c_str(), struct_name().c_str()); buffer.write_uint16(0); return; } Local<Array> array = Local<Array>::Cast(value); int16_t len = array->Length(); buffer.write_uint16(len); for (int i = 0; i < len; ++i) { Local<Value> element = array->Get(isolate->GetCurrentContext(), i).ToLocalChecked(); if(is_struct(field_info.field_type)) { build_buffer_struct(field_info, buffer, isolate, element); } else { build_buffer_arg(field_info, buffer, isolate, element); } } }
// Deserialize a vector field from the buffer into a v8::Array.
// Wire format: uint16 element count followed by the elements, each encoded
// either as a nested struct or as a plain argument depending on field type.
v8::Local<v8::Array> Msg_Struct::build_object_vector(const Field_Info &field_info, Block_Buffer &buffer, Isolate* isolate) {
	EscapableHandleScope scope(isolate);

	uint16_t count = 0;
	buffer.read_uint16(count);
	Local<Array> result = Array::New(isolate, count);

	if (is_struct(field_info.field_type)) {
		// Struct-typed elements: build one object per entry.
		for (uint16_t idx = 0; idx < count; ++idx) {
			Local<Object> entry = build_object_struct(field_info, buffer, isolate);
			result->Set(isolate->GetCurrentContext(), idx, entry).FromJust();
		}
	} else {
		// Scalar/string elements: decode each one as a plain argument.
		for (uint16_t idx = 0; idx < count; ++idx) {
			Local<Value> entry = build_object_arg(field_info, buffer, isolate);
			result->Set(isolate->GetCurrentContext(), idx, entry).FromJust();
		}
	}
	return scope.Escape(result);
}
/* Recompute the cached size of every compound (array / struct) entry in the
 * global types_table.  Scalar entries are left untouched. */
void refresh_compound_sizes(void)
{
    int c, sz, i, nmemb;
    basetype_t *t0, *t1;

    for (c = 0; c < types_table_size; c++) {
        if (is_array(types_table[c].ohm_type)) {
            /* Array: total size = element count * size of the (single)
             * element type stored in elems[0]. */
            sz = get_type_size(types_table[c].elems[0]);
            types_table[c].size = types_table[c].nelem * sz;
        } else if (is_struct(types_table[c].ohm_type)) {
            nmemb = types_table[c].nelem;
            if (!nmemb)
                continue; /* no members: nothing to refresh */
            /* NOTE(review): this pass appears to convert each member's
             * `size` field in place from a start offset into a byte size,
             * by subtracting consecutive members' values; the last member
             * is sized against the struct's total size.  That makes the
             * pass destructive - running it twice on the same member set
             * would produce garbage.  TODO confirm against callers that it
             * runs exactly once per table build. */
            for (i = 0; i < nmemb-1; ++i) {
                t0 = types_table[c].elems[i];
                t1 = types_table[c].elems[i+1];
                t0->size = t1->size - t0->size;
            }
            /* Last member: struct total minus its own start value. */
            t0 = types_table[c].elems[nmemb-1];
            t0->size = types_table[c].size - t0->size;
        }
    }
}
/* Map a high-level type to the low-level engine type used for storage.
 * Scalars map to themselves, enums degrade to the default scalar type, and
 * aggregates map to ev_void - except single-word structs, which are handed
 * back as ev_integer (see FIXME below).
 *
 * FIX: the original tested `type->type >= ev_type_count` first, which made
 * the dedicated "found 'type count' type" diagnostic for the exact sentinel
 * value unreachable.  Test the sentinel first, then strictly-greater. */
etype_t low_level_type (type_t *type)
{
	if (type->type == ev_type_count)
		internal_error (0, "found 'type count' type");
	if (type->type > ev_type_count)
		internal_error (0, "invalid type");
	if (type->type < ev_invalid)
		return type->type;
	if (is_enum (type))
		return type_default->type;
	if (is_struct (type)) {
		//FIXME does this break anything?
		//maybe the peephole optimizer should do this sort of thing.
		if (type_size (type) == 1)
			return ev_integer;
		return ev_void;
	}
	if (is_array (type))
		return ev_void;
	internal_error (0, "invalid complex type");
}
/* Parse the body of a struct/union definition: a list of member
 * declarations, each possibly declaring several comma-separated members.
 * Members are registered both in a throwaway namespace (for duplicate
 * detection within the definition) and on the aggregate type itself. */
static void member_declaration_list(struct typetree *type)
{
    struct namespace ns = {0};
    struct typetree *decl_base, *decl_type;
    const char *name;

    push_scope(&ns);
    do {
        decl_base = declaration_specifiers(NULL);
        do {
            name = NULL;
            decl_type = declarator(decl_base, &name);
            if (!name) {
                error("Missing name in member declarator.");
                exit(1);
            } else if (!size_of(decl_type)) {
                /* Members must have complete type (no forward refs). */
                error("Field '%s' has incomplete type '%t'.", name, decl_type);
                exit(1);
            } else {
                sym_add(&ns, name, decl_type, SYM_DECLARATION, LINK_NONE);
                type_add_member(type, name, decl_type);
            }
            if (peek().token == ',') {
                consume(',');
                continue;
            }
        } while (peek().token != ';');
        consume(';');
    } while (peek().token != '}');
    pop_scope(&ns);
}

/* Parse `struct|union [tag] [{ members }]`, registering/looking up the tag
 * in the tag namespace and diagnosing kind mismatches and redefinitions. */
static struct typetree *struct_or_union_declaration(void)
{
    struct symbol *sym = NULL;
    struct typetree *type = NULL;
    /* next() consumes the struct/union keyword and decides the kind. */
    enum type kind = (next().token == STRUCT) ? T_STRUCT : T_UNION;

    if (peek().token == IDENTIFIER) {
        const char *name = consume(IDENTIFIER).strval;
        sym = sym_lookup(&ns_tag, name);
        if (!sym) {
            type = type_init(kind);
            sym = sym_add(&ns_tag, name, type, SYM_TYPEDEF, LINK_NONE);
        } else if (is_integer(&sym->type)) {
            error("Tag '%s' was previously declared as enum.", sym->name);
            exit(1);
        } else if (sym->type.type != kind) {
            error("Tag '%s' was previously declared as %s.",
                sym->name, (sym->type.type == T_STRUCT) ? "struct" : "union");
            exit(1);
        }
        /* Retrieve type from existing symbol, possibly providing a complete
         * definition that will be available for later declarations. Overwrites
         * existing type information from symbol table. */
        type = &sym->type;
        if (peek().token == '{' && type->size) {
            /* A nonzero size marks the tag as already defined. */
            error("Redefiniton of '%s'.", sym->name);
            exit(1);
        }
    }
    if (peek().token == '{') {
        if (!type) {
            /* Anonymous structure; allocate a new standalone type,
             * not part of any symbol. */
            type = type_init(kind);
        }
        consume('{');
        member_declaration_list(type);
        assert(type->size);
        consume('}');
    }
    /* Return to the caller a copy of the root node, which can be overwritten
     * with new type qualifiers without altering the tag registration. */
    return (sym) ? type_tagged_copy(&sym->type, sym->name) : type;
}

/* Parse `{ NAME [= const-expr], ... }`, registering each enumerator as an
 * int-valued symbol.  Values auto-increment from the last explicit value. */
static void enumerator_list(void)
{
    struct var val;
    struct symbol *sym;
    int enum_value = 0;

    consume('{');
    do {
        const char *name = consume(IDENTIFIER).strval;
        if (peek().token == '=') {
            consume('=');
            val = constant_expression();
            if (!is_integer(val.type)) {
                error("Implicit conversion from non-integer type in enum.");
            }
            enum_value = val.imm.i;
        }
        sym = sym_add(
            &ns_ident, name, &basic_type__int, SYM_ENUM_VALUE, LINK_NONE);
        sym->enum_value = enum_value++;
        if (peek().token != ',')
            break;
        consume(',');
    } while (peek().token != '}');
    consume('}');
}

/* Parse `enum [tag] [{ enumerators }]`.  Tags live in the tag namespace;
 * the resulting type is always a 4-byte signed integer. */
static struct typetree *enum_declaration(void)
{
    struct typetree *type = type_init(T_SIGNED, 4);

    consume(ENUM);
    if (peek().token == IDENTIFIER) {
        struct symbol *tag = NULL;
        const char *name = consume(IDENTIFIER).strval;

        tag = sym_lookup(&ns_tag, name);
        if (!tag || tag->depth < ns_tag.current_depth) {
            /* Unknown tag, or one from an outer scope: (re)declare here. */
            tag = sym_add(&ns_tag, name, type, SYM_TYPEDEF, LINK_NONE);
        } else if (!is_integer(&tag->type)) {
            error("Tag '%s' was previously defined as aggregate type.",
                tag->name);
            exit(1);
        }
        /* Use enum_value as a sentinel to represent definition, checked on
         * lookup to detect duplicate definitions. */
        if (peek().token == '{') {
            if (tag->enum_value) {
                error("Redefiniton of enum '%s'.", tag->name);
            }
            enumerator_list();
            tag->enum_value = 1;
        }
    } else {
        enumerator_list();
    }
    /* Result is always integer. Do not care about the actual enum definition,
     * all enums are ints and no type checking is done. */
    return type;
}

/* Translate the specifier bit mask accumulated by declaration_specifiers()
 * into one of the predefined basic types.  Each hex value encodes a legal
 * combination of void/char/short/int/signed/unsigned/long/float/double bits;
 * anything else is rejected. */
static struct typetree get_basic_type_from_specifier(unsigned short spec)
{
    switch (spec) {
    case 0x0001: /* void */
        return basic_type__void;
    case 0x0002: /* char */
    case 0x0012: /* signed char */
        return basic_type__char;
    case 0x0022: /* unsigned char */
        return basic_type__unsigned_char;
    case 0x0004: /* short */
    case 0x0014: /* signed short */
    case 0x000C: /* short int */
    case 0x001C: /* signed short int */
        return basic_type__short;
    case 0x0024: /* unsigned short */
    case 0x002C: /* unsigned short int */
        return basic_type__unsigned_short;
    case 0x0008: /* int */
    case 0x0010: /* signed */
    case 0x0018: /* signed int */
        return basic_type__int;
    case 0x0020: /* unsigned */
    case 0x0028: /* unsigned int */
        return basic_type__unsigned_int;
    case 0x0040: /* long */
    case 0x0050: /* signed long */
    case 0x0048: /* long int */
    case 0x0058: /* signed long int */
    case 0x00C0: /* long long */
    case 0x00D0: /* signed long long */
    case 0x00D8: /* signed long long int */
        return basic_type__long;
    case 0x0060: /* unsigned long */
    case 0x0068: /* unsigned long int */
    case 0x00E0: /* unsigned long long */
    case 0x00E8: /* unsigned long long int */
        return basic_type__unsigned_long;
    case 0x0100: /* float */
        return basic_type__float;
    case 0x0200: /* double */
    case 0x0240: /* long double */
        return basic_type__double;
    default:
        error("Invalid type specification.");
        exit(1);
    }
}

/* Parse type, qualifiers and storage class. Do not assume int by default, but
 * require at least one type specifier. Storage class is returned as token
 * value, unless the provided pointer is NULL, in which case the input is parsed
 * as specifier-qualifier-list. */
struct typetree *declaration_specifiers(int *stc)
{
    struct typetree *type = NULL;
    struct token tok;
    int done = 0;

    /* Use a compact bit representation to hold state about declaration
     * specifiers. Initialize storage class to sentinel value. */
    unsigned short spec = 0x0000;
    enum qualifier qual = Q_NONE;
    if (stc) *stc = '$';

    /* Each helper consumes the current token as a side effect. */
#define set_specifier(d) \
    if (spec & d) error("Duplicate type specifier '%s'.", tok.strval); \
    next(); spec |= d;

#define set_qualifier(d) \
    if (qual & d) error("Duplicate type qualifier '%s'.", tok.strval); \
    next(); qual |= d;

#define set_storage_class(t) \
    if (!stc) error("Unexpected storage class in qualifier list."); \
    else if (*stc != '$') error("Multiple storage class specifiers."); \
    next(); *stc = t;

    do {
        switch ((tok = peek()).token) {
        case VOID: set_specifier(0x001); break;
        case CHAR: set_specifier(0x002); break;
        case SHORT: set_specifier(0x004); break;
        case INT: set_specifier(0x008); break;
        case SIGNED: set_specifier(0x010); break;
        case UNSIGNED: set_specifier(0x020); break;
        case LONG:
            /* Second `long` sets a distinct bit so `long long` is legal
             * where a duplicate of any other specifier is not. */
            if (spec & 0x040) {
                set_specifier(0x080);
            } else {
                set_specifier(0x040);
            }
            break;
        case FLOAT: set_specifier(0x100); break;
        case DOUBLE: set_specifier(0x200); break;
        case CONST: set_qualifier(Q_CONST); break;
        case VOLATILE: set_qualifier(Q_VOLATILE); break;
        case IDENTIFIER: {
            /* An identifier is only a type here if it names a typedef and
             * no type has been seen yet; otherwise it ends the specifiers. */
            struct symbol *tag = sym_lookup(&ns_ident, tok.strval);
            if (tag && tag->symtype == SYM_TYPEDEF && !type) {
                consume(IDENTIFIER);
                type = type_init(T_STRUCT);
                *type = tag->type;
            } else {
                done = 1;
            }
            break;
        }
        case UNION:
        case STRUCT:
            if (!type) {
                type = struct_or_union_declaration();
            } else {
                done = 1;
            }
            break;
        case ENUM:
            if (!type) {
                type = enum_declaration();
            } else {
                done = 1;
            }
            break;
        case AUTO:
        case REGISTER:
        case STATIC:
        case EXTERN:
        case TYPEDEF:
            set_storage_class(tok.token);
            break;
        default:
            done = 1;
            break;
        }
        /* A tagged/typedef type cannot be combined with basic specifiers. */
        if (type && spec) {
            error("Invalid combination of declaration specifiers.");
            exit(1);
        }
    } while (!done);

#undef set_specifier
#undef set_qualifier
#undef set_storage_class

    if (type) {
        if (qual & type->qualifier) {
            error("Duplicate type qualifier:%s%s.",
                (qual & Q_CONST) ? " const" : "",
                (qual & Q_VOLATILE) ? " volatile" : "");
        }
    } else if (spec) {
        type = type_init(T_STRUCT);
        *type = get_basic_type_from_specifier(spec);
    } else {
        error("Missing type specifier.");
        exit(1);
    }

    type->qualifier |= qual;
    return type;
}

/* Set var = 0, using simple assignment on members for composite types. This
 * rule does not consume any input, but generates a series of assignments on the
 * given variable. Point is to be able to zero initialize using normal simple
 * assignment rules, although IR can become verbose for large structures. */
static void zero_initialize(struct block *block, struct var target)
{
    int i;
    struct var var;

    assert(target.kind == DIRECT);
    switch (target.type->type) {
    case T_STRUCT:
    case T_UNION:
        /* Recurse into each member at its offset within the aggregate. */
        target.type = unwrapped(target.type);
        var = target;
        for (i = 0; i < nmembers(var.type); ++i) {
            target.type = get_member(var.type, i)->type;
            target.offset = var.offset + get_member(var.type, i)->offset;
            zero_initialize(block, target);
        }
        break;
    case T_ARRAY:
        assert(target.type->size);
        var = target;
        target.type = target.type->next;
        assert(is_struct(target.type) || !target.type->next);
        /* One recursive call per element, stepping by element size. */
        for (i = 0; i < var.type->size / var.type->next->size; ++i) {
            target.offset = var.offset + i * var.type->next->size;
            zero_initialize(block, target);
        }
        break;
    case T_POINTER:
        /* NOTE(review): pointer width hard-coded to 8 bytes here. */
        var = var_zero(8);
        var.type = type_init(T_POINTER, &basic_type__void);
        eval_assign(block, target, var);
        break;
    case T_UNSIGNED:
    case T_SIGNED:
        var = var_zero(target.type->size);
        eval_assign(block, target, var);
        break;
    default:
        error("Invalid type to zero-initialize, was '%t'.", target.type);
        exit(1);
    }
}
// Fetch the named field from element 0 of the underlying mxArray.
// Only valid when this element wraps a MATLAB struct array.
matwrap get_field(const char* fieldname) const
{
    safe_assert(is_struct(), "Attempted to access field of a non-struct element.");
    return mxGetField(array, 0, fieldname);
}
char *get_token(char *lexeme , int mode){ char *token=(char*)calloc(strlen(lexeme)+50,sizeof(char)); //printf("Getting token\n"); if(is_long(lexeme)){ sprintf(token,"%d",LONG); } else if(is_static(lexeme)){ sprintf(token,"%d",STATIC); } else if(is_union(lexeme)){ sprintf(token,"%d",UNION); } else if(is_default(lexeme)){ sprintf(token,"%d",DEFAULT); } else if(is_break(lexeme)){ sprintf(token,"%d",BREAK); } else if(is_case(lexeme)){ sprintf(token,"%d",CASE); } else if(is_continue(lexeme)){ sprintf(token,"%d",CONTINUE); } else if(is_goto(lexeme)){ sprintf(token,"%d",GOTO); } else if(is_struct(lexeme)){ sprintf(token,"%d",STRUCT); } else if(is_const(lexeme)){ sprintf(token,"%d",CONST); } else if(is_void(lexeme)){ sprintf(token,"%d",VOID); } else if(is_switch(lexeme)){ sprintf(token,"%d",SWITCH); } else if(is_for(lexeme)){ sprintf(token,"%d",FOR); } else if(is_while(lexeme)){ sprintf(token,"%d",WHILE); } else if(is_do(lexeme)){ sprintf(token,"%d",DO); } else if(is_return(lexeme)){ sprintf(token,"%d",RETURN); } else if(is_bool(lexeme)){ sprintf(token,"%d",BOOL); } else if(is_char(lexeme)){ sprintf(token,"%d",CHAR); } else if(is_signed(lexeme)){ sprintf(token,"%d",SIGNED); } else if(is_unsigned(lexeme)){ sprintf(token,"%d",UNSIGNED); } else if(is_short(lexeme)){ sprintf(token,"%d",SHORT); } else if(is_int(lexeme)){ sprintf(token,"%d",INT); } else if(is_float(lexeme)){ sprintf(token,"%d",FLOAT); } else if(is_double(lexeme)){ sprintf(token,"%d",DOUBLE); } else if(is_l_square(lexeme)){ sprintf(token,"%d",L_SQUARE); } else if(is_r_square(lexeme)){ sprintf(token,"%d",R_SQUARE); } else if(is_l_paraen(lexeme)){ sprintf(token,"%d",L_PARAEN); } else if(is_r_paraen(lexeme)){ sprintf(token,"%d",R_PARAEN); } else if(is_l_cbrace(lexeme)){ sprintf(token,"%d",L_CBRACE); } else if(is_r_cbrace(lexeme)){ sprintf(token,"%d",R_CBRACE); } else if(is_comma(lexeme)){ sprintf(token,"%d",COMMA); } else if(is_semicol(lexeme)){ sprintf(token,"%d",SEMICOL); } else if(is_eq_eq(lexeme)){ 
sprintf(token,"%d",EQ_EQ); } else if(is_lesser(lexeme)){ sprintf(token,"%d",LESSER); } else if(is_less_eq(lexeme)){ sprintf(token,"%d",LESS_EQ); } else if(is_div(lexeme)){ sprintf(token,"%d",DIV); } else if(is_greater(lexeme)){ sprintf(token,"%d",GREATER); } else if(is_great_eq(lexeme)){ sprintf(token,"%d",GREAT_EQ); } else if(is_plus_eq(lexeme)){ sprintf(token,"%d",PLUS_EQ); } else if(is_minus_eq(lexeme)){ sprintf(token,"%d",MINUS_EQ); } else if(is_div_eq(lexeme)){ sprintf(token,"%d",DIV_EQ); } else if(is_mult_eq(lexeme)){ sprintf(token,"%d",MULT_EQ); } else if(is_minus_minus(lexeme)){ sprintf(token,"%d",MINUS_MINUS); } else if(is_plus_plus(lexeme)){ sprintf(token,"%d",PLUS_PLUS); } else if(is_percent(lexeme)){ sprintf(token,"%d",PERCENT); } else if(is_div(lexeme)){ sprintf(token,"%d",DIV); } else if(is_mult(lexeme)){ sprintf(token,"%d",MULT); } else if(is_minus(lexeme)){ sprintf(token,"%d",MINUS); } else if(is_plus(lexeme)){ sprintf(token,"%d",PLUS); } else if(is_int_const(lexeme)){ printf("int"); sprintf(token,"%d\t%s",INT_CONST,lexeme); } else if(is_flo_const(lexeme)){ printf("float"); sprintf(token,"%d\t%s",FLO_CONST,lexeme); } else if(is_comment_start(lexeme)){ sprintf(token,"$start"); } else if(is_comment_end(lexeme)){ sprintf(token,"$end"); } else if(is_identifier(lexeme)){ printf("Identifier"); if(mode==1) ht_set( symboltable, lexeme, "1"); sprintf(token,"%d\t%s",IDNTIFIER,lexeme); } else sprintf(token,"%d",NOTOK); return token; }
// Recursively verify one table (`table`) against its schema definition
// (`obj`), checking every declared field's presence/bounds via the Verifier.
// A null table passes only when the field is not `required`.  Returns false
// on the first verification failure.
bool VerifyObject(flatbuffers::Verifier &v, const reflection::Schema &schema, const reflection::Object &obj, const flatbuffers::Table *table, bool required) {
  if (!table) {
    if (!required)
      return true;
    else
      return false;
  }
  if (!table->VerifyTableStart(v)) return false;
  for (uoffset_t i = 0; i < obj.fields()->size(); i++) {
    auto field_def = obj.fields()->Get(i);
    switch (field_def->type()->base_type()) {
      case reflection::None:
        assert(false);
        break;
      case reflection::UType:
        // Union type discriminators are stored as a single byte.
        if (!table->VerifyField<uint8_t>(v, field_def->offset())) return false;
        break;
      case reflection::Bool:
      case reflection::Byte:
      case reflection::UByte:
        if (!table->VerifyField<int8_t>(v, field_def->offset())) return false;
        break;
      case reflection::Short:
      case reflection::UShort:
        if (!table->VerifyField<int16_t>(v, field_def->offset())) return false;
        break;
      case reflection::Int:
      case reflection::UInt:
        if (!table->VerifyField<int32_t>(v, field_def->offset())) return false;
        break;
      case reflection::Long:
      case reflection::ULong:
        if (!table->VerifyField<int64_t>(v, field_def->offset())) return false;
        break;
      case reflection::Float:
        if (!table->VerifyField<float>(v, field_def->offset())) return false;
        break;
      case reflection::Double:
        if (!table->VerifyField<double>(v, field_def->offset())) return false;
        break;
      case reflection::String:
        // Verify both the offset slot and the string payload it points to.
        if (!table->VerifyField<uoffset_t>(v, field_def->offset()) ||
            !v.Verify(flatbuffers::GetFieldS(*table, *field_def))) {
          return false;
        }
        break;
      case reflection::Vector:
        if (!VerifyVector(v, schema, *table, *field_def)) return false;
        break;
      case reflection::Obj: {
        auto child_obj = schema.objects()->Get(field_def->type()->index());
        if (child_obj->is_struct()) {
          // Structs are stored inline; verify their fixed-size span.
          if (!VerifyStruct(v, *table, field_def->offset(), *child_obj,
                            field_def->required())) {
            return false;
          }
        } else {
          // Tables are stored by offset; recurse.
          if (!VerifyObject(v, schema, *child_obj,
                            flatbuffers::GetFieldT(*table, *field_def),
                            field_def->required())) {
            return false;
          }
        }
        break;
      }
      case reflection::Union: {
        // get union type from the prev field
        // NOTE(review): this assumes the generated _type field's vtable slot
        // immediately precedes the union field's slot - TODO confirm this
        // holds for the schema versions fed to this verifier.
        voffset_t utype_offset = field_def->offset() - sizeof(voffset_t);
        auto utype = table->GetField<uint8_t>(utype_offset, 0);
        if (utype != 0) {
          // Means we have this union field present
          auto fb_enum = schema.enums()->Get(field_def->type()->index());
          // NOTE(review): indexes enum values by the raw utype value -
          // assumes union enum values are dense starting at 0 (NONE).
          auto child_obj = fb_enum->values()->Get(utype)->object();
          if (!VerifyObject(v, schema, *child_obj,
                            flatbuffers::GetFieldT(*table, *field_def),
                            field_def->required())) {
            return false;
          }
        }
        break;
      }
      default:
        assert(false);
        break;
    }
  }
  return true;
}
// Verify a vector-typed field of `table`: first the offset slot, then the
// vector payload according to its element type.  Vectors of tables recurse
// into VerifyObject; vectors of structs are checked as one contiguous span.
// Vectors of vectors/unions are not representable in FlatBuffers (asserts).
bool VerifyVector(flatbuffers::Verifier &v, const reflection::Schema &schema, const flatbuffers::Table &table, const reflection::Field &vec_field) {
  assert(vec_field.type()->base_type() == reflection::Vector);
  if (!table.VerifyField<uoffset_t>(v, vec_field.offset())) return false;
  switch (vec_field.type()->element()) {
    case reflection::None:
      assert(false);
      break;
    case reflection::UType:
      return v.Verify(flatbuffers::GetFieldV<uint8_t>(table, vec_field));
    case reflection::Bool:
    case reflection::Byte:
    case reflection::UByte:
      return v.Verify(flatbuffers::GetFieldV<int8_t>(table, vec_field));
    case reflection::Short:
    case reflection::UShort:
      return v.Verify(flatbuffers::GetFieldV<int16_t>(table, vec_field));
    case reflection::Int:
    case reflection::UInt:
      return v.Verify(flatbuffers::GetFieldV<int32_t>(table, vec_field));
    case reflection::Long:
    case reflection::ULong:
      return v.Verify(flatbuffers::GetFieldV<int64_t>(table, vec_field));
    case reflection::Float:
      return v.Verify(flatbuffers::GetFieldV<float>(table, vec_field));
    case reflection::Double:
      return v.Verify(flatbuffers::GetFieldV<double>(table, vec_field));
    case reflection::String: {
      // Verify the vector of offsets, then each string payload.
      auto vecString = flatbuffers::GetFieldV<flatbuffers::
          Offset<flatbuffers::String>>(table, vec_field);
      if (v.Verify(vecString) && v.VerifyVectorOfStrings(vecString)) {
        return true;
      } else {
        return false;
      }
    }
    case reflection::Vector:
      assert(false);
      break;
    case reflection::Obj: {
      auto obj = schema.objects()->Get(vec_field.type()->index());
      if (obj->is_struct()) {
        // Structs are stored inline in the vector: one bounds check covers
        // the whole element span.
        if (!VerifyVectorOfStructs(v, table, vec_field.offset(), *obj,
                                   vec_field.required())) {
          return false;
        }
      } else {
        // Vector of tables: verify the offset vector, then each table.
        auto vec = flatbuffers::GetFieldV<flatbuffers::
            Offset<flatbuffers::Table>>(table, vec_field);
        if (!v.Verify(vec)) return false;
        if (vec) {
          for (uoffset_t j = 0; j < vec->size(); j++) {
            if (!VerifyObject(v, schema, *obj, vec->Get(j), true)) {
              return false;
            }
          }
        }
      }
      return true;
    }
    case reflection::Union:
      assert(false);
      break;
    default:
      assert(false);
      break;
  }
  return false;
}
// Deep-copy `table` (described by `objectdef` in `schema`) into `fbb`.
// Works in two passes, as FlatBuffers requires: first serialize every
// out-of-line subobject (strings, child tables, vectors) bottom-up and
// collect their offsets, then build the table itself from those offsets
// plus the inline scalar/struct data.
// NOTE(review): the recursive calls pass only four arguments, so
// use_string_pooling must have a default value in the declaration
// elsewhere - nested strings are therefore never pooled.
Offset<const Table *> CopyTable(FlatBufferBuilder &fbb, const reflection::Schema &schema, const reflection::Object &objectdef, const Table &table, bool use_string_pooling) {
  // Before we can construct the table, we have to first generate any
  // subobjects, and collect their offsets.
  std::vector<uoffset_t> offsets;
  auto fielddefs = objectdef.fields();
  for (auto it = fielddefs->begin(); it != fielddefs->end(); ++it) {
    auto &fielddef = **it;
    // Skip if field is not present in the source.
    if (!table.CheckField(fielddef.offset())) continue;
    uoffset_t offset = 0;
    switch (fielddef.type()->base_type()) {
      case reflection::String: {
        offset = use_string_pooling
                     ? fbb.CreateSharedString(GetFieldS(table, fielddef)).o
                     : fbb.CreateString(GetFieldS(table, fielddef)).o;
        break;
      }
      case reflection::Obj: {
        auto &subobjectdef = *schema.objects()->Get(fielddef.type()->index());
        // Structs are inline data: copied in the second pass, not here.
        if (!subobjectdef.is_struct()) {
          offset = CopyTable(fbb, schema, subobjectdef,
                             *GetFieldT(table, fielddef)).o;
        }
        break;
      }
      case reflection::Union: {
        auto &subobjectdef = GetUnionType(schema, objectdef, fielddef, table);
        offset = CopyTable(fbb, schema, subobjectdef,
                           *GetFieldT(table, fielddef)).o;
        break;
      }
      case reflection::Vector: {
        auto vec = table.GetPointer<const Vector<Offset<Table>> *>(
            fielddef.offset());
        auto element_base_type = fielddef.type()->element();
        auto elemobjectdef =
            element_base_type == reflection::Obj
                ? schema.objects()->Get(fielddef.type()->index())
                : nullptr;
        switch (element_base_type) {
          case reflection::String: {
            // Copy each string first, then the vector of its offsets.
            std::vector<Offset<const String *>> elements(vec->size());
            auto vec_s = reinterpret_cast<const Vector<Offset<String>> *>(vec);
            for (uoffset_t i = 0; i < vec_s->size(); i++) {
              elements[i] = use_string_pooling
                                ? fbb.CreateSharedString(vec_s->Get(i)).o
                                : fbb.CreateString(vec_s->Get(i)).o;
            }
            offset = fbb.CreateVector(elements).o;
            break;
          }
          case reflection::Obj: {
            if (!elemobjectdef->is_struct()) {
              // Vector of tables: deep-copy each element.
              std::vector<Offset<const Table *>> elements(vec->size());
              for (uoffset_t i = 0; i < vec->size(); i++) {
                elements[i] =
                    CopyTable(fbb, schema, *elemobjectdef, *vec->Get(i));
              }
              offset = fbb.CreateVector(elements).o;
              break;
            }
          }
            // FALL-THRU: vector of structs is raw inline data (handled with
            // the scalar case below).
          default: {  // Scalars and structs.
            auto element_size = GetTypeSize(element_base_type);
            if (elemobjectdef && elemobjectdef->is_struct())
              element_size = elemobjectdef->bytesize();
            // Inline element data can be copied as one contiguous byte span.
            fbb.StartVector(element_size, vec->size());
            fbb.PushBytes(vec->Data(), element_size * vec->size());
            offset = fbb.EndVector(vec->size());
            break;
          }
        }
        break;
      }
      default:  // Scalars: inline, copied in the second pass.
        break;
    }
    // Zero means "no subobject serialized for this field".
    if (offset) { offsets.push_back(offset); }
  }
  // Now we can build the actual table from either offsets or scalar data.
  auto start = objectdef.is_struct() ? fbb.StartStruct(objectdef.minalign())
                                     : fbb.StartTable();
  // Consume `offsets` in the same field order the first pass produced them.
  size_t offset_idx = 0;
  for (auto it = fielddefs->begin(); it != fielddefs->end(); ++it) {
    auto &fielddef = **it;
    if (!table.CheckField(fielddef.offset())) continue;
    auto base_type = fielddef.type()->base_type();
    switch (base_type) {
      case reflection::Obj: {
        auto &subobjectdef = *schema.objects()->Get(fielddef.type()->index());
        if (subobjectdef.is_struct()) {
          // Inline struct: byte-copy into the new table.
          CopyInline(fbb, fielddef, table, subobjectdef.minalign(),
                     subobjectdef.bytesize());
          break;
        }
      }
        // ELSE FALL-THRU: non-struct Obj is an offset field like the rest.
      case reflection::Union:
      case reflection::String:
      case reflection::Vector:
        fbb.AddOffset(fielddef.offset(), Offset<void>(offsets[offset_idx++]));
        break;
      default: {  // Scalars.
        auto size = GetTypeSize(base_type);
        CopyInline(fbb, fielddef, table, size, size);
        break;
      }
    }
  }
  assert(offset_idx == offsets.size());
  if (objectdef.is_struct()) {
    fbb.ClearOffsets();
    return fbb.EndStruct();
  } else {
    return fbb.EndTable(start, static_cast<voffset_t>(fielddefs->size()));
  }
}
// Resolve a field name to its index in the underlying mxArray.
// Only valid when this element wraps a MATLAB struct array.
int get_field_number(const char* field_name) const
{
    safe_assert(is_struct(), "Attempted to access field of a non-struct element.");
    const int field_number = mxGetFieldNumber(array, field_name);
    return field_number;
}
// Fetch a field by index from element 0 of the underlying mxArray.
// Only valid when this element wraps a MATLAB struct array and the index
// is within range.
matwrap get_field(const int field_id) const
{
    safe_assert(is_struct(), "Attempted to access field of a non-struct element.");
    safe_assert(field_id < get_number_of_fields(), "Invalid field id!");
    return mxGetFieldByNumber(array, 0, field_id);
}