/* Verifies that reference tracking is optional: UPB_UNTRACKED_REF may be
 * passed as the owner, and tracked and untracked refs can coexist on the
 * same object without confusing the refcounting machinery. */
static void test_noreftracking() {
  /* Reftracking is not required; clients can pass UPB_UNTRACKED_REF for
   * owner. */
  upb_msgdef *md = upb_msgdef_new(UPB_UNTRACKED_REF);
  upb_msgdef_ref(md, UPB_UNTRACKED_REF);

  /* Clients can mix tracked and untracked refs. */
  upb_msgdef_ref(md, &md);

  /* Drop both untracked refs taken above. */
  upb_msgdef_unref(md, UPB_UNTRACKED_REF);
  upb_msgdef_unref(md, UPB_UNTRACKED_REF);

  /* Call some random function on the messagedef to test that it is alive. */
  ASSERT(!upb_msgdef_isfrozen(md));

  /* Release the final (tracked) ref, destroying the object. */
  upb_msgdef_unref(md, &md);
}
upb_handlers *upb_handlers_new(const upb_msgdef *md, const void *owner) { assert(upb_msgdef_isfrozen(md)); int extra = sizeof(upb_handlers_tabent) * (md->selector_count - 1); upb_handlers *h = calloc(sizeof(*h) + extra, 1); if (!h) return NULL; h->msg = md; upb_msgdef_ref(h->msg, h); upb_status_clear(&h->status_); h->sub = calloc(md->submsg_field_count, sizeof(*h->sub)); if (!h->sub) goto oom; if (!upb_refcounted_init(UPB_UPCAST(h), &vtbl, owner)) goto oom; // calloc() above initialized all handlers to NULL. return h; oom: freehandlers(UPB_UPCAST(h)); return NULL; }
/* Returns true if the NUL-terminated strings |a| and |b| are equal.
 * Local helper so this check does not add a dependency on <string.h>. */
static bool upb_msgdef_names_eql(const char *a, const char *b) {
  while (*a && *a == *b) {
    a++;
    b++;
  }
  return *a == *b;
}

/* Adds |n| fields to message |m| atomically: either every field is added or
 * none are and false is returned.  A field is rejected if it has no name, a
 * zero number, or collides (by name or number) with a field already in |m|
 * or with another field in the same batch. */
bool upb_msgdef_addfields(upb_msgdef *m, upb_fielddef *const *fields, int n) {
  // Check constraints for all fields before performing any action.
  for (int i = 0; i < n; i++) {
    upb_fielddef *f = fields[i];
    assert(upb_atomic_read(&f->refcount) > 0);
    if (f->name == NULL || f->number == 0 ||
        upb_msgdef_itof(m, f->number) || upb_msgdef_ntof(m, f->name))
      return false;
    // The table lookups above only detect collisions with fields already in
    // |m|; also reject duplicates within the batch itself, which would
    // otherwise slip through the pre-check and corrupt the tables during the
    // insert loop below.
    for (int j = 0; j < i; j++) {
      if (fields[j]->number == f->number ||
          upb_msgdef_names_eql(fields[j]->name, f->name)) {
        return false;
      }
    }
  }
  // Constraint checks ok, perform the action.
  for (int i = 0; i < n; i++) {
    upb_fielddef *f = fields[i];
    // NOTE(review): one-arg ref here vs. the two-arg upb_msgdef_ref(md, owner)
    // used elsewhere in this file -- looks like a different API vintage;
    // confirm against the prototype actually in scope.
    upb_msgdef_ref(m);
    assert(f->msgdef == NULL);
    f->msgdef = m;
    upb_itof_ent itof_ent = {0, f};
    upb_inttable_insert(&m->itof, f->number, &itof_ent);
    upb_strtable_insert(&m->ntof, f->name, &f);
  }
  return true;
}
MessageLayout* create_layout(const upb_msgdef* msgdef) { MessageLayout* layout = ALLOC(MessageLayout); int nfields = upb_msgdef_numfields(msgdef); upb_msg_field_iter it; upb_msg_oneof_iter oit; size_t off = 0; layout->fields = ALLOC_N(MessageField, nfields); size_t hasbit = 0; for (upb_msg_field_begin(&it, msgdef); !upb_msg_field_done(&it); upb_msg_field_next(&it)) { const upb_fielddef* field = upb_msg_iter_field(&it); if (upb_fielddef_haspresence(field)) { layout->fields[upb_fielddef_index(field)].hasbit = hasbit++; } else { layout->fields[upb_fielddef_index(field)].hasbit = MESSAGE_FIELD_NO_HASBIT; } } if (hasbit != 0) { off += (hasbit + 8 - 1) / 8; } for (upb_msg_field_begin(&it, msgdef); !upb_msg_field_done(&it); upb_msg_field_next(&it)) { const upb_fielddef* field = upb_msg_iter_field(&it); size_t field_size; if (upb_fielddef_containingoneof(field)) { // Oneofs are handled separately below. continue; } // Allocate |field_size| bytes for this field in the layout. field_size = 0; if (upb_fielddef_label(field) == UPB_LABEL_REPEATED) { field_size = sizeof(VALUE); } else { field_size = native_slot_size(upb_fielddef_type(field)); } // Align current offset up to |size| granularity. off = align_up_to(off, field_size); layout->fields[upb_fielddef_index(field)].offset = off; layout->fields[upb_fielddef_index(field)].case_offset = MESSAGE_FIELD_NO_CASE; off += field_size; } // Handle oneofs now -- we iterate over oneofs specifically and allocate only // one slot per oneof. // // We assign all value slots first, then pack the 'case' fields at the end, // since in the common case (modern 64-bit platform) these are 8 bytes and 4 // bytes respectively and we want to avoid alignment overhead. // // Note that we reserve 4 bytes (a uint32) per 'case' slot because the value // space for oneof cases is conceptually as wide as field tag numbers. In // practice, it's unlikely that a oneof would have more than e.g. 
256 or 64K // members (8 or 16 bits respectively), so conceivably we could assign // consecutive case numbers and then pick a smaller oneof case slot size, but // the complexity to implement this indirection is probably not worthwhile. for (upb_msg_oneof_begin(&oit, msgdef); !upb_msg_oneof_done(&oit); upb_msg_oneof_next(&oit)) { const upb_oneofdef* oneof = upb_msg_iter_oneof(&oit); upb_oneof_iter fit; // Always allocate NATIVE_SLOT_MAX_SIZE bytes, but share the slot between // all fields. size_t field_size = NATIVE_SLOT_MAX_SIZE; // Align the offset. off = align_up_to(off, field_size); // Assign all fields in the oneof this same offset. for (upb_oneof_begin(&fit, oneof); !upb_oneof_done(&fit); upb_oneof_next(&fit)) { const upb_fielddef* field = upb_oneof_iter_field(&fit); layout->fields[upb_fielddef_index(field)].offset = off; } off += field_size; } // Now the case fields. for (upb_msg_oneof_begin(&oit, msgdef); !upb_msg_oneof_done(&oit); upb_msg_oneof_next(&oit)) { const upb_oneofdef* oneof = upb_msg_iter_oneof(&oit); upb_oneof_iter fit; size_t field_size = sizeof(uint32_t); // Align the offset. off = (off + field_size - 1) & ~(field_size - 1); // Assign all fields in the oneof this same offset. for (upb_oneof_begin(&fit, oneof); !upb_oneof_done(&fit); upb_oneof_next(&fit)) { const upb_fielddef* field = upb_oneof_iter_field(&fit); layout->fields[upb_fielddef_index(field)].case_offset = off; } off += field_size; } layout->size = off; layout->msgdef = msgdef; upb_msgdef_ref(layout->msgdef, &layout->msgdef); return layout; }