int main(int argc, char * argv[]) { char *testFileName; FILE *fd; struct SymTab *theTable, *firstCopy, *secondCopy; struct SymEntry *anEntry; struct Attributes *anAttr; char buffer[16]; int val1, val2; if (argc != 2) { fprintf(stderr,"usage: SymTabDriver test-data-file\n"); exit(1); } testFileName = argv[1]; fd = fopen(testFileName,"r"); if (!fd) ErrorExit("Can't open input file.\n"); if (!(theTable = CreateSymTab(5))) ErrorExit("Failed to alloc first table.\n"); /* Read lines consisting of a name string and an integer from std input. If name already present increment value2, if new allocate storage for attribute structure and fill in fields. */ while (fscanf(fd,"%15s %d",buffer,&val1) != EOF) { printf("Find: %15s ",buffer); (FindName(theTable,buffer)) ? fprintf(stdout," Present -") : fprintf(stdout," Not Present -"); if (EnterName(theTable,buffer,&anEntry)) { fprintf(stdout," Enter: Present: %15s\n",GetName(anEntry)); anAttr = (struct Attributes *) GetAttr(anEntry); anAttr->value2++; anAttr->value1 = MAX(((struct Attributes *) GetAttr(anEntry))->value1,val1); } else { fprintf(stdout," Enter: Entered: %15s\n",GetName(anEntry)); anAttr = malloc(sizeof(struct Attributes)); anAttr->value1 = val1; anAttr->value2 = 1; SetAttr(anEntry,anAttr); } } fprintf(stdout,"\nContents of Original Table\n"); DisplayTable(theTable); DisplayStatistics(theTable); if (!(firstCopy = CopyTable(theTable,1))) ErrorExit("Failed to alloc first copy table.\n"); DestroySymTab(theTable); fprintf(stdout,"\nContents of First Copy Table\n"); DisplayTable(firstCopy); DisplayStatistics(firstCopy); if (!(secondCopy = CopyTable(firstCopy,100))) ErrorExit("Failed to alloc first copy table.\n"); DestroySymTab(firstCopy); fprintf(stdout,"\nContents of Second Copy Table\n"); DisplayTable(secondCopy); DisplayStatistics(secondCopy); VerifyCounts(secondCopy); FreeAllAttr(secondCopy); DestroySymTab(secondCopy); return 0; }
// Deep-copies the table `table`, whose layout is described at runtime by
// `objectdef` within `schema`, into the builder `fbb`.
//
// Parameters:
//   fbb                - destination builder; receives all sub-objects too.
//   schema             - reflection schema containing all object definitions.
//   objectdef          - definition of the (sub)object being copied.
//   table              - source table to copy.
//   use_string_pooling - if true, strings are created via CreateSharedString
//                        so identical strings are stored once.
// Returns the offset of the newly written table (or struct) in `fbb`.
Offset<const Table *> CopyTable(FlatBufferBuilder &fbb,
                                const reflection::Schema &schema,
                                const reflection::Object &objectdef,
                                const Table &table,
                                bool use_string_pooling) {
  // Before we can construct the table, we have to first generate any
  // subobjects, and collect their offsets.
  std::vector<uoffset_t> offsets;
  auto fielddefs = objectdef.fields();
  for (auto it = fielddefs->begin(); it != fielddefs->end(); ++it) {
    auto &fielddef = **it;
    // Skip if field is not present in the source.
    if (!table.CheckField(fielddef.offset())) continue;
    uoffset_t offset = 0;
    switch (fielddef.type()->base_type()) {
      case reflection::String: {
        offset = use_string_pooling
                     ? fbb.CreateSharedString(GetFieldS(table, fielddef)).o
                     : fbb.CreateString(GetFieldS(table, fielddef)).o;
        break;
      }
      case reflection::Obj: {
        auto &subobjectdef = *schema.objects()->Get(fielddef.type()->index());
        if (!subobjectdef.is_struct()) {
          // BUGFIX: forward use_string_pooling; previously the recursive call
          // omitted it, so pooling was silently disabled below the root table.
          offset = CopyTable(fbb, schema, subobjectdef,
                             *GetFieldT(table, fielddef),
                             use_string_pooling).o;
        }
        // Structs are copied inline in the second pass below.
        break;
      }
      case reflection::Union: {
        auto &subobjectdef = GetUnionType(schema, objectdef, fielddef, table);
        // BUGFIX: forward use_string_pooling (see Obj case above).
        offset = CopyTable(fbb, schema, subobjectdef,
                           *GetFieldT(table, fielddef),
                           use_string_pooling).o;
        break;
      }
      case reflection::Vector: {
        auto vec =
            table.GetPointer<const Vector<Offset<Table>> *>(fielddef.offset());
        auto element_base_type = fielddef.type()->element();
        auto elemobjectdef = element_base_type == reflection::Obj
                                 ? schema.objects()->Get(fielddef.type()->index())
                                 : nullptr;
        switch (element_base_type) {
          case reflection::String: {
            std::vector<Offset<const String *>> elements(vec->size());
            auto vec_s = reinterpret_cast<const Vector<Offset<String>> *>(vec);
            for (uoffset_t i = 0; i < vec_s->size(); i++) {
              elements[i] = use_string_pooling
                                ? fbb.CreateSharedString(vec_s->Get(i)).o
                                : fbb.CreateString(vec_s->Get(i)).o;
            }
            offset = fbb.CreateVector(elements).o;
            break;
          }
          case reflection::Obj: {
            if (!elemobjectdef->is_struct()) {
              std::vector<Offset<const Table *>> elements(vec->size());
              for (uoffset_t i = 0; i < vec->size(); i++) {
                // BUGFIX: forward use_string_pooling (see Obj case above).
                elements[i] = CopyTable(fbb, schema, *elemobjectdef,
                                        *vec->Get(i), use_string_pooling);
              }
              offset = fbb.CreateVector(elements).o;
              break;
            }
            // Vectors of structs are bit-copied below.
          }
            // FALL-THRU
          default: {  // Scalars and structs.
            auto element_size = GetTypeSize(element_base_type);
            if (elemobjectdef && elemobjectdef->is_struct())
              element_size = elemobjectdef->bytesize();
            fbb.StartVector(element_size, vec->size());
            fbb.PushBytes(vec->Data(), element_size * vec->size());
            offset = fbb.EndVector(vec->size());
            break;
          }
        }
        break;
      }
      default:  // Scalars: copied inline in the second pass.
        break;
    }
    if (offset) { offsets.push_back(offset); }
  }
  // Now we can build the actual table from either offsets or scalar data.
  auto start = objectdef.is_struct() ? fbb.StartStruct(objectdef.minalign())
                                     : fbb.StartTable();
  size_t offset_idx = 0;
  for (auto it = fielddefs->begin(); it != fielddefs->end(); ++it) {
    auto &fielddef = **it;
    if (!table.CheckField(fielddef.offset())) continue;
    auto base_type = fielddef.type()->base_type();
    switch (base_type) {
      case reflection::Obj: {
        auto &subobjectdef = *schema.objects()->Get(fielddef.type()->index());
        if (subobjectdef.is_struct()) {
          CopyInline(fbb, fielddef, table, subobjectdef.minalign(),
                     subobjectdef.bytesize());
          break;
        }
      }
        // ELSE FALL-THRU: non-struct objects were built in the first pass.
      case reflection::Union:
      case reflection::String:
      case reflection::Vector:
        // Consume the pre-built sub-object offsets in the same field order
        // they were produced above.
        fbb.AddOffset(fielddef.offset(), Offset<void>(offsets[offset_idx++]));
        break;
      default: {  // Scalars.
        auto size = GetTypeSize(base_type);
        CopyInline(fbb, fielddef, table, size, size);
        break;
      }
    }
  }
  assert(offset_idx == offsets.size());
  if (objectdef.is_struct()) {
    fbb.ClearOffsets();
    return fbb.EndStruct();
  } else {
    return fbb.EndTable(start, static_cast<voffset_t>(fielddefs->size()));
  }
}
void dlgRepCluster::OnOK(wxCommandEvent &ev) { #ifdef __WXGTK__ if (!btnOK->IsEnabled()) return; #endif EnableOK(false); bool done = true; done = connection->ExecuteVoid(wxT("BEGIN TRANSACTION;")); if (remoteConn) done = remoteConn->ExecuteVoid(wxT("BEGIN TRANSACTION;")); // initialize cluster on local node done = connection->ExecuteVoid(GetSql()); if (done && chkJoinCluster->GetValue()) { // we're joining an existing cluster wxString schemaPrefix = qtIdent(wxT("_") + cbClusterName->GetValue()) + wxT("."); wxString clusterVersion = remoteConn->ExecuteScalar( wxT("SELECT ") + schemaPrefix + wxT("slonyversion()")); wxString newVersion = connection->ExecuteScalar( wxT("SELECT ") + schemaPrefix + wxT("slonyversion()")); if (clusterVersion != newVersion) { wxMessageDialog msg(this, wxString::Format(_("The newly created cluster version (%s)\n doesn't match the existing cluster's version (%s)"), newVersion.c_str(), clusterVersion.c_str()), _("Error while joining replication cluster"), wxICON_ERROR); msg.ShowModal(); done = false; } if (done) done = CopyTable(remoteConn, connection, schemaPrefix + wxT("sl_node")); if (done) done = CopyTable(remoteConn, connection, schemaPrefix + wxT("sl_path")); if (done) done = CopyTable(remoteConn, connection, schemaPrefix + wxT("sl_listen")); if (done) done = CopyTable(remoteConn, connection, schemaPrefix + wxT("sl_set")); if (done) done = CopyTable(remoteConn, connection, schemaPrefix + wxT("sl_subscribe")); // make sure event seqno starts correctly after node reusage if (done) { pgSet *set = connection->ExecuteSet( wxT("SELECT ev_origin, MAX(ev_seqno) as seqno\n") wxT(" FROM ") + schemaPrefix + wxT("sl_event\n") wxT(" GROUP BY ev_origin")); if (set) { while (done && !set->Eof()) { if (set->GetVal(wxT("ev_origin")) == txtNodeID->GetValue()) { done = connection->ExecuteVoid( wxT("SELECT pg_catalog.setval(") + qtDbString(wxT("_") + cbClusterName->GetValue() + wxT(".sl_event_seq")) + wxT(", ") + set->GetVal(wxT("seqno")) + wxT("::int8 
+1)")); } else { done = connection->ExecuteVoid( wxT("INSERT INTO ") + schemaPrefix + wxT("sl_confirm(con_origin, con_received, con_seqno, con_timestamp\n") wxT(" VALUES (") + set->GetVal(wxT("ev_origin")) + wxT(", ") + txtNodeID->GetValue() + wxT(", ") + set->GetVal(wxT("seqno")) + wxT(", current_timestamp")); } set->MoveNext(); } delete set; } } // make sure rowid seq starts correctly if (done) { wxString seqno = connection->ExecuteScalar( wxT("SELECT MAX(seql_last_value)\n") wxT(" FROM ") + schemaPrefix + wxT("sl_seqlog\n") wxT(" WHERE seql_seqid = 0 AND seql_origin = ") + txtNodeID->GetValue()); if (!seqno.IsEmpty()) { done = connection->ExecuteVoid( wxT("SELECT pg_catalog.setval(") + qtDbString(wxT("_") + cbClusterName->GetValue() + wxT(".sl_rowid_seq")) + wxT(", ") + seqno + wxT(")")); } } // create new node on the existing cluster if (done) { wxString sql = wxT("SELECT ") + schemaPrefix + wxT("storenode(") + txtNodeID->GetValue() + wxT(", ") + qtDbString(txtNodeName->GetValue()); if (StrToDouble(remoteVersion) >= 1.1 && StrToDouble(remoteVersion) < 2.0) sql += wxT(", false"); sql += wxT(");\n") wxT("SELECT ") + schemaPrefix + wxT("enablenode(") + txtNodeID->GetValue() + wxT(");\n"); done = remoteConn->ExecuteVoid(sql); } // add admin info to cluster if (done && cbAdminNode->GetCurrentSelection() > 0) { done = remoteConn->ExecuteVoid( wxT("SELECT ") + schemaPrefix + wxT("storepath(") + txtNodeID->GetValue() + wxT(", ") + NumToStr((long)cbAdminNode->wxItemContainer::GetClientData(cbAdminNode->GetCurrentSelection())) + wxT(", ") + qtDbString(wxT("host=") + database->GetServer()->GetName() + wxT(" port=") + NumToStr((long)database->GetServer()->GetPort()) + wxT(" dbname=") + database->GetName()) + wxT(", ") wxT("0);\n")); } } if (!done) { if (remoteConn) done = remoteConn->ExecuteVoid(wxT("ROLLBACK TRANSACTION;")); done = connection->ExecuteVoid(wxT("ROLLBACK TRANSACTION;")); EnableOK(true); return; } if (remoteConn) done = remoteConn->ExecuteVoid(wxT("COMMIT 
TRANSACTION;")); done = connection->ExecuteVoid(wxT("COMMIT TRANSACTION;")); ShowObject(); Destroy(); }