JsonNodeT *JsonParse(const char *json) { JsonNodeT *node = NULL; int num = 0; LOG("Lexing JSON."); if (CountTokens(json, &num)) { ParserT parser; /* read tokens into an array */ TokenT *tokens = ReadTokens(json, num); /* now... parse! */ ParserInit(&parser, tokens, num); LOG("Parsing JSON."); if (!ParseValue(&parser, &node)) { #ifdef DEBUG_LEXER LOG("Parse error: %s at token ", parser.errmsg); TokenPrint(&parser.tokens[parser.pos]); #else LOG("Parse error: %s at position %d.", parser.errmsg, parser.tokens[parser.pos].pos); #endif MemUnref(node); node = NULL; } else { LOG("Parsing finished."); } MemUnref(tokens); } return node; }
/*
 * LoadMapFile
 * Load and parse the .map source file (options.szMapName) into the global
 * 'map' structure: entities, brushes, faces and texture names.  Also trims
 * the over-allocated miptex/texinfo arrays down to their used size and
 * pre-allocates the global plane array (pPlanes/cPlanes) with an estimate of
 * how many planes the later BSP stages will need.
 *
 * NOTE(review): Message(msgError, ...) is presumed to be fatal (no return) —
 * confirm; otherwise execution would continue past the error checks below.
 */
void LoadMapFile(void)
{
    char *buf;
    int i, j, length;
    void *pTemp;
    struct lumpdata *texinfo;

    Message(msgProgress, "LoadMapFile");

    /* Read the whole map source into one buffer; length is the byte count
       that FreeMem must account for below (+1 for the terminator). */
    length = LoadFile(options.szMapName, (void *)&buf, true);
    PreParseFile(buf);
    ParserInit(buf);

    // Faces are loaded in reverse order, to be compatible with origqbsp.
    // Brushes too.
    map.iFaces = map.cFaces - 1;
    map.iBrushes = map.cBrushes - 1;
    map.iEntities = 0;
    pCurEnt = &map.rgEntities[0];

    /* Parse every entity in the file; ParseEntity returns false at EOF. */
    while (ParseEntity(pCurEnt)) {
        map.iEntities++;
        pCurEnt++;
    }

    FreeMem(buf, OTHER, length + 1);

    // Print out warnings for entities
    if (!(rgfStartSpots & info_player_start))
        Message(msgWarning, warnNoPlayerStart);
    if (!(rgfStartSpots & info_player_deathmatch))
        Message(msgWarning, warnNoPlayerDeathmatch);
//  if (!(rgfStartSpots & info_player_coop))
//      Message(msgWarning, warnNoPlayerCoop);

    // Clean up texture memory
    /* map.cFaces slots were pre-allocated for texture names; shrink to the
       cMiptex actually used (plus headroom for animating textures). */
    if (cMiptex > map.cFaces)
        Message(msgError, errLowMiptexCount);
    else if (cMiptex < map.cFaces) {
        // For stuff in AddAnimatingTex, make room available
        pTemp = (void *)rgszMiptex;
        rgszMiptex = AllocMem(MIPTEX, cMiptex + cAnimtex * 20, true);
        memcpy(rgszMiptex, pTemp, cMiptex * rgcMemSize[MIPTEX]);
        FreeMem(pTemp, MIPTEX, map.cFaces);
    }

    /* Shrink the worldspawn texinfo lump to the entries actually written
       (index = used, count = allocated). */
    texinfo = &pWorldEnt->lumps[BSPTEXINFO];
    if (texinfo->index > texinfo->count)
        Message(msgError, errLowTexinfoCount);
    else if (texinfo->index < texinfo->count) {
        pTemp = texinfo->data;
        texinfo->data = AllocMem(BSPTEXINFO, texinfo->index, true);
        memcpy(texinfo->data, pTemp, texinfo->index * rgcMemSize[BSPTEXINFO]);
        FreeMem(pTemp, BSPTEXINFO, texinfo->count);
        texinfo->count = texinfo->index;
    }

    // One plane per face + 6 for portals
    cPlanes = map.cFaces + 6;

    // Count # of unique planes
    /* O(n^2) duplicate scan: a face's plane is unique unless an earlier
       unique face has the same normal and (within EQUAL_EPSILON) distance. */
    for (i = 0; i < map.cFaces; i++) {
        map.rgFaces[i].fUnique = true;
        for (j = 0; j < i; j++)
            if (map.rgFaces[j].fUnique &&
                VectorCompare(map.rgFaces[i].plane.normal,
                              map.rgFaces[j].plane.normal) &&
                fabs(map.rgFaces[i].plane.dist -
                     map.rgFaces[j].plane.dist) < EQUAL_EPSILON) {
                map.rgFaces[i].fUnique = false;
                cPlanes--;
                break;
            }
    }

    // Now iterate through brushes, add one plane for each face below 6 axis
    // aligned faces. This compensates for planes added in ExpandBrush.
    int cAxis;
    for (i = 0; i < map.cBrushes; i++) {
        cAxis = 0;
        for (j = map.rgBrushes[i].iFaceStart; j < map.rgBrushes[i].iFaceEnd; j++) {
            if (fabs(map.rgFaces[j].plane.normal[0]) > 1 - NORMAL_EPSILON ||
                fabs(map.rgFaces[j].plane.normal[1]) > 1 - NORMAL_EPSILON ||
                fabs(map.rgFaces[j].plane.normal[2]) > 1 - NORMAL_EPSILON)
                cAxis++;
        }
        if (6 - cAxis > 0)
            cPlanes += 6 - cAxis;
    }

    // cPlanes*3 because of 3 hulls, then add 20% as a fudge factor for hull
    // edge bevel planes
    cPlanes = 3 * cPlanes + cPlanes / 5;
    pPlanes = AllocMem(PLANE, cPlanes, true);

    Message(msgStat, "%5i faces", map.cFaces);
    Message(msgStat, "%5i brushes", map.cBrushes);
    Message(msgStat, "%5i entities", map.cEntities);
    Message(msgStat, "%5i unique texnames", cMiptex);
    Message(msgStat, "%5i texinfo", texinfo->count);
    Message(msgLiteral, "\n");
}
/**
 * @brief Entry point of the user-defined function for pg_bulkload.
 *
 * Runs a complete bulk load: parses the option datum into a Reader and a
 * Writer, streams tuples from reader to writer inside the writer's memory
 * context, then closes both and reports load statistics.
 *
 * @return A composite row (must match the declared result type) containing:
 * rows skipped, rows loaded, parse errors, duplicate-new, duplicate-old,
 * and system/user/elapsed CPU times in seconds.  Errors are raised via
 * ereport/elog rather than returned.
 */
Datum
pg_bulkload(PG_FUNCTION_ARGS)
{
	Reader		   *rd = NULL;
	Writer		   *wt = NULL;
	Datum			options;
	MemoryContext	ctx;
	MemoryContext	ccxt;
	PGRUsage		ru0;
	PGRUsage		ru1;
	int64			count;
	int64			parse_errors;
	int64			skip;
	WriterResult	ret;
	char		   *start;
	char		   *end;
	float8			system;
	float8			user;
	float8			duration;
	TupleDesc		tupdesc;
	Datum			values[PG_BULKLOAD_COLS];
	bool			nulls[PG_BULKLOAD_COLS];
	HeapTuple		result;

	/* Build a tuple descriptor for our result type */
	if (get_call_result_type(fcinfo, NULL, &tupdesc) != TYPEFUNC_COMPOSITE)
		elog(ERROR, "return type must be a row type");

	BULKLOAD_PROFILE_PUSH();
	pg_rusage_init(&ru0);

	/* must be the super user */
	if (!superuser())
		ereport(ERROR,
				(errcode(ERRCODE_INSUFFICIENT_PRIVILEGE),
				 errmsg("must be superuser to use pg_bulkload")));

	options = PG_GETARG_DATUM(0);

	/* Remember the caller's context so the error handler below can switch
	 * back into it before copying error data. */
	ccxt = CurrentMemoryContext;

	/*
	 * STEP 1: Initialization
	 */

	/* parse options and create reader and writer */
	ParseOptions(options, &rd, &wt, ru0.tv.tv_sec);

	/* initialize reader */
	ReaderInit(rd);

	/*
	 * We need to split PG_TRY block because gcc optimizes if-branches with
	 * longjmp codes too much. Local variables initialized in either branch
	 * cannot be handled another branch.
	 */
	PG_TRY();
	{
		/* truncate heap */
		if (wt->truncate)
			TruncateTable(wt->relid);

		/* initialize writer */
		WriterInit(wt);

		/* initialize checker */
		CheckerInit(&rd->checker, wt->rel, wt->tchecker);

		/* initialize parser */
		ParserInit(rd->parser, &rd->checker, rd->infile, wt->desc,
				   wt->multi_process, PG_GET_COLLATION());
	}
	PG_CATCH();
	{
		/* Abort-close whatever was created ('true' = error path). */
		if (rd)
			ReaderClose(rd, true);
		if (wt)
			WriterClose(wt, true);
		PG_RE_THROW();
	}
	PG_END_TRY();

	/* No throwable codes here! */

	PG_TRY();
	{
		/* create logger */
		CreateLogger(rd->logfile, wt->verbose, rd->infile[0] == ':');

		start = timeval_to_cstring(ru0.tv);
		LoggerLog(INFO, "\npg_bulkload %s on %s\n\n",
				  PG_BULKLOAD_VERSION, start);

		ReaderDumpParams(rd);
		WriterDumpParams(wt);
		LoggerLog(INFO, "\n");

		BULKLOAD_PROFILE(&prof_init);

		/*
		 * STEP 2: Build heap
		 */

		/* Switch into its memory context */
		Assert(wt->context);
		ctx = MemoryContextSwitchTo(wt->context);

		/* Loop for each input file record. */
		while (wt->count < rd->limit)
		{
			HeapTuple	tuple;

			CHECK_FOR_INTERRUPTS();

			/* read tuple */
			BULKLOAD_PROFILE_PUSH();
			tuple = ReaderNext(rd);
			BULKLOAD_PROFILE_POP();
			BULKLOAD_PROFILE(&prof_reader);
			if (tuple == NULL)
				break;

			/* write tuple */
			BULKLOAD_PROFILE_PUSH();
			WriterInsert(wt, tuple);
			wt->count += 1;
			BULKLOAD_PROFILE_POP();
			BULKLOAD_PROFILE(&prof_writer);

			/* Per-tuple allocations die here, keeping memory use flat. */
			MemoryContextReset(wt->context);
			BULKLOAD_PROFILE(&prof_reset);
		}

		MemoryContextSwitchTo(ctx);

		/*
		 * STEP 3: Finalize heap and merge indexes
		 */

		/* Snapshot counters before the close calls free rd/wt. */
		count = wt->count;
		parse_errors = rd->parse_errors;

		/*
		 * close writer first and reader second because shmem_exit callback
		 * is managed by a simple stack.
		 */
		ret = WriterClose(wt, false);
		wt = NULL;
		skip = ReaderClose(rd, false);
		rd = NULL;
	}
	PG_CATCH();
	{
		ErrorData	   *errdata;
		MemoryContext	ecxt;

		/* CopyErrorData must not run in the (possibly reset) load context. */
		ecxt = MemoryContextSwitchTo(ccxt);
		errdata = CopyErrorData();
		LoggerLog(INFO, "%s\n", errdata->message);
		FreeErrorData(errdata);

		/* close writer first, and reader second */
		if (wt)
			WriterClose(wt, true);
		if (rd)
			ReaderClose(rd, true);

		MemoryContextSwitchTo(ecxt);
		PG_RE_THROW();
	}
	PG_END_TRY();

	/* Rows that replaced duplicates are reported separately, not as loads. */
	count -= ret.num_dup_new;

	LoggerLog(INFO, "\n"
			  " " int64_FMT " Rows skipped.\n"
			  " " int64_FMT " Rows successfully loaded.\n"
			  " " int64_FMT " Rows not loaded due to parse errors.\n"
			  " " int64_FMT " Rows not loaded due to duplicate errors.\n"
			  " " int64_FMT " Rows replaced with new rows.\n\n",
			  skip, count, parse_errors, ret.num_dup_new, ret.num_dup_old);

	pg_rusage_init(&ru1);
	system = diffTime(ru1.ru.ru_stime, ru0.ru.ru_stime);
	user = diffTime(ru1.ru.ru_utime, ru0.ru.ru_utime);
	duration = diffTime(ru1.tv, ru0.tv);
	end = timeval_to_cstring(ru1.tv);

	/* Assemble the result row; column order must match the declared type. */
	memset(nulls, 0, sizeof(nulls));
	values[0] = Int64GetDatum(skip);
	values[1] = Int64GetDatum(count);
	values[2] = Int64GetDatum(parse_errors);
	values[3] = Int64GetDatum(ret.num_dup_new);
	values[4] = Int64GetDatum(ret.num_dup_old);
	values[5] = Float8GetDatumFast(system);
	values[6] = Float8GetDatumFast(user);
	values[7] = Float8GetDatumFast(duration);

	LoggerLog(INFO,
			  "Run began on %s\n"
			  "Run ended on %s\n\n"
			  "CPU %.2fs/%.2fu sec elapsed %.2f sec\n",
			  start, end, system, user, duration);

	LoggerClose();

	result = heap_form_tuple(tupdesc, values, nulls);

	BULKLOAD_PROFILE(&prof_fini);
	BULKLOAD_PROFILE_POP();
	BULKLOAD_PROFILE_PRINT();

	PG_RETURN_DATUM(HeapTupleGetDatum(result));
}
void LoadMapFile(void) { parser_t parser; char *buf; int i, j, length, cAxis; void *pTemp; struct lumpdata *texinfo; mapentity_t *ent; mapbrush_t *brush; mapface_t *face, *face2; Message(msgProgress, "LoadMapFile"); length = LoadFile(options.szMapName, &buf, true); PreParseFile(buf); ParserInit(&parser, buf); map.numfaces = map.numbrushes = map.numentities = 0; ent = map.entities; while (ParseEntity(&parser, ent)) { /* Allocate memory for the bmodel, if needed. */ const char *classname = ValueForKey(ent, "classname"); if (strcmp(classname, "func_detail") && ent->nummapbrushes) { ent->lumps[BSPMODEL].data = AllocMem(BSPMODEL, 1, true); ent->lumps[BSPMODEL].count = 1; } map.numentities++; ent++; } /* Double check the entity count matches our pre-parse count */ if (map.numentities != map.maxentities) Error(errLowEntCount); FreeMem(buf, OTHER, length + 1); // Print out warnings for entities if (!(rgfStartSpots & info_player_start)) Message(msgWarning, warnNoPlayerStart); if (!(rgfStartSpots & info_player_deathmatch)) Message(msgWarning, warnNoPlayerDeathmatch); // if (!(rgfStartSpots & info_player_coop)) // Message(msgWarning, warnNoPlayerCoop); // Clean up texture memory if (map.nummiptex > map.maxfaces) Error(errLowMiptexCount); else if (map.nummiptex < map.maxfaces) { // For stuff in AddAnimatingTex, make room available pTemp = map.miptex; map.maxmiptex = map.nummiptex + cAnimtex * 20; map.miptex = AllocMem(MIPTEX, map.maxmiptex, true); memcpy(map.miptex, pTemp, map.nummiptex * rgcMemSize[MIPTEX]); FreeMem(pTemp, MIPTEX, map.maxfaces); } texinfo = &pWorldEnt->lumps[BSPTEXINFO]; if (texinfo->index > texinfo->count) Error(errLowTexinfoCount); else if (texinfo->index < texinfo->count) { pTemp = texinfo->data; texinfo->data = AllocMem(BSPTEXINFO, texinfo->index, true); memcpy(texinfo->data, pTemp, texinfo->index * rgcMemSize[BSPTEXINFO]); FreeMem(pTemp, BSPTEXINFO, texinfo->count); texinfo->count = texinfo->index; } // One plane per face + 6 for portals map.maxplanes = 
map.numfaces + 6; // Count # of unique planes in all of the faces for (i = 0, face = map.faces; i < map.numfaces; i++, face++) { face->fUnique = true; for (j = 0, face2 = map.faces; j < i; j++, face2++) { if (face2->fUnique && VectorCompare(face->plane.normal, face2->plane.normal) && fabs(face->plane.dist - face2->plane.dist) < EQUAL_EPSILON) { face->fUnique = false; map.maxplanes--; break; } } } /* * Now iterate through brushes, add one plane for each face below 6 axis * aligned faces. This compensates for planes added in ExpandBrush. */ for (i = 0, brush = map.brushes; i < map.numbrushes; i++, brush++) { cAxis = 0; for (j = 0, face = brush->faces; j < brush->numfaces; j++, face++) { if (fabs(face->plane.normal[0]) > 1 - NORMAL_EPSILON || fabs(face->plane.normal[1]) > 1 - NORMAL_EPSILON || fabs(face->plane.normal[2]) > 1 - NORMAL_EPSILON) cAxis++; } if (6 - cAxis > 0) map.maxplanes += 6 - cAxis; } /* * map.maxplanes*3 because of 3 hulls, then add 20% as a fudge factor for * hull edge bevel planes */ map.maxplanes = 3 * map.maxplanes + map.maxplanes / 5; map.planes = AllocMem(PLANE, map.maxplanes, true); Message(msgStat, "%5i faces", map.numfaces); Message(msgStat, "%5i brushes", map.numbrushes); Message(msgStat, "%5i entities", map.numentities); Message(msgStat, "%5i unique texnames", map.nummiptex); Message(msgStat, "%5i texinfo", texinfo->count); Message(msgLiteral, "\n"); }