/*
 * Reads and parses the raw NBT chunk at region-local coordinates (cx, cz).
 *
 * Returns the parsed NBT tree, or NULL if the chunk is absent or any I/O,
 * validation, or parse step fails.  The caller owns the returned tree and
 * must release it with nbt_free().
 *
 * If region->keepopen is set, the region's cached FILE handle is used (and
 * opened/cached on first use); otherwise a private handle is opened and
 * closed before returning.
 */
nbt_node* get_raw_chunk(regionfile* region, int32_t cx, int32_t cz) {
    if (!region)
        return NULL;

    /* Chunk coordinates are region-local: 0..31 on each axis. */
    cx &= 0x1f;
    cz &= 0x1f;

    /* Each offset table entry packs (first sector << 8) | sector count. */
    uint32_t offset = region->offsets[cx + cz * 32];
    if (offset == 0)
        return NULL;

    uint32_t numSectors = offset & 0xff;
    if (numSectors == 0)
        return NULL;

    uint32_t sectorStart = offset >> 8;

    FILE* f;
    if (region->keepopen == 1) {
        if (region->file) {
            f = region->file;
        } else {
            f = fopen(region->filename, "rb+");
            region->file = f;
        }
    } else {
        f = fopen(region->filename, "rb+");
    }

    nbt_node* output = NULL;
    if (f && fseek(f, (long)sectorStart * SECTOR_BYTES, SEEK_SET) == 0) {
        unsigned char buf[numSectors * SECTOR_BYTES];
        if (fread(buf, 1, sizeof(buf), f) == sizeof(buf)) {
            /*
             * Chunk header: a big-endian 32-bit length, then a one-byte
             * compression type.  Assemble the length with explicit shifts;
             * the previous be32toh() on a little-endian-assembled value was
             * only correct on little-endian hosts.
             */
            size_t size = ((size_t)buf[0] << 24) | ((size_t)buf[1] << 16)
                        | ((size_t)buf[2] << 8)  |  (size_t)buf[3];

            /*
             * `size` counts the compression byte plus the compressed payload,
             * so the payload at buf+5 is size-1 bytes (same convention as
             * mcr_chunk_get's data+1/len-1).  Reject lengths that would run
             * past the sectors we actually read.
             */
            if (size >= 1 && size - 1 <= sizeof(buf) - 5) {
                /* cNBT detects the compression algorithm by itself. */
                output = nbt_parse_compressed(buf + 5, size - 1);
            }
        }
    }

    /* Only close a handle we opened privately; never fclose(NULL). */
    if (f && region->file != f)
        fclose(f);

    return output;
}
/*
 * Iterates over every chunk present in the region file, parsing each one and
 * invoking function(node, context) on it.  The parsed node is freed after the
 * callback returns, so the callback must not retain it.  Note the callback
 * may receive NULL when a chunk fails to parse (original behavior preserved).
 *
 * If region->keepopen is set, the region's cached FILE handle is used (and
 * opened/cached on first use); otherwise a private handle is opened and
 * closed before returning.
 */
void for_each_chunk_raw(regionfile* region, raw_chunk_func function, void* context) {
    if (!region || !function)
        return;

    FILE* f;
    if (region->keepopen == 1) {
        if (region->file) {
            f = region->file;
        } else {
            f = fopen(region->filename, "rb+");
            region->file = f;
        }
    } else {
        f = fopen(region->filename, "rb+");
    }

    /* Without an open file there is nothing to iterate over. */
    if (!f)
        return;

    for (size_t i = 0; i < SECTOR_INTS; i++) {
        /* Each offset table entry packs (first sector << 8) | sector count. */
        uint32_t offset = region->offsets[i];
        if (offset == 0)
            continue;

        uint32_t numSectors = offset & 0xff;
        if (numSectors == 0)
            continue;

        uint32_t sectorStart = offset >> 8;
        if (fseek(f, (long)sectorStart * SECTOR_BYTES, SEEK_SET) != 0)
            continue;

        unsigned char buf[numSectors * SECTOR_BYTES];
        if (fread(buf, 1, sizeof(buf), f) != sizeof(buf))
            continue;

        /*
         * Chunk header: big-endian 32-bit length, then a one-byte compression
         * type.  Explicit shifts replace the old be32toh() idiom, which was
         * only correct on little-endian hosts.
         */
        size_t size = ((size_t)buf[0] << 24) | ((size_t)buf[1] << 16)
                    | ((size_t)buf[2] << 8)  |  (size_t)buf[3];

        /*
         * `size` includes the compression byte; the payload at buf+5 is
         * size-1 bytes.  Skip entries whose declared length exceeds the
         * sectors we read (prevents an out-of-bounds read in the parser).
         */
        if (size < 1 || size - 1 > sizeof(buf) - 5)
            continue;

        nbt_node* node = nbt_parse_compressed(buf + 5, size - 1);
        function(node, context);
        nbt_free(node);
    }

    /* Only close a handle we opened privately. */
    if (region->file != f)
        fclose(f);
}
/*
 * Returns the parsed NBT tree for chunk (x, z) of the given MCR, or NULL
 * (with errno set to NBT_OK) when that chunk holds no data.  Coordinates
 * must lie in [0, 32) on both axes.
 */
nbt_node *mcr_chunk_get(MCR *mcr, int x, int z) {
    assert(mcr && x >= 0 && x < 32 && z >= 0 && z < 32);

    struct MCRChunk *c = &mcr->chunk[x][z];
    if (c->data == NULL) {
        errno = NBT_OK;
        return NULL;
    }

    /* Byte 0 is the compression type; the compressed payload follows it. */
    return nbt_parse_compressed(c->data + 1, c->len - 1);
}
/* * No incremental parsing goes on. We just dump the whole compressed file into * memory then pass the job off to nbt_parse_chunk. */ nbt_node* nbt_parse_file(FILE* fp) { errno = NBT_OK; struct buffer compressed = read_file(fp); if(compressed.data == NULL) return NULL; nbt_node* ret = nbt_parse_compressed(compressed.data, compressed.len); buffer_free(&compressed); return ret; }
/* * Reads in uncompressed data and returns a buffer with the $(strat)-compressed * data within. Returns a NULL buffer on failure, and sets errno appropriately. */ static struct buffer __compress(const void* mem, size_t len, nbt_compression_strategy strat) { struct buffer ret; BUFFER_INIT_(&ret); errno = NBT_OK; z_stream stream = { .zalloc = Z_NULL, .zfree = Z_NULL, .opaque = Z_NULL, .next_in = Z_NULL,//(void*)mem, .avail_in = 0//len }; stream.next_in = (void*)mem; stream.avail_in = 0; /* "The default value is 15"... */ int windowbits = 15; /* ..."Add 16 to windowBits to write a simple gzip header and trailer around * the compressed data instead of a zlib wrapper." */ if(strat == STRAT_GZIP) windowbits += 16; if(deflateInit2(&stream, Z_DEFAULT_COMPRESSION, Z_DEFLATED, windowbits, 8, Z_DEFAULT_STRATEGY ) != Z_OK) { errno = NBT_EZ; return ret; } assert(stream.avail_in == len); /* I'm not sure if zlib will clobber this */ do { if(buffer_reserve(&ret, ret.len + CHUNK_SIZE)) { errno = NBT_EMEM; goto compression_error; } stream.next_out = ret.data + ret.len; stream.avail_out = CHUNK_SIZE; if(deflate(&stream, Z_FINISH) == Z_STREAM_ERROR) goto compression_error; ret.len += CHUNK_SIZE - stream.avail_out; } while(stream.avail_out == 0); (void)deflateEnd(&stream); return ret; compression_error: if(errno == NBT_OK) errno = NBT_EZ; (void)deflateEnd(&stream); buffer_free(&ret); BUFFER_INIT_(&ret); return ret; } /* * Reads in zlib-compressed data, and returns a buffer with the decompressed * data within. Returns a NULL buffer on failure, and sets errno appropriately. 
*/ static struct buffer __decompress(const void* mem, size_t len) { struct buffer ret; BUFFER_INIT_(&ret); errno = NBT_OK; z_stream stream = { .zalloc = Z_NULL, .zfree = Z_NULL, .opaque = Z_NULL, .next_in = Z_NULL,//(void*)mem, .avail_in = 0//len }; stream.next_in = (void*)mem; stream.avail_in = len; /* "Add 32 to windowBits to enable zlib and gzip decoding with automatic * header detection" */ if(inflateInit2(&stream, 15 + 32) != Z_OK) { errno = NBT_EZ; return ret; } int zlib_ret; do { if(buffer_reserve(&ret, ret.len + CHUNK_SIZE)) { errno = NBT_EMEM; goto decompression_error; } stream.avail_out = CHUNK_SIZE; stream.next_out = (unsigned char*)ret.data + ret.len; switch((zlib_ret = inflate(&stream, Z_NO_FLUSH))) { case Z_MEM_ERROR: errno = NBT_EMEM; /* fall through */ case Z_DATA_ERROR: case Z_NEED_DICT: goto decompression_error; default: /* update our buffer length to reflect the new data */ ret.len += CHUNK_SIZE - stream.avail_out; } } while(stream.avail_out == 0); /* * If we're at the end of the input data, we'd sure as hell be at the end * of the zlib stream. */ if(zlib_ret != Z_STREAM_END) goto decompression_error; (void)inflateEnd(&stream); return ret; decompression_error: if(errno == NBT_OK) errno = NBT_EZ; (void)inflateEnd(&stream); buffer_free(&ret); BUFFER_INIT_(&ret); return ret; } /* * No incremental parsing goes on. We just dump the whole compressed file into * memory then pass the job off to nbt_parse_chunk. */ nbt_node* nbt_parse_file(FILE* fp) { errno = NBT_OK; struct buffer compressed = read_file(fp); if(compressed.data == NULL) return NULL; nbt_node* ret = nbt_parse_compressed(compressed.data, compressed.len); buffer_free(&compressed); return ret; } nbt_node* nbt_parse_path(const char* filename) { FILE* fp = fopen(filename, "rb"); if(fp == NULL) { errno = NBT_EIO; return NULL; } nbt_node* r = nbt_parse_file(fp); fclose(fp); return r; }