cWSSCompact::cPAKFile::~cPAKFile()
{
	// Flush any chunks still dirty in the cache before the file object dies:
	if (m_NumDirty > 0)
	{
		SynchronizeFile();
	}

	// Free every cached chunk header:
	sChunkHeaders::iterator itr = m_ChunkHeaders.begin();
	while (itr != m_ChunkHeaders.end())
	{
		delete *itr;
		++itr;
	}
}
bool cWSSCompact::cPAKFile::SaveChunk(const cChunkCoords & a_Chunk, cWorld * a_World)
{
	// Serialize the chunk into this file's in-memory cache first:
	bool HasSaved = SaveChunkToData(a_Chunk, a_World);
	if (!HasSaved)
	{
		return false;
	}

	// Once enough chunks have accumulated, write the cache out to disk:
	if (m_NumDirty > MAX_DIRTY_CHUNKS)
	{
		SynchronizeFile();
	}
	return true;
}
/*
 * Process the cron update file, if present: each line's first word is
 * handed to SynchronizeFile().  The update file is removed up front.
 */
static void CheckUpdates(void)
{
	char line[256];
	FILE *fp;

	fp = fopen(CRONUPDATE, "r");
	if (fp == NULL) {
		return;
	}
	/* remove the name now; the already-open stream stays readable (POSIX) */
	remove(CRONUPDATE);
	while (fgets(line, sizeof(line), fp) != NULL) {
		/* only the first whitespace-delimited word of each line is used */
		SynchronizeFile(strtok(line, " \t\r\n"));
	}
	fclose(fp);
}
/*
 * Process the cron update file, if present: each line's first word is
 * handed to SynchronizeFile().  The update file is unlinked up front.
 */
static void CheckUpdates(void)
{
	char line[256];
	FILE *fp;

	fp = fopen(CRONUPDATE, "r");
	if (fp == NULL) {
		return;
	}
	/* unlink the name now; the already-open stream stays readable (POSIX) */
	unlink(CRONUPDATE);
	while (fgets(line, sizeof(line), fp) != NULL) {
		/* use first word only */
		SynchronizeFile(strtok(line, " \t\r\n"));
	}
	fclose(fp);
}
static void SynchronizeDir(void)
{
	/*
	 * Attempt to delete the database: repeatedly find the first entry
	 * that is not yet marked deleted and delete it, until none remain.
	 */
	for (;;) {
		CronFile *file = FileBase;

		while (file != NULL && file->cf_Deleted) {
			file = file->cf_Next;
		}
		if (file == NULL) {
			break;
		}
		DeleteFile(file->cf_User);
	}

	/*
	 * Remove cron update file.
	 *
	 * Re-chdir, in case directory was renamed & deleted, or otherwise
	 * screwed up.
	 *
	 * Then scan the directory and add associated users.
	 */
	remove(CRONUPDATE);
	if (chdir(CDir) < 0) {
		crondlog("\311unable to find %s\n", CDir);
	}
	{
		struct dirent *den;
		DIR *dir;

		dir = opendir(".");
		if (dir == NULL) {
			crondlog("\311Unable to open current dir!\n");
		} else {
			while ((den = readdir(dir)) != NULL) {
				/* skip any name containing a dot (incl. "." and "..") */
				if (strchr(den->d_name, '.') != NULL) {
					continue;
				}
				if (getpwnam(den->d_name) != NULL) {
					SynchronizeFile(den->d_name);
				} else {
					crondlog("\007ignoring %s\n", den->d_name);
				}
			}
			closedir(dir);
		}
	}
}
static void SynchronizeDir(void) { CronFile *file; /* Attempt to delete the database. */ again: for (file = FileBase; file; file = file->cf_Next) { if (!file->cf_Deleted) { DeleteFile(file->cf_User); goto again; } } /* * Remove cron update file * * Re-chdir, in case directory was renamed & deleted, or otherwise * screwed up. * * scan directory and add associated users */ unlink(CRONUPDATE); if (chdir(CDir) < 0) { crondlog(DIE9 "can't chdir(%s)", CDir); } { DIR *dir = opendir("."); struct dirent *den; if (!dir) crondlog(DIE9 "can't chdir(%s)", "."); /* exits */ while ((den = readdir(dir)) != NULL) { if (strchr(den->d_name, '.') != NULL) { continue; } if (getpwnam(den->d_name)) { SynchronizeFile(den->d_name); } else { crondlog(LVL7 "ignoring %s", den->d_name); } } closedir(dir); } }
void cWSSCompact::cPAKFile::UpdateChunk2To3() { int Offset = 0; AString NewDataContents; int ChunksConverted = 0; for (sChunkHeaders::iterator itr = m_ChunkHeaders.begin(); itr != m_ChunkHeaders.end(); ++itr) { sChunkHeader * Header = *itr; if( ChunksConverted % 32 == 0 ) { LOGINFO("Updating \"%s\" version 2 to version 3: " SIZE_T_FMT " %%", m_FileName.c_str(), (ChunksConverted * 100) / m_ChunkHeaders.size() ); } ChunksConverted++; AString Data; int UncompressedSize = Header->m_UncompressedSize; Data.assign(m_DataContents, Offset, Header->m_CompressedSize); Offset += Header->m_CompressedSize; // Crude data integrity check: const int ExpectedSize = (16*256*16)*2 + (16*256*16)/2; // For version 2 if (UncompressedSize < ExpectedSize) { LOGWARNING("Chunk [%d, %d] has too short decompressed data (%d bytes out of %d needed), erasing", Header->m_ChunkX, Header->m_ChunkZ, UncompressedSize, ExpectedSize ); Offset += Header->m_CompressedSize; continue; } // Decompress the data: AString UncompressedData; { int errorcode = UncompressString(Data.data(), Data.size(), UncompressedData, UncompressedSize); if (errorcode != Z_OK) { LOGERROR("Error %d decompressing data for chunk [%d, %d]", errorcode, Header->m_ChunkX, Header->m_ChunkZ ); Offset += Header->m_CompressedSize; continue; } } if (UncompressedSize != (int)UncompressedData.size()) { LOGWARNING("Uncompressed data size differs (exp %d bytes, got " SIZE_T_FMT ") for chunk [%d, %d]", UncompressedSize, UncompressedData.size(), Header->m_ChunkX, Header->m_ChunkZ ); Offset += Header->m_CompressedSize; continue; } char ConvertedData[ExpectedSize]; memset(ConvertedData, 0, ExpectedSize); // Cannot use cChunk::MakeIndex because it might change again????????? 
// For compatibility, use what we know is current #define MAKE_3_INDEX( x, y, z ) ( x + (z * 16) + (y * 16 * 16) ) unsigned int InChunkOffset = 0; for( int x = 0; x < 16; ++x ) for( int z = 0; z < 16; ++z ) for( int y = 0; y < 256; ++y ) // YZX Loop order is important, in 1.1 Y was first then Z then X { ConvertedData[ MAKE_3_INDEX(x, y, z) ] = UncompressedData[InChunkOffset]; ++InChunkOffset; } // for y, z, x unsigned int index2 = 0; for( int x = 0; x < 16; ++x ) for( int z = 0; z < 16; ++z ) for( int y = 0; y < 256; ++y ) { ConvertedData[ InChunkOffset + MAKE_3_INDEX(x, y, z)/2 ] |= ( (UncompressedData[ InChunkOffset + index2/2 ] >> ((index2&1)*4) ) & 0x0f ) << ((x&1)*4); ++index2; } InChunkOffset += index2 / 2; index2 = 0; for( int x = 0; x < 16; ++x ) for( int z = 0; z < 16; ++z ) for( int y = 0; y < 256; ++y ) { ConvertedData[ InChunkOffset + MAKE_3_INDEX(x, y, z)/2 ] |= ( (UncompressedData[ InChunkOffset + index2/2 ] >> ((index2&1)*4) ) & 0x0f ) << ((x&1)*4); ++index2; } InChunkOffset += index2 / 2; index2 = 0; for( int x = 0; x < 16; ++x ) for( int z = 0; z < 16; ++z ) for( int y = 0; y < 256; ++y ) { ConvertedData[ InChunkOffset + MAKE_3_INDEX(x, y, z)/2 ] |= ( (UncompressedData[ InChunkOffset + index2/2 ] >> ((index2&1)*4) ) & 0x0f ) << ((x&1)*4); ++index2; } InChunkOffset += index2 / 2; AString Converted(ConvertedData, ExpectedSize); // Add JSON data afterwards if (UncompressedData.size() > InChunkOffset) { Converted.append( UncompressedData.begin() + InChunkOffset, UncompressedData.end() ); } // Re-compress data AString CompressedData; { int errorcode = CompressString(Converted.data(), Converted.size(), CompressedData, m_CompressionFactor); if (errorcode != Z_OK) { LOGERROR("Error %d compressing data for chunk [%d, %d]", errorcode, Header->m_ChunkX, Header->m_ChunkZ ); continue; } } // Save into file's cache Header->m_UncompressedSize = Converted.size(); Header->m_CompressedSize = CompressedData.size(); NewDataContents.append( CompressedData ); } // Done 
converting m_DataContents = NewDataContents; m_ChunkVersion = 3; SynchronizeFile(); LOGINFO("Updated \"%s\" version 2 to version 3", m_FileName.c_str() ); }
void cWSSCompact::cPAKFile::UpdateChunk1To2() { int Offset = 0; AString NewDataContents; int ChunksConverted = 0; for (sChunkHeaders::iterator itr = m_ChunkHeaders.begin(); itr != m_ChunkHeaders.end(); ++itr) { sChunkHeader * Header = *itr; if( ChunksConverted % 32 == 0 ) { LOGINFO("Updating \"%s\" version 1 to version 2: " SIZE_T_FMT " %%", m_FileName.c_str(), (ChunksConverted * 100) / m_ChunkHeaders.size() ); } ChunksConverted++; AString Data; int UncompressedSize = Header->m_UncompressedSize; Data.assign(m_DataContents, Offset, Header->m_CompressedSize); Offset += Header->m_CompressedSize; // Crude data integrity check: int ExpectedSize = (16*128*16)*2 + (16*128*16)/2; // For version 1 if (UncompressedSize < ExpectedSize) { LOGWARNING("Chunk [%d, %d] has too short decompressed data (%d bytes out of %d needed), erasing", Header->m_ChunkX, Header->m_ChunkZ, UncompressedSize, ExpectedSize ); Offset += Header->m_CompressedSize; continue; } // Decompress the data: AString UncompressedData; { int errorcode = UncompressString(Data.data(), Data.size(), UncompressedData, UncompressedSize); if (errorcode != Z_OK) { LOGERROR("Error %d decompressing data for chunk [%d, %d]", errorcode, Header->m_ChunkX, Header->m_ChunkZ ); Offset += Header->m_CompressedSize; continue; } } if (UncompressedSize != (int)UncompressedData.size()) { LOGWARNING("Uncompressed data size differs (exp %d bytes, got " SIZE_T_FMT ") for chunk [%d, %d]", UncompressedSize, UncompressedData.size(), Header->m_ChunkX, Header->m_ChunkZ ); Offset += Header->m_CompressedSize; continue; } // Old version is 128 blocks high with YZX axis order char ConvertedData[cChunkDef::BlockDataSize]; int Index = 0; unsigned int InChunkOffset = 0; for( int x = 0; x < 16; ++x ) for( int z = 0; z < 16; ++z ) { for( int y = 0; y < 128; ++y ) { ConvertedData[Index++] = UncompressedData[y + z * 128 + x * 128 * 16 + InChunkOffset]; } // Add 128 empty blocks after an old y column memset(ConvertedData + Index, E_BLOCK_AIR, 128); Index 
+= 128; } InChunkOffset += (16 * 128 * 16); for( int x = 0; x < 16; ++x ) for( int z = 0; z < 16; ++z ) // Metadata { for( int y = 0; y < 64; ++y ) { ConvertedData[Index++] = UncompressedData[y + z * 64 + x * 64 * 16 + InChunkOffset]; } memset(ConvertedData + Index, 0, 64); Index += 64; } InChunkOffset += (16 * 128 * 16) / 2; for( int x = 0; x < 16; ++x ) for( int z = 0; z < 16; ++z ) // Block light { for( int y = 0; y < 64; ++y ) { ConvertedData[Index++] = UncompressedData[y + z * 64 + x * 64 * 16 + InChunkOffset]; } memset(ConvertedData + Index, 0, 64); Index += 64; } InChunkOffset += (16*128*16)/2; for( int x = 0; x < 16; ++x ) for( int z = 0; z < 16; ++z ) // Sky light { for( int y = 0; y < 64; ++y ) { ConvertedData[Index++] = UncompressedData[y + z * 64 + x * 64 * 16 + InChunkOffset]; } memset(ConvertedData + Index, 0, 64); Index += 64; } InChunkOffset += (16 * 128 * 16) / 2; AString Converted(ConvertedData, ARRAYCOUNT(ConvertedData)); // Add JSON data afterwards if (UncompressedData.size() > InChunkOffset) { Converted.append( UncompressedData.begin() + InChunkOffset, UncompressedData.end() ); } // Re-compress data AString CompressedData; { int errorcode = CompressString(Converted.data(), Converted.size(), CompressedData,m_CompressionFactor); if (errorcode != Z_OK) { LOGERROR("Error %d compressing data for chunk [%d, %d]", errorcode, Header->m_ChunkX, Header->m_ChunkZ ); continue; } } // Save into file's cache Header->m_UncompressedSize = Converted.size(); Header->m_CompressedSize = CompressedData.size(); NewDataContents.append( CompressedData ); } // Done converting m_DataContents = NewDataContents; m_ChunkVersion = 2; SynchronizeFile(); LOGINFO("Updated \"%s\" version 1 to version 2", m_FileName.c_str() ); }
/*
 * Check the cron.update file in the specified directory. If user_override
 * is NULL then the files in the directory belong to the user whose name is
 * the file, otherwise they belong to the user_override user.
 *
 * Each line is "<username> [job [job ...]]".  A bare username resyncs that
 * user's crontab; trailing job names "prod" (re-arm) those jobs.  A job
 * prefixed with '!' is forced to run regardless of schedule.
 */
void CheckUpdates(const char *dpath, const char *user_override, time_t t1, time_t t2)
{
	FILE *fi;
	char buf[SMALL_BUFFER];
	char *fname, *ptok, *job;
	char *path;

	/* build "<dpath>/<CRONUPDATE>"; concat() allocates, freed at the end */
	if (!(path = concat(dpath, "/", CRONUPDATE, NULL))) {
		errno = ENOMEM;
		perror("CheckUpdates");
		exit(1);
	}
	if ((fi = fopen(path, "r")) != NULL) {
		/* unlink now; the already-open stream remains readable */
		remove(path);
		printlogf(LOG_INFO, "reading %s/%s\n", dpath, CRONUPDATE);
		while (fgets(buf, sizeof(buf), fi) != NULL) {
			/*
			 * if buf has only sep chars, return NULL and point ptok at buf's terminating 0
			 * else return pointer to first non-sep of buf and
			 * if there's a following sep, overwrite it to 0 and point ptok to next char
			 * else point ptok at buf's terminating 0
			 *
			 * NOTE(review): on an all-whitespace line fname is NULL and is passed
			 * to SynchronizeFile()/getpwnam() below - presumably tolerated; confirm.
			 */
			fname = strtok_r(buf, " \t\n", &ptok);

			if (user_override)
				SynchronizeFile(dpath, fname, user_override);
			else if (!getpwnam(fname))
				printlogf(LOG_WARNING, "ignoring %s/%s (non-existent user)\n", dpath, fname);
			else if (*ptok == 0 || *ptok == '\n') {
				/* username only: full resync of that user's crontab */
				SynchronizeFile(dpath, fname, fname);
				ReadTimestamps(fname);
			} else {
				/* if fname is followed by whitespace, we prod any following jobs */
				CronFile *file = FileBase;

				while (file) {
					if (strcmp(file->cf_UserName, fname) == 0)
						break;
					file = file->cf_Next;
				}
				if (!file)
					printlogf(LOG_WARNING, "unable to prod for user %s: no crontab\n", fname);
				else {
					CronLine *line;

					/* calling strtok(ptok...) then strtok(NULL) is equiv to
					 * calling strtok_r(NULL,..&ptok) */
					while ((job = strtok(ptok, " \t\n")) != NULL) {
						time_t force = t2;

						ptok = NULL;
						if (*job == '!') {
							/* '!' prefix: force the job to run */
							force = (time_t)-1;
							++job;
						}
						/* find the named job in this user's crontab */
						line = file->cf_LineBase;
						while (line) {
							if (line->cl_JobName && strcmp(line->cl_JobName, job) == 0)
								break;
							line = line->cl_Next;
						}
						if (line)
							ArmJob(file, line, t1, force);
						else {
							printlogf(LOG_WARNING, "unable to prod for user %s: unknown job %s\n", fname, job);
							/* we can continue parsing this line, we just don't install any CronWaiter for the requested job */
						}
					}
				}
			}
		}
		fclose(fi);
	}
	free(path);
}
void SynchronizeDir(const char *dpath, const char *user_override, int initial_scan) { CronFile **pfile; CronFile *file; struct dirent *den; DIR *dir; char *path; if (DebugOpt) printlogf(LOG_DEBUG, "Synchronizing %s\n", dpath); /* * Delete all database CronFiles for this directory. DeleteFile() will * free *pfile and relink the *pfile pointer, or in the alternative will * mark it as deleted. */ pfile = &FileBase; while ((file = *pfile) != NULL) { if (file->cf_Deleted == 0 && strcmp(file->cf_DPath, dpath) == 0) { DeleteFile(pfile); } else { pfile = &file->cf_Next; } } /* * Since we are resynchronizing the entire directory, remove the * the CRONUPDATE file. */ if (!(path = concat(dpath, "/", CRONUPDATE, NULL))) { errno = ENOMEM; perror("SynchronizeDir"); exit(1); } remove(path); free(path); /* * Scan the specified directory */ if ((dir = opendir(dpath)) != NULL) { while ((den = readdir(dir)) != NULL) { if (strchr(den->d_name, '.') != NULL) continue; if (strcmp(den->d_name, CRONUPDATE) == 0) continue; if (user_override) { SynchronizeFile(dpath, den->d_name, user_override); } else if (getpwnam(den->d_name)) { SynchronizeFile(dpath, den->d_name, den->d_name); } else { printlogf(LOG_WARNING, "ignoring %s/%s (non-existent user)\n", dpath, den->d_name); } } closedir(dir); } else { if (initial_scan) printlogf(LOG_ERR, "unable to scan directory %s\n", dpath); /* softerror, do not exit the program */ } }