// used for resolving case-insensative paths to case-sensative ones // when appropriate IsCaseSensative() = true bool PlatformNormalize(std::string &normpath, const std::string &path, const std::string root) { if (!FileList::IsCaseSensative()) // windows simply returns identity { // should only get hit on windows if (root[root.length()-1] != '\\') { normpath = root + "\\" + path; } else { normpath = root + path; } return true; } // else it's a *nix system - normalize it to / instead of \ - FileList curdir; normpath = root; // break path down into it's componants std::vector<std::string> parts; Expand(path, '/', parts); bool found; for (size_t i = 0; i < parts.size(); i++) { curdir.GetList(normpath); found = false; for (size_t j = 0; j < curdir.Size(); j++) { if (strLower(std::string(curdir[j])) == strLower(std::string(parts[i]))) { normpath += std::string("/") + curdir[j]; found = true; break; } } if (!found) return false; //yikes curdir.Clear(); } return true; }
// Handles a client patch request. First tries to satisfy the request entirely
// from the in-memory cache (cache_patchedFiles for delta patches,
// cache_updatedFiles for full files); if any requested file is missing from
// the cache, falls back to the database path (OnGetPatchInt), subject to the
// concurrent-patching user limit.
//
// packet: payload is the message id, setId, last-update date, application
//         name, then the client's serialized FileList of missing/old files.
// Returns RR_STOP_PROCESSING when the packet was queued for later (caller
// must not deallocate), otherwise RR_STOP_PROCESSING_AND_DEALLOCATE.
PluginReceiveResult AutopatcherServer::OnGetPatch(Packet *packet)
{
	RakNet::BitStream inBitStream(packet->data, packet->length, false);
	ThreadData threadData;
	inBitStream.IgnoreBits(8); // skip the message id byte
	inBitStream.Read(threadData.setId);
	double lastUpdateDate;
	inBitStream.Read(lastUpdateDate);
	inBitStream.ReadCompressed(threadData.applicationName);
	threadData.clientList=0;

	// Check in-memory cache, use if possible rather than accessing database
	if (threadData.applicationName==cache_appName && lastUpdateDate!=0 && cacheLoaded && cache_minTime!=0 && lastUpdateDate>cache_minTime)
	{
		threadData.systemAddress=packet->systemAddress;
		threadData.server=this;
		threadData.clientList=RakNet::OP_NEW<FileList>( _FILE_AND_LINE_ );
		if (threadData.clientList->Deserialize(&inBitStream)==false)
		{
			RakNet::OP_DELETE(threadData.clientList, _FILE_AND_LINE_);
			return RR_STOP_PROCESSING_AND_DEALLOCATE;
		}
		if (threadData.clientList->fileList.Size()==0)
		{
			// A request with no files is a protocol violation.
			RakAssert(0);
			RakNet::OP_DELETE(threadData.clientList, _FILE_AND_LINE_);
			return RR_STOP_PROCESSING_AND_DEALLOCATE;
		}

		char *userHash;
		RakNet::RakString userFilename;
		FileList patchList;
		unsigned int i,j;
		for (i=0; i < threadData.clientList->fileList.Size(); i++)
		{
			userHash=threadData.clientList->fileList[i].data;
			userFilename=threadData.clientList->fileList[i].filename;
			bool sentAnything=false;

			// If the user file has a hash, check this hash against the hash
			// stored with the patch, for the file of the same name.
			if (userHash)
			{
				if (threadData.clientList->fileList[i].dataLengthBytes!=HASH_LENGTH)
				{
					// Malformed hash; drop the request.
					RakNet::OP_DELETE(threadData.clientList, _FILE_AND_LINE_);
					return RR_STOP_PROCESSING_AND_DEALLOCATE;
				}
				for (j=0; j < cache_patchedFiles.fileList.Size(); j++)
				{
					if (userFilename == cache_patchedFiles.fileList[j].filename)
					{
						if (memcmp(cache_patchedFiles.fileList[j].data, userHash, HASH_LENGTH)==0)
						{
							// Client's base hash matches the patch base; send the delta patch.
							RakAssert(cache_patchedFiles.fileList[j].context.op==PC_HASH_2_WITH_PATCH);
							patchList.AddFile(userFilename,userFilename, 0, cache_patchedFiles.fileList[j].dataLengthBytes, cache_patchedFiles.fileList[j].fileLengthBytes, cache_patchedFiles.fileList[j].context, true, false);
							sentAnything=true;
						}
						// else: hash mismatch -- full file will be sent below.
						break;
					}
				}
			}

			if (sentAnything==false)
			{
				// Nothing sent yet (no hash, wrong hash, or no cached patch):
				// look up the full updated file by name.
				// BUGFIX: the previous code indexed cache_updatedFiles with a
				// stale -- and, when userHash was null, uninitialized -- j,
				// then unconditionally broke out of the outer loop so at most
				// one such file was ever processed.
				for (j=0; j < cache_updatedFiles.fileList.Size(); j++)
				{
					if (userFilename == cache_updatedFiles.fileList[j].filename)
					{
						patchList.AddFile(userFilename,userFilename, 0, cache_updatedFiles.fileList[j].dataLengthBytes, cache_updatedFiles.fileList[j].fileLengthBytes, cache_updatedFiles.fileList[j].context, true, false);
						sentAnything=true;
						break;
					}
				}
			}

			if (sentAnything==false)
			{
				// Failure to find file in cache
				// Will fall to use database
				patchList.Clear();
				break;
			}
		}

		if (patchList.fileList.Size()>0)
		{
			// Entire request satisfied from cache: stream the files and tell
			// the client the patch session is finished.
			IncrementPatchingUserCount();
			fileListTransfer->Send(&patchList, 0, packet->systemAddress, threadData.setId, priority, orderingChannel, this, 262144*4*4);

			RakNet::BitStream bitStream1;
			bitStream1.Write((unsigned char) ID_AUTOPATCHER_FINISHED_INTERNAL);
			double t =(double) time(NULL);
			bitStream1.Write(t);
			SendUnified(&bitStream1, priority, RELIABLE_ORDERED, orderingChannel, packet->systemAddress, false);

			RakNet::OP_DELETE(threadData.clientList, _FILE_AND_LINE_);
			return RR_STOP_PROCESSING_AND_DEALLOCATE;
		}
	}

	// Cache miss or cache unusable. clientList may still be 0 here;
	// OP_DELETE is presumably null-safe -- the original code relied on that.
	RakNet::OP_DELETE(threadData.clientList, _FILE_AND_LINE_);
	if (PatchingUserLimitReached())
	{
		AddToWaitingQueue(packet);
		return RR_STOP_PROCESSING;
	}
	OnGetPatchInt(packet);
	return RR_STOP_PROCESSING_AND_DEALLOCATE;
}
// Synchronizes the database copy of an application with the files currently on
// disk under applicationDirectory. Inside one transaction it: (1) bumps the
// application's changeSetId, (2) diffs on-disk files against the newest DB
// versions, (3) inserts "deleted" rows for DB files no longer on disk,
// (4) inserts new content rows for new/changed files, and (5) generates and
// stores a binary patch from every prior createFile version to the new content.
// Returns false on any failure (after Rollback), with a message in lastError.
//
// NOTE(review): several error paths leak resources -- e.g. on
// mysql_stmt_prepare/mysql_stmt_bind_param failure for the patch UPDATE, the
// `patch` buffer and the open `res`/`queryResult` result sets are not
// released, and newFiles is not cleared on the later statement failures.
// Confirm and clean up.
bool AutopatcherMySQLRepository::UpdateApplicationFiles(const char *applicationName, const char *applicationDirectory, const char *userName, FileListProgress *cb)
{
	MYSQL_STMT *stmt;
	MYSQL_BIND bind[3];
	char query[512];
	FileList filesOnHarddrive;
	filesOnHarddrive.SetCallback(cb);
	int prepareResult;
	my_bool falseVar=false;
	RakNet::RakString escapedApplicationName = GetEscapedString(applicationName);
	// Enumerate and hash every file under the application directory.
	filesOnHarddrive.AddFilesFromDirectory(applicationDirectory,"", true, true, true, FileListNodeContext(0,0));
	if (filesOnHarddrive.fileList.Size()==0)
	{
		sprintf(lastError,"ERROR: Can't find files at %s in UpdateApplicationFiles\n",applicationDirectory);
		return false;
	}

	// Look up the application's primary key.
	sprintf(query, "SELECT applicationID FROM Applications WHERE applicationName='%s';", escapedApplicationName.C_String());
	int applicationID;
	if (!ExecuteQueryReadInt(query, &applicationID))
	{
		sprintf(lastError,"ERROR: %s not found in UpdateApplicationFiles\n",escapedApplicationName.C_String());
		return false;
	}

	// Everything below runs in a single transaction.
	if (!ExecuteBlockingCommand("BEGIN;"))
	{
		return false;
	}
	sprintf(query, "UPDATE Applications SET changeSetId = changeSetId + 1 where applicationID=%i;", applicationID);
	if (!ExecuteBlockingCommand(query))
	{
		Rollback();
		return false;
	}
	int changeSetId = 0;
	sprintf(query, "SELECT changeSetId FROM Applications WHERE applicationID=%i;", applicationID);
	if (!ExecuteQueryReadInt(query, &changeSetId))
	{
		Rollback();
		return false;
	}
	// +1 was added in the update
	changeSetId--;

	// Gets all newest files (latest fileId per filename for this application).
	sprintf(query, "SELECT filename, contentHash, createFile FROM FileVersionHistory "
		"JOIN (SELECT max(fileId) maxId FROM FileVersionHistory WHERE applicationId=%i GROUP BY fileName) MaxId "
		"ON FileVersionHistory.fileId = MaxId.maxId "
		"ORDER BY filename DESC;", applicationID);
	MYSQL_RES *result = 0;
	if (!ExecuteBlockingCommand(query, &result))
	{
		Rollback();
		return false;
	}
	DataStructures::List <FileInfo> newestFiles;
	MYSQL_ROW row;
	while ((row = mysql_fetch_row (result)) != 0)
	{
		FileInfo fi;
		fi.filename = row [0];
		fi.createFile = (atoi (row [2]) != 0);
		if (fi.createFile)
		{
			RakAssert(mysql_fetch_lengths (result) [1] == HASH_LENGTH); // check the data is sensible
			memcpy (fi.contentHash, row [1], HASH_LENGTH);
		}
		newestFiles.Insert (fi);
	}
	mysql_free_result(result);

	FileList newFiles;
	// Loop through files on filesOnHarddrive
	// If the file in filesOnHarddrive does not exist in the query result, or if
	// it does but the hash is different or non-existent, add this file to the
	// create list.
	for (unsigned fileListIndex=0; fileListIndex < filesOnHarddrive.fileList.Size(); fileListIndex++)
	{
		bool addFile=true;
		if (fileListIndex%10==0)
			printf("Hashing files %i/%i\n", fileListIndex+1, filesOnHarddrive.fileList.Size());
		const char * hardDriveFilename=filesOnHarddrive.fileList[fileListIndex].filename;
		const char * hardDriveHash=filesOnHarddrive.fileList[fileListIndex].data;
		for (unsigned i = 0; i != newestFiles.Size (); ++i)
		{
			const FileInfo & fi = newestFiles [i];
			// Filenames compare case-insensitively.
			if (_stricmp(hardDriveFilename, fi.filename)==0)
			{
				if (fi.createFile && memcmp(fi.contentHash, hardDriveHash, HASH_LENGTH)==0)
				{
					// File exists in database and is the same
					addFile=false;
				}
				break;
			}
		}
		// Unless set to false, file does not exist in query result or is different.
		if (addFile)
		{
			newFiles.AddFile(hardDriveFilename,hardDriveFilename, filesOnHarddrive.fileList[fileListIndex].data, filesOnHarddrive.fileList[fileListIndex].dataLengthBytes, filesOnHarddrive.fileList[fileListIndex].fileLengthBytes, FileListNodeContext(0,0), false);
		}
	}

	// Go through query results that are marked as create
	// If a file that is currently in the database is not on the harddrive, add
	// it to the delete list.
	FileList deletedFiles;
	for (unsigned i = 0; i != newestFiles.Size (); ++i)
	{
		const FileInfo & fi = newestFiles [i];
		if (!fi.createFile) continue; // If already false don't mark false again.
		bool fileOnHarddrive=false;
		for (unsigned fileListIndex=0; fileListIndex < filesOnHarddrive.fileList.Size(); fileListIndex++)
		{
			const char * hardDriveFilename=filesOnHarddrive.fileList[fileListIndex].filename;
			//hardDriveHash=filesOnHarddrive.fileList[fileListIndex].data;
			if (_stricmp(hardDriveFilename, fi.filename)==0)
			{
				fileOnHarddrive=true;
				break;
			}
		}
		if (!fileOnHarddrive)
			deletedFiles.AddFile(fi.filename,fi.filename,0,0,0,FileListNodeContext(0,0), false);
	}

	// files on harddrive no longer needed. Free this memory since generating
	// all the patches is memory intensive.
	filesOnHarddrive.Clear();

	// For each file in the delete list add a row indicating file deletion.
	for (unsigned fileListIndex=0; fileListIndex < deletedFiles.fileList.Size(); fileListIndex++)
	{
		if (fileListIndex%10==0)
			printf("Tagging deleted files %i/%i\n", fileListIndex+1, deletedFiles.fileList.Size());
		sprintf(query, "INSERT INTO FileVersionHistory(applicationID, filename, createFile, changeSetID, userName) VALUES (%i, '%s', FALSE,%i,'%s');", applicationID, GetEscapedString(deletedFiles.fileList[fileListIndex].filename).C_String(), changeSetId, GetEscapedString(userName).C_String());
		if (!ExecuteBlockingCommand (query))
		{
			Rollback();
			deletedFiles.Clear();
			newFiles.Clear();
			return false;
		}
	}

	// Clear the delete list as it is no longer needed.
	deletedFiles.Clear();

	// For each file in the create list.
	for (unsigned fileListIndex=0; fileListIndex < newFiles.fileList.Size(); fileListIndex++)
	{
		if (fileListIndex%10==0)
			printf("Adding file %i/%i\n", fileListIndex+1, newFiles.fileList.Size());
		const char * hardDriveFilename=newFiles.fileList[fileListIndex].filename;
		// data holds the hash followed by the file content; fileLengthBytes is
		// the content length only.
		const char * hardDriveData=newFiles.fileList[fileListIndex].data+HASH_LENGTH;
		const char * hardDriveHash=newFiles.fileList[fileListIndex].data;
		unsigned hardDriveDataLength=newFiles.fileList[fileListIndex].fileLengthBytes;

		// Find every prior createFile version of this file; each gets a patch
		// to the new content.
		sprintf( query, "SELECT fileID from FileVersionHistory WHERE applicationID=%i AND filename='%s' AND createFile=TRUE;", applicationID, GetEscapedString(hardDriveFilename).C_String() );
		MYSQL_RES * res = 0;
		if (!ExecuteBlockingCommand (query, &res))
		{
			Rollback();
			newFiles.Clear();
			return false;
		}

		// Create new patches for every create version.
		MYSQL_ROW row;
		while ((row = mysql_fetch_row (res)) != 0)
		{
			const char * fileID = row [0];
			// The last query handled all the relevant comparisons
			sprintf(query, "SELECT content from FileVersionHistory WHERE fileID=%s", fileID );
			MYSQL_RES * queryResult = 0;
			if (!ExecuteBlockingCommand (query, &queryResult))
			{
				Rollback();
				newFiles.Clear();
				mysql_free_result(res);
				return false;
			}
			// NOTE(review): queryRow is not checked for NULL before use --
			// confirm a row is always present for a fileID from the outer query.
			MYSQL_ROW queryRow = mysql_fetch_row (queryResult);
			const unsigned contentLength=mysql_fetch_lengths (queryResult) [0];
			const char * content=queryRow [0];

			// Diff old content -> new content. patch is heap-allocated by
			// CreatePatch and released with delete [] below.
			char *patch;
			unsigned patchLength;
			if (!CreatePatch(content, contentLength, (char *) hardDriveData, hardDriveDataLength, &patch, &patchLength))
			{
				strcpy(lastError,"CreatePatch failed.");
				Rollback();
				newFiles.Clear();
				mysql_free_result(res);
				mysql_free_result(queryResult);
				return false;
			}

			// Store the patch blob via a prepared statement (binary-safe).
			char buf[512];
			stmt = mysql_stmt_init(mySqlConnection);
			sprintf (buf, "UPDATE FileVersionHistory SET patch=? where fileID=%s;", fileID);
			if ((prepareResult=mysql_stmt_prepare(stmt, buf, (unsigned long) strlen(buf)))!=0)
			{
				// NOTE(review): leaks patch, res and queryResult on this path.
				strcpy (lastError, mysql_stmt_error (stmt));
				mysql_stmt_close(stmt);
				Rollback();
				return false;
			}
			memset(bind, 0, sizeof(bind));
			unsigned long l1;
			l1=patchLength;
			bind[0].buffer_type= MYSQL_TYPE_LONG_BLOB;
			bind[0].buffer= patch;
			bind[0].buffer_length= patchLength;
			bind[0].is_null= &falseVar;
			bind[0].length=&l1;
			if (mysql_stmt_bind_param(stmt, bind))
			{
				// NOTE(review): also leaks patch, res and queryResult.
				strcpy (lastError, mysql_stmt_error (stmt));
				mysql_stmt_close(stmt);
				Rollback();
				return false;
			}
			if (mysql_stmt_execute(stmt))
			{
				strcpy (lastError, mysql_stmt_error (stmt));
				mysql_stmt_close(stmt);
				Rollback();
				newFiles.Clear();
				mysql_free_result(res);
				mysql_free_result(queryResult);
				delete [] patch;
				return false;
			}
			mysql_stmt_close(stmt);
			delete [] patch;
			mysql_free_result(queryResult);
		}
		mysql_free_result(res);

		// Insert the new version row; filename, content and hash are bound as
		// parameters so binary content survives.
		stmt = mysql_stmt_init(mySqlConnection);
		sprintf(query, "INSERT INTO FileVersionHistory (applicationID, filename, fileLength, content, contentHash, createFile, changeSetID, userName) "
			"VALUES (%i, ?, %i,?,?, TRUE, %i, '%s' );", applicationID, hardDriveDataLength, changeSetId, GetEscapedString(userName).C_String());
		if ((prepareResult=mysql_stmt_prepare(stmt, query, (unsigned long) strlen(query)))!=0)
		{
			strcpy (lastError, mysql_stmt_error (stmt));
			mysql_stmt_close(stmt);
			Rollback();
			return false;
		}
		memset(bind, 0, sizeof(bind));
		unsigned long l2,l3,l4;
		l2=(unsigned long) strlen(hardDriveFilename);
		l3=hardDriveDataLength;
		l4=HASH_LENGTH;
		bind[0].buffer_type= MYSQL_TYPE_STRING;
		bind[0].buffer= (void*) hardDriveFilename;
		bind[0].buffer_length= (unsigned long) strlen(hardDriveFilename);
		bind[0].is_null= &falseVar;
		bind[0].length=&l2;
		bind[1].buffer_type= MYSQL_TYPE_LONG_BLOB;
		bind[1].buffer= (void*) hardDriveData;
		bind[1].buffer_length= hardDriveDataLength;
		bind[1].is_null= &falseVar;
		bind[1].length=&l3;
		bind[2].buffer_type= MYSQL_TYPE_TINY_BLOB;
		bind[2].buffer= (void*) hardDriveHash;
		bind[2].buffer_length= HASH_LENGTH;
		bind[2].is_null= &falseVar;
		bind[2].length=&l4;
		if (mysql_stmt_bind_param(stmt, bind))
		{
			strcpy (lastError, mysql_stmt_error (stmt));
			mysql_stmt_close(stmt);
			Rollback();
			return false;
		}
		if (mysql_stmt_execute(stmt))
		{
			strcpy (lastError, mysql_stmt_error (stmt));
			mysql_stmt_close(stmt);
			Rollback();
			return false;
		}
		mysql_stmt_close(stmt);
	}

	if (!ExecuteBlockingCommand("COMMIT;"))
	{
		Rollback ();
		return false;
	}
	return true;
}
// Handles a client patch request. First tries to satisfy the whole request
// from the in-memory cache (cache_patchedFiles for delta patches of files the
// client has an old hash for, cache_addedFiles for files the client lacks
// entirely). If any file cannot be served from cache, falls back to the
// database path (OnGetPatchInt), subject to the concurrent-patching limit.
//
// packet: payload is the message id, setId, last-update date, application
//         name, then the client's serialized FileList of missing/old files.
// Returns RR_STOP_PROCESSING when the packet was queued for later (caller
// must not deallocate), otherwise RR_STOP_PROCESSING_AND_DEALLOCATE.
PluginReceiveResult AutopatcherServer::OnGetPatch(Packet *packet)
{
	RakNet::BitStream inBitStream(packet->data, packet->length, false);
	ThreadData threadData;
	inBitStream.IgnoreBits(8); // skip the message id byte
	inBitStream.Read(threadData.setId);
	double lastUpdateDate;
	inBitStream.Read(lastUpdateDate);
	inBitStream.ReadCompressed(threadData.applicationName);
	threadData.clientList=0;

	// Check in-memory cache, use if possible rather than accessing database
	if (threadData.applicationName==cache_appName && lastUpdateDate!=0 && cacheLoaded && cache_minTime!=0 && lastUpdateDate>cache_minTime)
	{
		threadData.systemAddress=packet->systemAddress;
		threadData.server=this;
		threadData.clientList=RakNet::OP_NEW<FileList>( _FILE_AND_LINE_ );
		if (threadData.clientList->Deserialize(&inBitStream)==false)
		{
			RakNet::OP_DELETE(threadData.clientList, _FILE_AND_LINE_);
			return RR_STOP_PROCESSING_AND_DEALLOCATE;
		}
		if (threadData.clientList->fileList.Size()==0)
		{
			// A request with no files is a protocol violation.
			RakAssert(0);
			RakNet::OP_DELETE(threadData.clientList, _FILE_AND_LINE_);
			return RR_STOP_PROCESSING_AND_DEALLOCATE;
		}

		char *userHash;
		RakNet::RakString userFilename;
		FileList patchList;
		bool cacheUpdateFailed=false;
		unsigned int i,j;
		// FileList is the list of all files missing or changed as determined by the client
		for (i=0; i < threadData.clientList->fileList.Size(); i++)
		{
			userHash=threadData.clientList->fileList[i].data;
			userFilename=threadData.clientList->fileList[i].filename;
			if (userHash)
			{
				// If the user has a hash, check for this file in
				// cache_patchedFiles. If not found, or hash is wrong, use DB.
				if (threadData.clientList->fileList[i].dataLengthBytes!=HASH_LENGTH)
				{
					// Malformed hash; drop the request.
					RakNet::OP_DELETE(threadData.clientList, _FILE_AND_LINE_);
					return RR_STOP_PROCESSING_AND_DEALLOCATE;
				}
				for (j=0; j < cache_patchedFiles.fileList.Size(); j++)
				{
					if (userFilename == cache_patchedFiles.fileList[j].filename)
					{
						if (memcmp(cache_patchedFiles.fileList[j].data, userHash, HASH_LENGTH)==0)
						{
							// Send patch
							RakAssert(cache_patchedFiles.fileList[j].context.op==PC_HASH_2_WITH_PATCH);
							patchList.AddFile(userFilename,userFilename, 0, cache_patchedFiles.fileList[j].dataLengthBytes, cache_patchedFiles.fileList[j].fileLengthBytes, cache_patchedFiles.fileList[j].context, true, false);
						}
						else
						{
							// Bad hash
							cacheUpdateFailed=true;
						}
						break;
					}
				}
				// Loop ran to completion without a break => no entry matched.
				if (j==cache_patchedFiles.fileList.Size())
				{
					// Didn't find the patch even though the client has an older version of the file
					cacheUpdateFailed=true;
				}
			}
			else
			{
				// If the user does not have a hash, check for this file in cache_addedFiles. If not found, use DB
				for (j=0; j < cache_addedFiles.fileList.Size(); j++)
				{
					if (userFilename == cache_addedFiles.fileList[j].filename)
					{
						// Send added file
						patchList.AddFile(userFilename,userFilename, 0, cache_addedFiles.fileList[j].dataLengthBytes, cache_addedFiles.fileList[j].fileLengthBytes, cache_addedFiles.fileList[j].context, true, false);
						break;
					}
				}
				if (j==cache_addedFiles.fileList.Size())
				{
					// Didn't find the file in the cache even though the client asked for it
					cacheUpdateFailed=true;
				}
			}
			if (cacheUpdateFailed==true)
			{
				// Failure to find file in cache
				// Will fall to use database
				patchList.Clear();
				break;
			}
		}

		if (patchList.fileList.Size()>0)
		{
			// Entire request satisfied from cache. If the per-user limit
			// admits this client, stream the files and signal completion;
			// otherwise fall through to the queueing logic below.
			if (IncrementPatchingUserCount(packet->systemAddress))
			{
				fileListTransfer->Send(&patchList, 0, packet->systemAddress, threadData.setId, priority, orderingChannel, this, 262144*4*4);

				RakNet::BitStream bitStream1;
				bitStream1.Write((unsigned char) ID_AUTOPATCHER_FINISHED_INTERNAL);
				double t =(double) time(NULL);
				bitStream1.Write(t);
				SendUnified(&bitStream1, priority, RELIABLE_ORDERED, orderingChannel, packet->systemAddress, false);

				RakNet::OP_DELETE(threadData.clientList, _FILE_AND_LINE_);
				return RR_STOP_PROCESSING_AND_DEALLOCATE;
			}
		}
	}

	// Cache miss, cache unusable, or user limit hit. clientList may still be
	// 0 here; OP_DELETE is presumably null-safe -- the original relies on it.
	RakNet::OP_DELETE(threadData.clientList, _FILE_AND_LINE_);
	if (PatchingUserLimitReached())
	{
		AddToWaitingQueue(packet);
		return RR_STOP_PROCESSING;
	}
	OnGetPatchInt(packet);
	return RR_STOP_PROCESSING_AND_DEALLOCATE;
}