void Table::SortTable(Table::SortQuery *sortQueries, unsigned numSortQueries, Table::Row** out)
{
	unsigned i;
	unsigned outLength;
	DataStructures::List<unsigned> columnIndices;
	_sortQueries=sortQueries;
	_numSortQueries=numSortQueries;
	_columnIndices=&columnIndices;
	_columns=&columns;
	bool anyValid=false;

	for (i=0; i < numSortQueries; i++)
	{
		if (sortQueries[i].columnIndex<columns.Size() && columns[sortQueries[i].columnIndex].columnType!=BINARY)
		{
			columnIndices.Insert(sortQueries[i].columnIndex, _FILE_AND_LINE_);
			anyValid=true;
		}
		else
			columnIndices.Insert((unsigned)-1, _FILE_AND_LINE_); // Means don't check this column
	}

	DataStructures::Page<unsigned, Row*, _TABLE_BPLUS_TREE_ORDER> *cur;
	cur = rows.GetListHead();
	if (anyValid==false)
	{
		outLength=0;
		while (cur)
		{
			for (i=0; i < (unsigned)cur->size; i++)
			{
				out[(outLength)++]=cur->data[i];
			}
			cur=cur->next;
		}
		return;
	}

	// Start adding to ordered list.
	DataStructures::OrderedList<Row*, Row*, RowSort> orderedList;
	while (cur)
	{
		for (i=0; i < (unsigned)cur->size; i++)
		{
			RakAssert(cur->data[i]);
			orderedList.Insert(cur->data[i],cur->data[i], true, _FILE_AND_LINE_);
		}
		cur=cur->next;
	}

	outLength=0;
	for (i=0; i < orderedList.Size(); i++)
		out[(outLength)++]=orderedList[i];
}
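// A caller-side sketch, not part of the listing above: SortTable fills a caller-allocated Row*
// buffer, so the buffer must hold one pointer per table row. SortQuery::columnIndex appears in
// the function body; the QS_INCREASING_ORDER enum name and Table::GetRowCount() are assumptions,
// and the relevant RakNet headers are assumed to be included.
void SortOnFirstColumn(DataStructures::Table &table)
{
	DataStructures::Table::SortQuery query;
	query.columnIndex = 0;                                    // sort by the first column
	query.operation = DataStructures::Table::QS_INCREASING_ORDER; // assumed direction enum

	DataStructures::Table::Row **sorted =
		new DataStructures::Table::Row *[table.GetRowCount()];
	table.SortTable(&query, 1, sorted);
	// ... read the sorted Row pointers here ...
	delete [] sorted; // the rows themselves remain owned by the table
}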
Example no. 2
void GridSectorizer::GetEntries(DataStructures::List<void*>& intersectionList, const float minX, const float minY, const float maxX, const float maxY)
{
#ifdef _USE_ORDERED_LIST
	DataStructures::OrderedList<void*, void*>* cell;
#else
	DataStructures::List<void*>* cell;
#endif
	int xStart, yStart, xEnd, yEnd, xCur, yCur;
	unsigned index;
	xStart=WorldToCellXOffsetAndClamped(minX);
	yStart=WorldToCellYOffsetAndClamped(minY);
	xEnd=WorldToCellXOffsetAndClamped(maxX);
	yEnd=WorldToCellYOffsetAndClamped(maxY);

	intersectionList.Clear(true, _FILE_AND_LINE_);
	for (xCur=xStart; xCur <= xEnd; ++xCur)
	{
		for (yCur=yStart; yCur <= yEnd; ++yCur)
		{
			cell = grid+yCur*gridCellWidthCount+xCur;
			for (index=0; index < cell->Size(); ++index)
				intersectionList.Insert(cell->operator [](index), _FILE_AND_LINE_);
		}
	}
}
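// Hypothetical caller sketch: GetEntries clears the output list and then appends every entry
// stored in the cells overlapping the query rectangle, so a pointer can appear more than once
// if it was added to several cells. Assumes an initialized GridSectorizer and the usual RakNet
// headers.
void CollectOverlaps(GridSectorizer &grid, float minX, float minY, float maxX, float maxY)
{
	DataStructures::List<void*> hits;
	grid.GetEntries(hits, minX, minY, maxX, maxY);
	for (unsigned i = 0; i < hits.Size(); i++)
	{
		// hits[i] is whatever void* was stored in an overlapping cell
	}
}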
Example no. 3
bool Router::Send( char *data, BitSize_t bitLength, PacketPriority priority, PacketReliability reliability, char orderingChannel, SystemAddressList *recipients )
{
	RakAssert(data);
	RakAssert(bitLength);
	if (recipients->GetList()->Size()==0)
		return false;
	if (bitLength==0)
		return false;
	DataStructures::Tree<ConnectionGraph::SystemAddressAndGroupId> tree;
	SystemAddress root;
	root = rakPeerInterface->GetExternalID(rakPeerInterface->GetSystemAddressFromIndex(0));
	if (root==UNASSIGNED_SYSTEM_ADDRESS)
		return false;
	DataStructures::List<ConnectionGraph::SystemAddressAndGroupId> recipientList;
	unsigned i;
	for (i=0; i < recipients->Size(); i++)
		recipientList.Insert(ConnectionGraph::SystemAddressAndGroupId(recipients->GetList()->operator [](i),0, UNASSIGNED_RAKNET_GUID), __FILE__, __LINE__);
	if (graph->GetSpanningTree(tree, &recipientList, ConnectionGraph::SystemAddressAndGroupId(root,0,UNASSIGNED_RAKNET_GUID), 65535)==false)
		return false;

	RakNet::BitStream out;

	// Write timestamp first, if the user had a timestamp
	if (data[0]==ID_TIMESTAMP && bitLength >= BYTES_TO_BITS(sizeof(MessageID)+sizeof(RakNetTime)))
	{
		out.Write(data, sizeof(MessageID)+sizeof(RakNetTime));
		data+=sizeof(MessageID)+sizeof(RakNetTime);
		bitLength-=BYTES_TO_BITS(sizeof(MessageID)+sizeof(RakNetTime));
	}

	SendTree(priority, reliability, orderingChannel, &tree, data, bitLength, &out, recipients);
	return true;
}
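// Hedged sender-side sketch of the timestamp convention handled above: a message that wants
// timestamping starts with ID_TIMESTAMP followed by a RakNetTime, and Send() copies that prefix
// through untouched before routing the rest. ID_USER_PACKET_ENUM stands in for any user message id.
void BuildTimestampedMessage(RakNet::BitStream &msg)
{
	msg.Write((MessageID)ID_TIMESTAMP);
	msg.Write(RakNet::GetTime());              // RakNetTime stamp read by the receiver
	msg.Write((MessageID)ID_USER_PACKET_ENUM); // first byte of the actual user payload
	// ... user payload ...
}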
void LightweightDatabaseServer::RemoveRowsFromIP(PlayerID playerId)
{
	// Remove rows for tables that do so on a system disconnect
	DatabaseTable *databaseTable;
	DataStructures::List<unsigned> removeList;
	DataStructures::Page<unsigned, DataStructures::Table::Row*, _TABLE_BPLUS_TREE_ORDER> *cur;
	unsigned i,j;
	for (i=0; i < database.Size(); i++)
	{
		databaseTable=database[i];
		if (databaseTable->removeRowOnDisconnect)
		{
			DataStructures::BPlusTree<unsigned, DataStructures::Table::Row*, _TABLE_BPLUS_TREE_ORDER> &rows = databaseTable->table.GetRows();
			cur = rows.GetListHead();
			while (cur)
			{
				// Mark dropped entities
				for (j=0; j < (unsigned)cur->size; j++)
				{
					if (RowHasIP(cur->data[j], playerId, databaseTable->systemIdColumnIndex))
						removeList.Insert(cur->keys[j]);
				}
				cur=cur->next;
			}

			for (j=0; j < removeList.Size(); j++)
				databaseTable->table.RemoveRow(removeList[j]);
			removeList.Clear(true);
		}
	}	
}
Example no. 5
void VariadicSQLParser::GetTypeMappingIndices( const char *format, DataStructures::List<IndexAndType> &indices )
{
	bool previousCharWasPercentSign;
	unsigned int i;
	unsigned int typeMappingIndex;
	indices.Clear(false, _FILE_AND_LINE_);
	unsigned int len = (unsigned int) strlen(format);
	previousCharWasPercentSign=false;
	for (i=0; i < len; i++)
	{
		if (previousCharWasPercentSign==true )
		{
			typeMappingIndex = GetTypeMappingIndex(format[i]);
			if (typeMappingIndex!=(unsigned int) -1)
			{
				IndexAndType iat;
				iat.strIndex=i-1;
				iat.typeMappingIndex=typeMappingIndex;
				indices.Insert(iat, _FILE_AND_LINE_ );
			}
		}

		previousCharWasPercentSign=format[i]=='%';
	}
}
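// Hypothetical usage sketch (strIndex and typeMappingIndex come from the function above; placing
// IndexAndType in the VariadicSQLParser namespace, the field types, and the included headers are
// assumptions): scan a printf-style SQL format string and report where each '%' specifier starts.
void PrintSpecifierPositions(void)
{
	DataStructures::List<VariadicSQLParser::IndexAndType> specifiers;
	VariadicSQLParser::GetTypeMappingIndices("INSERT INTO log (id, msg) VALUES (%i, %s);", specifiers);
	for (unsigned int i = 0; i < specifiers.Size(); i++)
		printf("specifier at string index %u, type mapping %u\n",
			(unsigned int) specifiers[i].strIndex, (unsigned int) specifiers[i].typeMappingIndex);
}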
Example no. 6
void ConnectionGraph::SerializeIgnoreListAndBroadcast(RakNet::BitStream *outBitstream, DataStructures::OrderedList<SystemAddress,SystemAddress> &ignoreList, RakPeerInterface *peer)
{
	DataStructures::List<SystemAddress> sendList;
	unsigned i;
	for (i=0; i < participantList.Size(); i++)
	{
		if (ignoreList.HasData(participantList[i])==false)
			sendList.Insert(participantList[i], __FILE__, __LINE__);
	}
	if (sendList.Size()==0)
		return;

	SystemAddress self = peer->GetExternalID(sendList[0]);
	ignoreList.Insert(self,self, false, __FILE__, __LINE__);
	outBitstream->Write((unsigned short) (ignoreList.Size()+sendList.Size()));
	for (i=0; i < ignoreList.Size(); i++)
		outBitstream->Write(ignoreList[i]);
	for (i=0; i < sendList.Size(); i++)
		outBitstream->Write(sendList[i]);

	for (i=0; i < sendList.Size(); i++)
	{
		peer->Send(outBitstream, LOW_PRIORITY, RELIABLE_ORDERED, connectionGraphChannel, sendList[i], false);
	}
}
void NatPunchthroughServer::OnClosedConnection(SystemAddress systemAddress, RakNetGUID rakNetGUID, PI2_LostConnectionReason lostConnectionReason )
{
	(void) lostConnectionReason;
	(void) systemAddress;

	unsigned int i=0;
	bool objectExists;
	i = users.GetIndexFromKey(rakNetGUID, &objectExists);
	if (objectExists)
	{
		RakNet::BitStream outgoingBs;
		DataStructures::List<User *> freedUpInProgressUsers;
		User *user = users[i];
		User *otherUser;
		unsigned int connectionAttemptIndex;
		ConnectionAttempt *connectionAttempt;
		for (connectionAttemptIndex=0; connectionAttemptIndex < user->connectionAttempts.Size(); connectionAttemptIndex++)
		{
			connectionAttempt=user->connectionAttempts[connectionAttemptIndex];
			outgoingBs.Reset();
			if (connectionAttempt->recipient==user)
			{
				otherUser=connectionAttempt->sender;
			}
			else
			{
				otherUser=connectionAttempt->recipient;
			}

			// 05/28/09 Previously only told sender about ID_NAT_CONNECTION_TO_TARGET_LOST
			// However, recipient may be expecting it due to external code
			// In that case, recipient would never get any response if the sender dropped
			outgoingBs.Write((MessageID)ID_NAT_CONNECTION_TO_TARGET_LOST);
			outgoingBs.Write(rakNetGUID);
			outgoingBs.Write(connectionAttempt->sessionId);
			rakPeerInterface->Send(&outgoingBs,HIGH_PRIORITY,RELIABLE_ORDERED,0,otherUser->systemAddress,false);

			// 4/22/09 - Bug: was checking inProgress, legacy variable not used elsewhere
			if (connectionAttempt->attemptPhase==ConnectionAttempt::NAT_ATTEMPT_PHASE_GETTING_RECENT_PORTS)
			{
				otherUser->isReady=true;
				freedUpInProgressUsers.Insert(otherUser, _FILE_AND_LINE_ );
			}

			otherUser->DeleteConnectionAttempt(connectionAttempt);
		}

		RakNet::OP_DELETE(users[i], _FILE_AND_LINE_);
		users.RemoveAtIndex(i);

		for (i=0; i < freedUpInProgressUsers.Size(); i++)
		{
			StartPunchthroughForUser(freedUpInProgressUsers[i]);
		}
	}
}
Example no. 8
void NatPunchthrough::ConnectionRequest::GetAddressList(RakPeerInterface *rakPeer, DataStructures::List<SystemAddress> &fallbackAddresses, SystemAddress publicAddress, SystemAddress privateAddress, bool excludeConnected)
{
	SystemAddress fallback;
	fallbackAddresses.Clear(true);
	fallback.binaryAddress=publicAddress.binaryAddress;

	fallback.port=publicAddress.port;
	if (excludeConnected==false || rakPeer->IsConnected(fallback,true)==false)
		fallbackAddresses.Insert(fallback);

	if (privateAddress!=publicAddress && privateAddress!=UNASSIGNED_SYSTEM_ADDRESS && (excludeConnected==false ||rakPeer->IsConnected(privateAddress,true)==false))
		fallbackAddresses.Insert(privateAddress);

	fallback.port=publicAddress.port+1;
	if (excludeConnected==false || rakPeer->IsConnected(fallback,true)==false)
		fallbackAddresses.Insert(fallback);
	fallback.port=publicAddress.port+2;
	if (excludeConnected==false || rakPeer->IsConnected(fallback,true)==false)
		fallbackAddresses.Insert(fallback);

	fallback.port=rakPeer->GetInternalID().port;
	if (fallbackAddresses.GetIndexOf(fallback)==(unsigned) -1 && (excludeConnected==false || rakPeer->IsConnected(fallback,true)==false))
		fallbackAddresses.Insert(fallback);
	fallback.port=rakPeer->GetInternalID().port+1;
	if (fallbackAddresses.GetIndexOf(fallback)==(unsigned) -1 && (excludeConnected==false || rakPeer->IsConnected(fallback,true)==false))
		fallbackAddresses.Insert(fallback);
	fallback.port=rakPeer->GetInternalID().port+2;
	if (fallbackAddresses.GetIndexOf(fallback)==(unsigned) -1 && (excludeConnected==false || rakPeer->IsConnected(fallback,true)==false))
		fallbackAddresses.Insert(fallback);

	// Try to keep down the number of fallbacks or the router may run out of mappings
	/*
	fallback.port=publicAddress.port+3;
	if (excludeConnected==false || rakPeer->IsConnected(fallback,true)==false)
		fallbackAddresses.Insert(fallback);
	fallback.port=publicAddress.port+4;
	if (excludeConnected==false || rakPeer->IsConnected(fallback,true)==false)
		fallbackAddresses.Insert(fallback);

	fallback.port=rakPeer->GetInternalID().port+3;
	if (fallbackAddresses.GetIndexOf(fallback)==(unsigned) -1 && (excludeConnected==false || rakPeer->IsConnected(fallback,true)==false))
		fallbackAddresses.Insert(fallback);
	fallback.port=rakPeer->GetInternalID().port+4;
	if (fallbackAddresses.GetIndexOf(fallback)==(unsigned) -1 && (excludeConnected==false || rakPeer->IsConnected(fallback,true)==false))
		fallbackAddresses.Insert(fallback);
		*/

}
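// Hedged caller sketch (assumes ConnectionRequest and GetAddressList are reachable from the
// calling code; "request" is a hypothetical pointer): gather the public, private, and nearby-port
// candidate addresses, skipping ones already connected, then try them in order.
void TryFallbackAddresses(NatPunchthrough::ConnectionRequest *request, RakPeerInterface *rakPeer,
	SystemAddress publicAddress, SystemAddress privateAddress)
{
	DataStructures::List<SystemAddress> candidates;
	request->GetAddressList(rakPeer, candidates, publicAddress, privateAddress, true);
	for (unsigned i = 0; i < candidates.Size(); i++)
	{
		// attempt to reach candidates[i]
	}
}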
Example no. 9
/**
* _findfirst - equivalent
*/
long _findfirst(const char *name, _finddata_t *f)
{
    std::string nameCopy = name;
    std::string filter;

    // This is linux only, so don't bother with '\'
    const char *lastSep = strrchr(name, '/');
    if (!lastSep)
    {
        // Only a filter pattern was given; search the current directory.
        filter = nameCopy;
        nameCopy = ".";
    }
    else
    {
        // Strip the filter pattern from the directory name, leaving the
        // trailing '/' intact.
        filter = lastSep + 1;
        size_t sepIndex = lastSep - name;
        nameCopy.erase(sepIndex + 1, nameCopy.length() - sepIndex - 1);
    }

    DIR *dir = opendir(nameCopy.c_str());

    if (!dir)
        return -1;

    _findinfo_t *fi = new _findinfo_t;
    fi->filter = filter.c_str();
    fi->dirName = nameCopy.c_str();  // we need to remember this for stat()
    fi->openedDir = dir;
    fileInfo.Insert(fi, _FILE_AND_LINE_);

    long ret = fileInfo.Size() - 1;

    // Retrieve the first file. We cannot rely on the first item
    // being '.'
    if (_findnext(ret, f) == -1)
        return -1;
    else
        return ret;
}
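// Hedged usage sketch of this _findfirst emulation (the _findnext call mirrors the one above;
// _findclose is assumed to exist in the same shim, and "logs/*.txt" is only an illustration):
void ListLogFiles(void)
{
    _finddata_t entry;
    long handle = _findfirst("logs/*.txt", &entry);
    if (handle == -1)
        return;
    do
    {
        printf("%s\n", entry.name);     // 'name' is filled in by _findnext above
    } while (_findnext(handle, &entry) != -1);
    _findclose(handle);
}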
Example no. 10
void LightweightDatabaseServer::RemoveRowsFromIP(SystemAddress systemAddress)
{
	// Remove rows for tables that do so on a system disconnect
	DatabaseTable *databaseTable;
	DataStructures::List<unsigned> removeList;
	DataStructures::Table::Row* row;
	DataStructures::Page<unsigned, DataStructures::Table::Row*, _TABLE_BPLUS_TREE_ORDER> *cur;
	unsigned i,j;
	for (i=0; i < database.Size(); i++)
	{
		databaseTable=database[i];
		if ((unsigned int) databaseTable->SystemAddressColumnIndex!=(unsigned int)-1)
		{
			const DataStructures::BPlusTree<unsigned, DataStructures::Table::Row*, _TABLE_BPLUS_TREE_ORDER> &rows = databaseTable->table.GetRows();
			cur = rows.GetListHead();
			while (cur)
			{
				// Mark dropped entities
				for (j=0; j < (unsigned)cur->size; j++)
				{
					if (RowHasIP(cur->data[j], systemAddress, databaseTable->SystemAddressColumnIndex))
					{
						if (databaseTable->removeRowOnDisconnect)
						{
							removeList.Insert(cur->keys[j], __FILE__, __LINE__);
						}
						else if (databaseTable->removeRowOnPingFailure)
						{
							row = cur->data[j];
							row->cells[databaseTable->nextPingSendColumnIndex]->i=(double)(RakNet::GetTime()+SEND_PING_INTERVAL+(randomMT()%1000));
						}
					}
				}
				cur=cur->next;
			}
		}

		for (j=0; j < removeList.Size(); j++)
			databaseTable->table.RemoveRow(removeList[j]);
		removeList.Clear(true, __FILE__,__LINE__);
	}
}
bool AutopatcherMySQLRepository::UpdateApplicationFiles(const char *applicationName, const char *applicationDirectory, const char *userName, FileListProgress *cb)
{
	MYSQL_STMT    *stmt;
	MYSQL_BIND    bind[3];
	char query[512];
	FileList filesOnHarddrive;
	filesOnHarddrive.SetCallback(cb);
	int prepareResult;
	my_bool falseVar=false;
	RakNet::RakString escapedApplicationName = GetEscapedString(applicationName);
	filesOnHarddrive.AddFilesFromDirectory(applicationDirectory,"", true, true, true, FileListNodeContext(0,0));
	if (filesOnHarddrive.fileList.Size()==0)
	{
		sprintf(lastError,"ERROR: Can't find files at %s in UpdateApplicationFiles\n",applicationDirectory);
		return false;
	}

	sprintf(query, "SELECT applicationID FROM Applications WHERE applicationName='%s';", escapedApplicationName.C_String());
	int applicationID;

	if (!ExecuteQueryReadInt(query, &applicationID))
	{
		sprintf(lastError,"ERROR: %s not found in UpdateApplicationFiles\n",escapedApplicationName.C_String());
		return false;
	}

	if (!ExecuteBlockingCommand("BEGIN;"))
	{
		return false;
	}
	sprintf(query, "UPDATE Applications SET changeSetId = changeSetId + 1 where applicationID=%i;", applicationID);
	if (!ExecuteBlockingCommand(query))
	{
		Rollback ();
		return false;
	}
	int changeSetId = 0;
	sprintf(query, "SELECT changeSetId FROM Applications WHERE applicationID=%i;", applicationID);
	if (!ExecuteQueryReadInt(query, &changeSetId))
	{
		Rollback ();
		return false;
	}

	// +1 was added in the update
	changeSetId--;

	// Gets all newest files
	sprintf(query, "SELECT filename, contentHash, createFile FROM FileVersionHistory "
	               "JOIN (SELECT max(fileId) maxId FROM FileVersionHistory WHERE applicationId=%i GROUP BY fileName) MaxId "
	               "ON FileVersionHistory.fileId = MaxId.maxId "
	               "ORDER BY filename DESC;", applicationID);

	MYSQL_RES *result = 0;
	if (!ExecuteBlockingCommand(query, &result))
	{
		Rollback();
		return false;
	}
	DataStructures::List <FileInfo> newestFiles;
	MYSQL_ROW row;
	while ((row = mysql_fetch_row (result)) != 0)
	{
	    FileInfo fi;
		fi.filename = row [0];
		fi.createFile = (atoi (row [2]) != 0);
		if (fi.createFile)
		{
			RakAssert(mysql_fetch_lengths (result) [1] == HASH_LENGTH);  // check the data is sensible
			memcpy (fi.contentHash, row [1], HASH_LENGTH);
		}
	    newestFiles.Insert (fi);
	}    
	mysql_free_result(result);


	FileList newFiles;
	// Loop through files on filesOnHarddrive
	// If the file in filesOnHarddrive does not exist in the query result, or if it does but the hash is different or non-existent, add this file to the create list
	for (unsigned fileListIndex=0; fileListIndex < filesOnHarddrive.fileList.Size(); fileListIndex++)
	{
		bool addFile=true;
		if (fileListIndex%10==0)
			printf("Hashing files %i/%i\n", fileListIndex+1, filesOnHarddrive.fileList.Size());

		const char * hardDriveFilename=filesOnHarddrive.fileList[fileListIndex].filename;
		const char * hardDriveHash=filesOnHarddrive.fileList[fileListIndex].data;

		for (unsigned i = 0; i != newestFiles.Size (); ++i)
		{
			const FileInfo & fi = newestFiles [i];
                        
			if (_stricmp(hardDriveFilename, fi.filename)==0)
			{
				if (fi.createFile && memcmp(fi.contentHash, hardDriveHash, HASH_LENGTH)==0)
				{
					// File exists in database and is the same
					addFile=false;
				}

				break;
			}
		}

		// Unless addFile was set to false above, the file does not exist in the query result or has a different hash.
		if (addFile)
		{
			newFiles.AddFile(hardDriveFilename,hardDriveFilename, filesOnHarddrive.fileList[fileListIndex].data, filesOnHarddrive.fileList[fileListIndex].dataLengthBytes, filesOnHarddrive.fileList[fileListIndex].fileLengthBytes, FileListNodeContext(0,0), false);
		}
	}
	
	// Go through query results that are marked as create
	// If a file that is currently in the database is not on the harddrive, add it to the delete list
	FileList deletedFiles;
	for (unsigned i = 0; i != newestFiles.Size (); ++i)
	{
		const FileInfo & fi = newestFiles [i];
		if (!fi.createFile)
			continue; // If already false don't mark false again.

		bool fileOnHarddrive=false;
		for (unsigned fileListIndex=0; fileListIndex < filesOnHarddrive.fileList.Size(); fileListIndex++)
		{
			const char * hardDriveFilename=filesOnHarddrive.fileList[fileListIndex].filename;
			//hardDriveHash=filesOnHarddrive.fileList[fileListIndex].data;

			if (_stricmp(hardDriveFilename, fi.filename)==0)
			{
				fileOnHarddrive=true;
				break;
			}
		}

		if (!fileOnHarddrive)
			deletedFiles.AddFile(fi.filename,fi.filename,0,0,0,FileListNodeContext(0,0), false);
	}

	// The files on the harddrive are no longer needed. Free this memory, since generating all the patches is memory intensive.
	filesOnHarddrive.Clear();

	// For each file in the delete list add a row indicating file deletion
	for (unsigned fileListIndex=0; fileListIndex < deletedFiles.fileList.Size(); fileListIndex++)
	{
		if (fileListIndex%10==0)
			printf("Tagging deleted files %i/%i\n", fileListIndex+1, deletedFiles.fileList.Size());

		sprintf(query, "INSERT INTO FileVersionHistory(applicationID, filename, createFile, changeSetID, userName) VALUES (%i, '%s', FALSE,%i,'%s');", 
			applicationID, GetEscapedString(deletedFiles.fileList[fileListIndex].filename).C_String(), changeSetId, GetEscapedString(userName).C_String());
		
		if (!ExecuteBlockingCommand (query))
		{
			Rollback();
			deletedFiles.Clear();
			newFiles.Clear();
			return false;
		}
	}
	
	// Clear the delete list as it is no longer needed.
	deletedFiles.Clear();

	// For each file in the create list
	for (unsigned fileListIndex=0; fileListIndex < newFiles.fileList.Size(); fileListIndex++)
	{
		if (fileListIndex%10==0)
			printf("Adding file %i/%i\n", fileListIndex+1, newFiles.fileList.Size());
		const char * hardDriveFilename=newFiles.fileList[fileListIndex].filename;
		const char * hardDriveData=newFiles.fileList[fileListIndex].data+HASH_LENGTH;
		const char * hardDriveHash=newFiles.fileList[fileListIndex].data;
		unsigned     hardDriveDataLength=newFiles.fileList[fileListIndex].fileLengthBytes;

		sprintf( query, "SELECT fileID from FileVersionHistory WHERE applicationID=%i AND filename='%s' AND createFile=TRUE;", applicationID, GetEscapedString(hardDriveFilename).C_String() );

		MYSQL_RES * res = 0;
		if (!ExecuteBlockingCommand (query, &res))
		{
			Rollback();
			newFiles.Clear();
			return false;
		}
		
		// Create new patches for every create version
		MYSQL_ROW row;

		while ((row = mysql_fetch_row (res)) != 0)
		{
			const char * fileID = row [0];
			
			// The last query handled all the relevant comparisons
			sprintf(query, "SELECT content from FileVersionHistory WHERE fileID=%s", fileID );
			MYSQL_RES * queryResult = 0;
			if (!ExecuteBlockingCommand (query, &queryResult))
			{
				Rollback();
				newFiles.Clear();
				mysql_free_result(res);
				return false;
			}
		
			MYSQL_ROW queryRow = mysql_fetch_row (queryResult);

			const unsigned contentLength=mysql_fetch_lengths (queryResult) [0];
			const char * content=queryRow [0];

			char *patch;
			unsigned patchLength;	
			if (!CreatePatch(content, contentLength, (char *) hardDriveData, hardDriveDataLength, &patch, &patchLength))
			{
				strcpy(lastError,"CreatePatch failed.");
				Rollback();

				newFiles.Clear();
				mysql_free_result(res);
				mysql_free_result(queryResult);
				return false;
			}
			
			char buf[512];
			stmt = mysql_stmt_init(mySqlConnection);
			sprintf (buf, "UPDATE FileVersionHistory SET patch=? where fileID=%s;", fileID);
			if ((prepareResult=mysql_stmt_prepare(stmt, buf, (unsigned long) strlen(buf)))!=0)
			{
				strcpy (lastError, mysql_stmt_error (stmt));
				mysql_stmt_close(stmt);
				Rollback();
				return false;
			}
			memset(bind, 0, sizeof(bind));

			unsigned long l1;
			l1=patchLength;
			bind[0].buffer_type= MYSQL_TYPE_LONG_BLOB;
			bind[0].buffer= patch;
			bind[0].buffer_length= patchLength;
			bind[0].is_null= &falseVar;
			bind[0].length=&l1;

			if (mysql_stmt_bind_param(stmt, bind))
			{
				strcpy (lastError, mysql_stmt_error (stmt));
				mysql_stmt_close(stmt);
				Rollback();
				return false;
			}

			if (mysql_stmt_execute(stmt))
			{
				strcpy (lastError, mysql_stmt_error (stmt));
				mysql_stmt_close(stmt);
				Rollback();
				newFiles.Clear();
				mysql_free_result(res);
				mysql_free_result(queryResult);
				delete [] patch;
				return false;
			}

			mysql_stmt_close(stmt);
			delete [] patch;

			mysql_free_result(queryResult);
		}
         mysql_free_result(res);

		 stmt = mysql_stmt_init(mySqlConnection);
		 sprintf(query, "INSERT INTO FileVersionHistory (applicationID, filename, fileLength, content, contentHash, createFile, changeSetID, userName) "
			 "VALUES (%i, ?, %i,?,?, TRUE, %i, '%s' );", 
			 applicationID, hardDriveDataLength, changeSetId, GetEscapedString(userName).C_String());

		 if ((prepareResult=mysql_stmt_prepare(stmt, query, (unsigned long) strlen(query)))!=0)
		 {
			 strcpy (lastError, mysql_stmt_error (stmt));
			 mysql_stmt_close(stmt);
			 Rollback();
			 return false;
		 }
		 memset(bind, 0, sizeof(bind));

		 unsigned long l2,l3,l4;
		 l2=(unsigned long) strlen(hardDriveFilename);
		 l3=hardDriveDataLength;
		 l4=HASH_LENGTH;
		 bind[0].buffer_type= MYSQL_TYPE_STRING;
		 bind[0].buffer= (void*) hardDriveFilename;
		 bind[0].buffer_length= (unsigned long) strlen(hardDriveFilename);
		 bind[0].is_null= &falseVar;
		 bind[0].length=&l2;

		 bind[1].buffer_type= MYSQL_TYPE_LONG_BLOB;
		 bind[1].buffer= (void*) hardDriveData;
		 bind[1].buffer_length= hardDriveDataLength;
		 bind[1].is_null= &falseVar;
		 bind[1].length=&l3;

		 bind[2].buffer_type= MYSQL_TYPE_TINY_BLOB;
		 bind[2].buffer= (void*) hardDriveHash;
		 bind[2].buffer_length= HASH_LENGTH;
		 bind[2].is_null= &falseVar;
		 bind[2].length=&l4;

		 if (mysql_stmt_bind_param(stmt, bind))
		 {
			 strcpy (lastError, mysql_stmt_error (stmt));
			 mysql_stmt_close(stmt);
			 Rollback();
			 return false;
		 }

		 if (mysql_stmt_execute(stmt))
		 {
			 strcpy (lastError, mysql_stmt_error (stmt));
			 mysql_stmt_close(stmt);
			 Rollback();
			 return false;
		 }

		 mysql_stmt_close(stmt);
	}

	if (!ExecuteBlockingCommand("COMMIT;"))
	{
        Rollback ();
		return false;
	}

	return true;
}
Example no. 12
void Table::QueryTable(unsigned *columnIndicesSubset, unsigned numColumnSubset, FilterQuery *inclusionFilters, unsigned numInclusionFilters, unsigned *rowIds, unsigned numRowIDs, Table *result)
{
	unsigned i;
	DataStructures::List<unsigned> columnIndicesToReturn;

	// Clear the result table.
	result->Clear();

	if (columnIndicesSubset && numColumnSubset>0)
	{
		for (i=0; i < numColumnSubset; i++)
		{
			if (columnIndicesSubset[i]>=0 && columnIndicesSubset[i]<columns.Size())
				columnIndicesToReturn.Insert(columnIndicesSubset[i]);
		}
	}
	else
	{
		for (i=0; i < columns.Size(); i++)
			columnIndicesToReturn.Insert(i);
	}

	if (columnIndicesToReturn.Size()==0)
		return; // No valid columns specified

	for (i=0; i < columnIndicesToReturn.Size(); i++)
	{
		result->AddColumn(columns[columnIndicesToReturn[i]].columnName,columns[columnIndicesToReturn[i]].columnType);
	}

	// Get the column indices of the filter queries.
	DataStructures::List<unsigned> inclusionFilterColumnIndices;
	if (inclusionFilters && numInclusionFilters>0)
	{
		for (i=0; i < numInclusionFilters; i++)
		{
			if (inclusionFilters[i].columnName[0])
				inclusionFilters[i].columnIndex=ColumnIndex(inclusionFilters[i].columnName);
			if (inclusionFilters[i].columnIndex>=0 && inclusionFilters[i].columnIndex<columns.Size())
				inclusionFilterColumnIndices.Insert(inclusionFilters[i].columnIndex);
			else
				inclusionFilterColumnIndices.Insert((unsigned)-1);
		}
	}

	if (rowIds==0 || numRowIDs==0)
	{
		// All rows
		DataStructures::Page<unsigned, Row*, _TABLE_BPLUS_TREE_ORDER> *cur = rows.GetListHead();
		while (cur)
		{
			for (i=0; i < (unsigned)cur->size; i++)
			{
				QueryRow(inclusionFilterColumnIndices, columnIndicesToReturn, cur->keys[i], cur->data[i], inclusionFilters, result);
			}
			cur=cur->next;
		}
	}
	else
	{
		// Specific rows
		Row *row;
		for (i=0; i < numRowIDs; i++)
		{
			if (rows.Get(rowIds[i], row))
			{
				QueryRow(inclusionFilterColumnIndices, columnIndicesToReturn, rowIds[i], row, inclusionFilters, result);
			}
		}
	}
}
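// Minimal caller sketch: passing null column, filter, and row-id arrays asks QueryTable to copy
// every column of every row into the caller-supplied result table (the code paths for null
// arguments are visible in the function above).
void CopyWholeTable(DataStructures::Table &table)
{
	DataStructures::Table result;
	table.QueryTable(0, 0, 0, 0, 0, 0, &result);
	// result now holds its own copy of all columns and rows
}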
Example no. 13
/**
* _findfirst - equivalent
*/
long _findfirst(const char *name, _finddata_t *f)
{

	//   char* nameCopy = new char[sizeof(name)];
	//   memset(nameCopy, '\0', sizeof(nameCopy));
	//   
	//   strcpy(nameCopy, name);
	//
	//   char* filter = new char[sizeof(nameCopy)];
	//   memset(filter, '\0', sizeof(filter));

	int length = strlen(name)+1;
	char* nameCopy = new char[length];
	memset(nameCopy, '\0', length);

	strcpy(nameCopy, name);

	char* filter = new char[length];
	memset(filter, '\0', length);

	char* lastSep = strrchr(nameCopy,'/');
	if(!lastSep)
	{
		strcpy(filter, nameCopy);
		strcpy(nameCopy, ".");
	}
	else
	{
		strcpy(filter, lastSep+1);
		*lastSep = 0;
	}

	DIR* dir = opendir(nameCopy);

	if(!dir)
	{
		return -1;
	}

	_findinfo_t* fi = new _findinfo_t;
	strcpy(fi->filter,filter);
	fi->openedDir = dir;

	while(true)
	{
		dirent* entry = readdir(dir);
		if(entry == 0)
			break;

		if(fnmatch(fi->filter, entry->d_name, 0) == 0) // third argument is the fnmatch flags bitmask; no special flags needed
		{
			strcpy(f->name, entry->d_name);
			break;
		}
	}


	fileInfo.Insert(fi);
	return fileInfo.Size()-1;

	//   return 0;
}
void LightweightDatabaseServer::Update(RakPeerInterface *peer)
{
	RakNetTime time=0;
	DatabaseTable *databaseTable;
	DataStructures::Page<unsigned, DataStructures::Table::Row*, _TABLE_BPLUS_TREE_ORDER> *cur;
	unsigned i,j;
	DataStructures::Table::Row* row;
	DataStructures::List<unsigned> removeList;
	PlayerID playerId;

	// Periodic ping, if removing systems that do not respond to pings.
	for (i=0; i < database.Size(); i++)
	{
		databaseTable=database[i];

		if (databaseTable->removeRowOnPingFailure)
		{
			// Reading the time is slow - only do it once if necessary.
			if (time==0)
				 time = RakNet::GetTime();

			if (databaseTable->nextRowPingCheck < time)
			{
				databaseTable->nextRowPingCheck=time+1000+(randomMT()%1000);
				DataStructures::BPlusTree<unsigned, DataStructures::Table::Row*, _TABLE_BPLUS_TREE_ORDER> &rows = databaseTable->table.GetRows();
				cur = rows.GetListHead();
				while (cur)
				{
					// Mark dropped entities
					for (j=0; j < (unsigned)cur->size; j++)
					{
						row = cur->data[j];
						if (time - row->cells[databaseTable->lastPingResponseColumnIndex]->i > DROP_SERVER_INTERVAL)
							removeList.Insert(cur->keys[j]);
					}
					cur=cur->next;
				}

				// Remove dropped entities
				for (j=0; j < removeList.Size(); j++)
					databaseTable->table.RemoveRow(removeList[j]);
				removeList.Clear(true);

				cur = rows.GetListHead();
				// Ping remaining entities if they are not connected.  If they are connected just increase the ping interval.
				while (cur)
				{
					for (j=0; j < (unsigned)cur->size; j++)
					{
						row = cur->data[j];
						if (row->cells[databaseTable->nextPingSendColumnIndex]->i < (int) time)
						{
							row->cells[databaseTable->systemIdColumnIndex]->Get((char*)&playerId, 0);
							if (peer->GetIndexFromPlayerID(playerId)==-1)
							{
								peer->Ping(playerId.ToString(false), playerId.port, false);
							}
							else
							{
								// Consider the fact that they are connected to be a ping response
								row->cells[databaseTable->lastPingResponseColumnIndex]->i=time;
							}
							
							row->cells[databaseTable->nextPingSendColumnIndex]->i=time+SEND_PING_INTERVAL+(randomMT()%1000);
						}
					}
					cur=cur->next;
				}
			}
		}
	}
}
Example no. 15
void MasterServer::HandleQuery(Packet *packet)
{
	DataStructures::List<GameServer*> serversWithKeysList;
	char ruleIdentifier[256];
	unsigned index, serverIndex;
	int key;
	bool queryAll;
	BitStream outputBitStream;
	BitStream compressedString(packet->data, packet->length, false);
	compressedString.IgnoreBits(8*sizeof(unsigned char));

	queryAll=true;

	while (compressedString.GetNumberOfUnreadBits()>0)
	{
		// Generate a list of the indices of the servers that have one or more of the specified keys.
		stringCompressor->DecodeString(ruleIdentifier, 256, &compressedString);
		if (ruleIdentifier[0]==0)
			// If we fail to read the first string, queryAll remains true.
			break;
		
		queryAll=false;

		if (IsReservedRuleIdentifier(ruleIdentifier))
			continue;

		for (index=0; index < gameServerList.serverList.Size(); index++)
		{
			if (gameServerList.serverList[index]->connectionIdentifier==UNASSIGNED_PLAYER_ID)
				continue;

			if (gameServerList.serverList[index]->FindKey(ruleIdentifier))
			{
				serverIndex=serversWithKeysList.GetIndexOf(gameServerList.serverList[index]);
				if (serverIndex==MAX_UNSIGNED_LONG)
				{
					gameServerList.serverList[index]->numberOfKeysFound=1;
					serversWithKeysList.Insert(gameServerList.serverList[index]);
				}
				else
				{
					serversWithKeysList[serverIndex]->numberOfKeysFound++;
				}
			}
		}
	}
	
	// Write the packet id
	if (queryAll)
		outputBitStream.Write((unsigned char) ID_MASTER_SERVER_SET_SERVER);
	else
		outputBitStream.Write((unsigned char) ID_MASTER_SERVER_UPDATE_SERVER);
	if (queryAll)
	{
		// Write the number of servers
		outputBitStream.WriteCompressed((unsigned short)gameServerList.serverList.Size());

		for (index=0; index < gameServerList.serverList.Size(); index++)
		{
			// Write the whole server
			SerializeServer(gameServerList.serverList[index], &outputBitStream);	
		}		
	}
	else
	{
		compressedString.ResetReadPointer();
		compressedString.IgnoreBits(8*sizeof(unsigned char));

		// Write the number of servers with requested keys
		outputBitStream.WriteCompressed((unsigned short)serversWithKeysList.Size());

		// For each server, write the header which consists of the IP/PORT.
		// Then go through the list of requested keys and write those
		for (index=0; index < serversWithKeysList.Size(); index++)
		{
			SerializePlayerID(&(serversWithKeysList[index]->connectionIdentifier), &outputBitStream);

			outputBitStream.WriteCompressed((unsigned short)serversWithKeysList[index]->numberOfKeysFound);

			// Rewind the query so this server's requested rule identifiers can be re-read.
			compressedString.ResetReadPointer();
			compressedString.IgnoreBits(8*sizeof(unsigned char));
			while (compressedString.GetNumberOfUnreadBits()>0)
			{
				// Serialize each requested, non-reserved rule that this server defines.
				stringCompressor->DecodeString(ruleIdentifier, 256, &compressedString);
				if (ruleIdentifier[0]==0)
					break;
				if (IsReservedRuleIdentifier(ruleIdentifier))
					continue;

				serversWithKeysList[index]->FindKey(ruleIdentifier);
				key=serversWithKeysList[index]->keyIndex;
				if (key>=0)
					SerializeRule(serversWithKeysList[index]->serverRules[key], &outputBitStream);
			}
		}
	}

	rakPeer->Send(&outputBitStream, MEDIUM_PRIORITY, RELIABLE, 0, packet->playerId, false);
}
Example no. 16
void LightweightDatabaseServer::Update(void)
{
	RakNetTime time=0;
	DatabaseTable *databaseTable;
	DataStructures::Page<unsigned, DataStructures::Table::Row*, _TABLE_BPLUS_TREE_ORDER> *cur;
	unsigned i,j;
	DataStructures::Table::Row* row;
	DataStructures::List<unsigned> removeList;
	SystemAddress systemAddress;

	// Periodic ping, if removing systems that do not respond to pings.
	for (i=0; i < database.Size(); i++)
	{
		databaseTable=database[i];

		if (databaseTable->removeRowOnPingFailure)
		{
			// Reading the time is slow - only do it once if necessary.
			if (time==0)
				time = RakNet::GetTime();

			if (databaseTable->nextRowPingCheck < time)
			{
				databaseTable->nextRowPingCheck=time+1000+(randomMT()%1000);
				const DataStructures::BPlusTree<unsigned, DataStructures::Table::Row*, _TABLE_BPLUS_TREE_ORDER> &rows = databaseTable->table.GetRows();
				cur = rows.GetListHead();
				while (cur)
				{
					// Mark dropped entities
					for (j=0; j < (unsigned)cur->size; j++)
					{
						row = cur->data[j];
						row->cells[databaseTable->SystemAddressColumnIndex]->Get((char*)&systemAddress, 0);
						if (rakPeerInterface->IsConnected(systemAddress)==false)
						{
							if (time > time - (unsigned int) row->cells[databaseTable->lastPingResponseColumnIndex]->i &&
								time - (unsigned int) row->cells[databaseTable->lastPingResponseColumnIndex]->i > (unsigned int) DROP_SERVER_INTERVAL)
							{
								removeList.Insert(cur->keys[j], __FILE__, __LINE__);
							}
							else
							{
								if (row->cells[databaseTable->nextPingSendColumnIndex]->i < (int) time)
								{
									char str1[64];
									systemAddress.ToString(false, str1);
									rakPeerInterface->Ping(str1, systemAddress.port, false);
									row->cells[databaseTable->nextPingSendColumnIndex]->i=(double)(time+SEND_PING_INTERVAL+(randomMT()%1000));
								}
							}
						}
					}
					cur=cur->next;
				}

				// Remove dropped entities
				for (j=0; j < removeList.Size(); j++)
					databaseTable->table.RemoveRow(removeList[j]);
				removeList.Clear(true, __FILE__,__LINE__);

			}
		}
	}
}