PyRep *PyPacket::Encode()
{
    PyTuple *arg_tuple = new PyTuple(6);

    //command
    arg_tuple->items[0] = new PyInt(type);

    //source
    arg_tuple->items[1] = source.Encode();

    //dest
    arg_tuple->items[2] = dest.Encode();

    //unknown3
    if(userid == 0)
        arg_tuple->items[3] = new PyNone();
    else
        arg_tuple->items[3] = new PyInt(userid);

    //payload
    //TODO: we don't really need to clone this if we can figure out a way to say "this is read only"
    //or if we can change this encode method to consume the PyPacket (which will almost always be the case)
    arg_tuple->items[4] = payload;
    PyIncRef(payload);

    //named arguments
    if(named_payload == NULL)
    {
        arg_tuple->items[5] = new PyNone();
    }
    else
    {
        arg_tuple->items[5] = named_payload;
        PyIncRef(named_payload);
    }

    return new PyObject( new PyString( type_string ), arg_tuple );
}
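/* Editorial sketch (not from the original source): illustrates the ownership
 * convention Encode() above relies on - the returned PyObject owns one
 * reference to the payload (taken by the PyIncRef() call), while the PyPacket
 * keeps its own, so both can be released independently. Names below mirror
 * the surrounding code; PyDecRef() is assumed to release a reference exactly
 * as it is used elsewhere in these functions. */
static void Example_EncodeKeepsPayload( PyPacket* packet )
{
    PyRep* rep = packet->Encode();  // Encode() increfs packet->payload for the new tuple
    // ... marshal and send 'rep' here ...
    PyDecRef( rep );                // drops the encoded object and the references it holds
    // 'packet' still owns its payload and named_payload and remains usable
}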
void ClientSession::EncodeChanges( PyDict* into )
{
    PyDict::const_iterator cur, end;
    cur = mSession->begin();
    end = mSession->end();
    for(; cur != end; cur++)
    {
        PyString* str = cur->first->AsString();
        PyTuple* value = cur->second->AsTuple();

        PyRep* last = value->GetItem( 0 );
        PyRep* current = value->GetItem( 1 );

        if( last->hash() != current->hash() )
        {
            // Duplicate tuple
            PyTuple* t = new PyTuple( 2 );

            t->SetItem( 0, last );
            PyIncRef( last );
            t->SetItem( 1, current );
            PyIncRef( current );

            into->SetItem( str, t );
            PyIncRef( str );

            // Update our tuple
            value->SetItem( 0, current );
            PyIncRef( current );
        }
    }

    mDirty = false;
}
PyCachedObject *PyCachedObject::Clone() const
{
    PyCachedObject *res = new PyCachedObject();

    res->timestamp = timestamp;
    res->version = version;
    res->nodeID = nodeID;
    res->shared = shared;
    res->cache = (PyBuffer *) cache;
    PyIncRef(cache);
    res->compressed = compressed;
    res->objectID = objectID;
    PyIncRef(objectID);

    return res;
}
PyObject *PyCachedObject::Encode()
{
    PyTuple *arg_tuple = new PyTuple(7);

    PyTuple *versiont = new PyTuple(2);
    versiont->items[0] = new PyLong(timestamp);
    versiont->items[1] = new PyInt(version);

    arg_tuple->items[0] = versiont;

    arg_tuple->items[1] = new PyNone();

    arg_tuple->items[2] = new PyInt(nodeID);

    arg_tuple->items[3] = new PyInt(shared?1:0);

    //compression or not, we want to encode this into bytes so it doesn't
    //get cloned in object form just to be encoded later
    /*cache->EncodeData();
    if(compressed)
    {
        uint8 *buf = new uint8[cache->length];
        uint32 deflen = DeflatePacket(cache->data, cache->length, buf, cache->length);
        if(deflen == 0 || deflen >= cache->length)
        {
            //failed to deflate or it did no good (client checks this)
            memcpy(buf, cache->data, cache->length);
            deflen = cache->length;
            compressed = false;
        }
        //buf is consumed:
        arg_tuple->items[4] = new PyBuffer(&buf, deflen);
    }
    else
    {
        //TODO: we don't really need to clone this if we can figure out a way to say "this is read only"
        //or if we can change this encode method to consume the PyCachedObject (which will almost always be the case)
        arg_tuple->items[4] = cache->Clone();
    }*/
    //TODO: we don't really need to clone this if we can figure out a way to say "this is read only"
    //or if we can change this encode method to consume the PyCachedObject (which will almost always be the case)
    arg_tuple->items[4] = cache;
    PyIncRef(cache);

    arg_tuple->items[5] = new PyInt(compressed?1:0);

    //same cloning statement as above.
    arg_tuple->items[6] = objectID;
    PyIncRef(objectID);

    return new PyObject( new PyString( "objectCaching.CachedObject" ), arg_tuple );
}
PyObject *CachedObjectMgr::GetCachedObject(const PyRep *objectID)
{
    const std::string str = OIDToString(objectID);

    CachedObjMapItr res = m_cachedObjects.find(str);
    if(res == m_cachedObjects.end())
        return NULL;

    PyCachedObject co;
    co.timestamp = res->second->timestamp;
    co.version = res->second->version;
    co.nodeID = HackCacheNodeID;    //hack, doesn't matter until we have multi-node networks.
    co.shared = true;
    co.objectID = res->second->objectID;
    PyIncRef(res->second->objectID);
    co.cache = res->second->cache;
    if(res->second->cache->content().size() == 0 || res->second->cache->content()[0] == MarshalHeaderByte)
        co.compressed = false;
    else
        co.compressed = true;

    sLog.Debug("CachedObjMgr","Returning cached object '%s' with checksum 0x%x", str.c_str(), co.version);

    PyObject *result = co.Encode();
    co.cache = NULL;    //avoid a copy
    return result;
}
void CachedObjectMgr::_UpdateCache(const PyRep *objectID, PyBuffer **buffer)
{
    //this is the hard one..
    CacheRecord *r = new CacheRecord;
    r->timestamp = Win32TimeNow();
    r->objectID = (PyRep*)objectID;
    PyIncRef(objectID); // retake ownership

    r->cache = *buffer;
    *buffer = NULL;

    r->version = CRC32::Generate( &r->cache->content()[0], r->cache->content().size() );

    const std::string str = OIDToString(objectID);

    //find and destroy any older version of this object.
    CachedObjMapItr res = m_cachedObjects.find(str);
    if(res != m_cachedObjects.end())
    {
        sLog.Debug("CachedObjMgr","Destroying old cached object with ID '%s' of length %u with checksum 0x%x",
                   str.c_str(), res->second->cache->content().size(), res->second->version);
        SafeDelete( res->second );
    }

    sLog.Debug("CachedObjMgr","Registering new cached object with ID '%s' of length %u with checksum 0x%x",
               str.c_str(), r->cache->content().size(), r->version);
    m_cachedObjects[str] = r;
}
void UserError::AddKeyword( const char* name, PyRep* value )
{
    // We need 2 refs ... the first one is given to us,
    // but we must create the second one ...
    PyIncRef( value );

    _GetTupleKeywords()->SetItemString( name, value );
    _GetDictKeywords()->SetItemString( name, value );
}
PyCallArgs::PyCallArgs(Client *c, PyTuple* tup, PyDict* dict)
: client(c), tuple(tup)
{
    PyIncRef( tup );

    PyDict::const_iterator cur, end;
    cur = dict->begin();
    end = dict->end();
    for(; cur != end; cur++)
    {
        if(!cur->first->IsString())
        {
            _log(SERVICE__ERROR, "Non-string key in call named arguments. Skipping.");
            cur->first->Dump(SERVICE__ERROR, " ");
            continue;
        }

        byname[ cur->first->AsString()->content() ] = cur->second;
        PyIncRef( cur->second );
    }
}
CRowSet* CFilterRowSet::NewRowset( PyRep* key )
{
    DBRowDescriptor* rowDesc = _GetRowDesc();
    PyIncRef( rowDesc );

    CRowSet* row = new CRowSet( &rowDesc );
    dict().SetItem( key, row );

    return row;
}
PyPackedRow* CIndexedRowSet::NewRow( PyRep* key )
{
    DBRowDescriptor* rowDesc = _GetRowDesc();
    PyIncRef( rowDesc );

    PyPackedRow* row = new PyPackedRow( rowDesc );
    dict().SetItem( key, row );

    return row;
}
PyException& PyException::operator=( const PyException& oth )
{
    // Take the new reference before releasing the old one so that
    // self-assignment cannot destroy the substream we are about to keep.
    if( NULL != oth.ssException )
        PyIncRef( oth.ssException );

    PySafeDecRef( ssException );
    ssException = oth.ssException;

    return *this;
}
PyResult& PyResult::operator=( const PyResult& oth )
{
    // Same ordering as PyException::operator=: take the new reference first
    // so self-assignment is safe.
    if( NULL != oth.ssResult )
        PyIncRef( oth.ssResult );

    PySafeDecRef( ssResult );
    ssResult = oth.ssResult;

    return *this;
}
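/* Editorial sketch (not from the original source): why the two assignment
 * operators above take the new reference before releasing the old one. With
 * the opposite order, self-assignment of an object holding the only reference
 * to its substream would destroy that substream before re-referencing it. */
static void Example_SelfAssignmentIsSafe( PyResult& res )
{
    res = res;  // must leave res.ssResult valid; incref-before-decref guarantees it
}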
PyPackedRow* CRowSet::NewRow()
{
    DBRowDescriptor* rowDesc = _GetRowDesc();
    PyIncRef( rowDesc );

    PyPackedRow* row = new PyPackedRow( rowDesc );
    list().AddItem( row );

    return row;
}
PyPacket *PyPacket::Clone() const
{
    PyPacket *res = new PyPacket();

    res->type_string = type_string;
    res->type = type;
    res->source = source;
    res->dest = dest;
    res->userid = userid;
    res->payload = (PyTuple *) payload;
    PyIncRef(payload);

    if(named_payload == NULL)
    {
        res->named_payload = NULL;
    }
    else
    {
        res->named_payload = (PyDict *) named_payload;
        PyIncRef(named_payload);
    }

    return res;
}
PyObject *CachedObjectMgr::CacheRecord::EncodeHint() const
{
    objectCaching_CachedObject_spec spec;

    spec.objectID = objectID;
    PyIncRef(objectID);
    spec.nodeID = HackCacheNodeID;
    spec.timestamp = timestamp;
    spec.version = version;

    return spec.Encode();
}
/**
 * LoadCachedFromFile
 *
 * Load a cached object from file.
 */
bool CachedObjectMgr::LoadCachedFromFile(const std::string &cacheDir, const PyRep *objectID)
{
    const std::string str = OIDToString(objectID);

    std::string filename(cacheDir);
    filename += "/" + str + ".cache";

    FILE *f = fopen(filename.c_str(), "rb");
    if(f == NULL)
        return false;

    CacheFileHeader header;
    if(fread(&header, sizeof(header), 1, f) != 1)
    {
        fclose(f);
        return false;
    }

    /* check if it's a valid cache file */
    if(header.magic != CacheFileMagic)
    {
        fclose(f);
        return false;
    }

    Buffer* buf = new Buffer( header.length );
    if( fread( &(*buf)[0], sizeof( uint8 ), header.length, f ) != header.length )
    {
        SafeDelete( buf );
        fclose( f );
        return false;
    }
    fclose( f );

    CachedObjMapItr res = m_cachedObjects.find( str );
    if( res != m_cachedObjects.end() )
        SafeDelete( res->second );

    CacheRecord* cache = new CacheRecord;
    cache->objectID = (PyRep*)objectID;
    PyIncRef(objectID);
    cache->cache = new PyBuffer( &buf );
    cache->timestamp = header.timestamp;
    cache->version = header.version;

    m_cachedObjects[ str ] = cache;

    return true;
}
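/* Editorial sketch (not from the original source): the on-disk layout that
 * LoadCachedFromFile() above expects - a CacheFileHeader whose magic,
 * timestamp, version and length fields are read by the loader, followed by
 * 'length' bytes of marshaled cache data. The parameter types below and the
 * exact CacheFileHeader field types are assumptions; a real writer may differ. */
static bool Example_WriteCacheBlob( const std::string& filename,
                                    uint64 timestamp, uint32 version,
                                    const uint8* data, uint32 length )
{
    FILE* f = fopen( filename.c_str(), "wb" );
    if( f == NULL )
        return false;

    CacheFileHeader header;
    header.magic = CacheFileMagic;
    header.timestamp = timestamp;
    header.version = version;
    header.length = length;

    bool ok = ( fwrite( &header, sizeof( header ), 1, f ) == 1 )
           && ( fwrite( data, sizeof( uint8 ), length, f ) == length );

    fclose( f );
    return ok;
}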
PyTuple *EVENotificationStream::Encode()
{
    PyTuple *t4 = new PyTuple(2);
    t4->items[0] = new PyInt(1);
    //see notes in other objects about what we could do to avoid this clone.
    t4->items[1] = args;
    PyIncRef(args);

    PyTuple *t3 = new PyTuple(2);
    t3->items[0] = new PyInt(0);
    t3->items[1] = t4;

    PyTuple *t2 = new PyTuple(2);
    t2->items[0] = new PyInt(0);
    t2->items[1] = new PySubStream(t3);

    PyTuple *t1 = new PyTuple(2);
    t1->items[0] = t2;
    t1->items[1] = new PyNone();

    return(t1);

    /*
    //remoteObject
    if(remoteObject == 0)
        arg_tuple->items[0] = new PyString(remoteObjectStr.c_str());
    else
        arg_tuple->items[0] = new PyInt(remoteObject);

    //method name
    arg_tuple->items[1] = new PyString(method.c_str());

    //args
    //TODO: we don't really need to clone this if we can figure out a way to say "this is read only"
    //or if we can change this encode method to consume the PyCallStream (which will almost always be the case)
    arg_tuple->items[2] = args->Clone();

    //options
    if(included_options == 0)
    {
        arg_tuple->items[3] = new PyNone();
    }
    else
    {
        PyDict *d = new PyDict();
        arg_tuple->items[3] = d;
        if(included_options & oMachoVersion)
        {
            d->items[ new PyString("machoVersion") ] = new PyInt( macho_version );
        }
    }
    return(arg_tuple);
    */
}
void CachedObjectMgr::UpdateCacheFromSS(const std::string &objectID, PySubStream **in_cached_data)
{
    PyCachedObjectDecoder cache;
    if(!cache.Decode(in_cached_data))
    {
        _log(SERVICE__ERROR, "Failed to decode cache stream");
        return;
    }

    PyString* str = new PyString( objectID );
    PyBuffer* buf = cache.cache->data();
    PyIncRef( buf );

    _UpdateCache(str, &buf);

    PyDecRef( str );
}
PyDict *DBResultToPackedRowDict( DBQueryResult &result, uint32 key_index )
{
    DBRowDescriptor *header = new DBRowDescriptor( result );

    PyDict *res = new PyDict();

    DBResultRow row;
    for( uint32 i = 0; result.GetRow( row ); i++ )
    {
        res->SetItem( DBColumnToPyRep(row, key_index), CreatePackedRow( row, header ) );
        PyIncRef( header );
    }

    PyDecRef( header );
    return res;
}
PyList *DBResultToPackedRowList( DBQueryResult &result )
{
    DBRowDescriptor *header = new DBRowDescriptor( result );

    PyList *res = new PyList( result.GetRowCount() );

    DBResultRow row;
    for( uint32 i = 0; result.GetRow( row ); i++ )
    {
        res->SetItem( i, CreatePackedRow( row, header ) );
        PyIncRef( header );
    }

    PyDecRef( header );
    return res;
}
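/* Editorial sketch (not from the original source): reference accounting for
 * the shared DBRowDescriptor in the two converters above. The descriptor is
 * created with one reference; each CreatePackedRow() call appears to store one
 * reference inside the new row, so the loop re-increfs to keep the local
 * reference alive, and the trailing PyDecRef() releases that local reference
 * once the rows collectively own the descriptor. Typical use: */
static PyList* Example_PackRows( DBQueryResult& result )
{
    PyList* rows = DBResultToPackedRowList( result );   // rows share one DBRowDescriptor
    // ... send or cache 'rows' ...
    // PyDecRef( rows ) later releases every row and, with them, the descriptor.
    return rows;
}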
PyObject *PyCachedObjectDecoder::EncodeHint()
{
    PyTuple *arg_tuple = new PyTuple(3);

    PyTuple *versiont = new PyTuple(2);
    versiont->items[0] = new PyLong(timestamp);
    versiont->items[1] = new PyInt(version);

    arg_tuple->items[0] = objectID;
    PyIncRef(objectID);

    arg_tuple->items[1] = new PyInt(nodeID);

    arg_tuple->items[2] = versiont;

    return new PyObject( new PyString( "util.CachedObject" ), arg_tuple );
}
/* function not used */
PyTuple *DBResultToPackedRowListTuple( DBQueryResult &result )
{
    DBRowDescriptor * header = new DBRowDescriptor( result );

    size_t row_count = result.GetRowCount();
    PyList * list = new PyList( row_count );

    DBResultRow row;
    uint32 i = 0;
    while( result.GetRow(row) )
    {
        list->SetItem( i++, CreatePackedRow( row, header ) );
        PyIncRef( header );
    }

    PyTuple * root = new PyTuple(2);
    root->SetItem( 0, header );
    root->SetItem( 1, list );

    return root;
}
PyRep *StationDB::GetSolarSystem(uint32 solarSystemID)
{
    PyRep* ret = (PyRep*)g_station_db_storage.find(solarSystemID);
    PyIncRef(ret);
    return ret;

    // old code for reference.
    /*DBQueryResult res;

    if(!sDatabase.RunQuery(res,
        "SELECT "
        "   solarSystemID,"                 // nr
        "   solarSystemName,"               // string
        "   x, y, z,"                       // double
        "   radius,"                        // double
        "   security,"                      // double
        "   constellationID,"               // nr
        "   factionID,"                     // nr
        "   sunTypeID,"                     // nr
        "   regionID,"                      // crap
        "   NULL AS allianceID,"            // nr
        "   0 AS sovereigntyLevel,"         // nr
        "   0 AS constellationSovereignty"  // nr
        " FROM mapSolarSystems"
        " WHERE solarSystemID=%u", solarSystemID
    ))
    {
        _log(SERVICE__ERROR, "Error in GetSolarSystem query: %s", res.error.c_str());
        return NULL;
    }

    DBResultRow row;
    if(!res.GetRow(row))
    {
        _log(SERVICE__ERROR, "Error in GetSolarSystem query: no solarsystem for id %d", solarSystemID);
        return NULL;
    }

    return(DBRowToRow(row));*/
}
bool PyCachedCall::Decode(PySubStream **in_ss)
{
    PySubStream *ss = *in_ss;   //consume
    *in_ss = NULL;

    PySafeDecRef( result );

    ss->DecodeData();
    if(ss->decoded() == NULL)
    {
        sLog.Error("PyCachedCall","Unable to decode initial stream for PyCachedCall");
        PyDecRef( ss );
        return false;
    }

    if(!ss->decoded()->IsDict())
    {
        sLog.Error("PyCachedCall","Cached call substream does not contain a dict: %s", ss->decoded()->TypeString());
        PyDecRef( ss );
        return false;
    }

    PyDict *po = (PyDict *) ss->decoded();

    PyDict::const_iterator cur, end;
    cur = po->begin();
    end = po->end();
    for(; cur != end; cur++)
    {
        if(!cur->first->IsString())
            continue;
        PyString *key = (PyString *) cur->first;

        if( key->content() == "lret" )
        {
            result = cur->second;
            PyIncRef(result);
        }
    }

    PyDecRef( ss );
    return result != NULL;
}
EVENotificationStream *EVENotificationStream::Clone() const
{
    EVENotificationStream *res = new EVENotificationStream();
    res->args = (PyTuple *) args;
    PyIncRef(args);
    return res;
}
bool PyPacket::Decode(PyRep **in_packet)
{
    PyRep *packet = *in_packet; //consume
    *in_packet = NULL;

    PySafeDecRef(payload);
    PySafeDecRef(named_payload);

    if(packet->IsChecksumedStream())
    {
        PyChecksumedStream* cs = packet->AsChecksumedStream();
        //TODO: check cs->checksum

        packet = cs->stream();
        PyIncRef( packet );
        PyDecRef( cs );
    }

    //Dragon nuance... it gets wrapped again
    if(packet->IsSubStream())
    {
        PySubStream* ss = packet->AsSubStream();
        ss->DecodeData();

        if(ss->decoded() == NULL)
        {
            codelog(NET__PACKET_ERROR, "failed: unable to decode initial packet substream.");
            PyDecRef(packet);
            return false;
        }

        packet = ss->decoded();
        PyIncRef( packet );
        PyDecRef( ss );
    }

    if(!packet->IsObject())
    {
        codelog(NET__PACKET_ERROR, "failed: packet body is not an 'Object': %s", packet->TypeString());
        PyDecRef(packet);
        return false;
    }

    PyObject *packeto = (PyObject *) packet;
    type_string = packeto->type()->content();

    if(!packeto->arguments()->IsTuple())
    {
        codelog(NET__PACKET_ERROR, "failed: packet body does not contain a tuple");
        PyDecRef(packet);
        return false;
    }

    PyTuple *tuple = (PyTuple *) packeto->arguments();

    if(tuple->items.size() != 7)
    {
        codelog(NET__PACKET_ERROR, "failed: packet body does not contain a tuple of length 7");
        PyDecRef(packet);
        return false;
    }

    if(!tuple->items[0]->IsInt())
    {
        codelog(NET__PACKET_ERROR, "failed: First main tuple element is not an integer");
        PyDecRef(packet);
        return false;
    }

    PyInt *typer = (PyInt *) tuple->items[0];
    switch(typer->value())
    {
    case AUTHENTICATION_REQ:
    case AUTHENTICATION_RSP:
    case IDENTIFICATION_REQ:
    case IDENTIFICATION_RSP:
    case CALL_REQ:
    case CALL_RSP:
    case TRANSPORTCLOSED:
    case RESOLVE_REQ:
    case RESOLVE_RSP:
    case NOTIFICATION:
    case ERRORRESPONSE:
    case SESSIONCHANGENOTIFICATION:
    case SESSIONINITIALSTATENOTIFICATION:
    case PING_REQ:
    case PING_RSP:
        type = (MACHONETMSG_TYPE) typer->value();
        break;
    default:
        codelog(NET__PACKET_ERROR, "failed: Unknown message type %" PRIu64, typer->value());
        PyDecRef(packet);
        return false;
    }

    //source address
    if(!source.Decode(tuple->items[1]))
    {
        //error printed in decoder
        PyDecRef(packet);
        return false;
    }

    //dest address
    if(!dest.Decode(tuple->items[2]))
    {
        //error printed in decoder
        PyDecRef(packet);
        return false;
    }

    if(tuple->items[3]->IsInt())
    {
        PyInt *i = (PyInt *) tuple->items[3];
        userid = i->value();
    }
    else if(tuple->items[3]->IsNone())
    {
        userid = 0;
    }
    else
    {
        codelog(NET__PACKET_ERROR, "failed: User ID has invalid type");
        PyDecRef(packet);
        return false;
    }

    //payload
    if(!(tuple->items[4]->IsBuffer() || tuple->items[4]->IsTuple()))
    {
        codelog(NET__PACKET_ERROR, "failed: Fifth main tuple element is not a buffer or tuple");
        PyDecRef(packet);
        return false;
    }
    payload = (PyTuple *) tuple->items[4];
    tuple->items[4] = NULL; //we keep this one

    //options dict
    if(tuple->items[5]->IsNone())
    {
        named_payload = NULL;
    }
    else if(tuple->items[5]->IsDict())
    {
        named_payload = (PyDict *) tuple->items[5];
        tuple->items[5] = NULL; //we keep this too.
    }
    else
    {
        codelog(NET__PACKET_ERROR, "failed: Sixth main tuple element is not a dict");
        PyDecRef(packet);
        return false;
    }

    PyDecRef(packet);
    return true;
}
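/* Editorial sketch (not from the original source): typical consuming use of
 * PyPacket::Decode() above. The function takes ownership of the input rep and
 * NULLs the caller's pointer whether or not decoding succeeds, so the caller
 * only has to clean up the PyPacket itself on failure. */
static PyPacket* Example_DecodePacket( PyRep** rep )
{
    PyPacket* packet = new PyPacket();
    if( !packet->Decode( rep ) )    // '*rep' is consumed and set to NULL here
    {
        SafeDelete( packet );
        return NULL;
    }
    return packet;
}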
bool PyCachedObjectDecoder::Decode(PySubStream **in_ss)
{
    PySubStream *ss = *in_ss;   //consume
    *in_ss = NULL;

    PySafeDecRef( cache );
    PySafeDecRef( objectID );

    ss->DecodeData();
    if(ss->decoded() == NULL)
    {
        sLog.Error("PyCachedObjectDecoder","Unable to decode initial stream for PyCachedObject");
        PyDecRef( ss );
        return false;
    }

    if(!ss->decoded()->IsObject())
    {
        sLog.Error("PyCachedObjectDecoder","Cache substream does not contain an object: %s", ss->decoded()->TypeString());
        PyDecRef( ss );
        return false;
    }

    PyObject *po = (PyObject *) ss->decoded();
    //TODO: could check type string, don't care... (should be objectCaching.CachedObject)

    if(!po->arguments()->IsTuple())
    {
        sLog.Error("PyCachedObjectDecoder","Cache object's args is not a tuple: %s", po->arguments()->TypeString());
        PyDecRef( ss );
        return false;
    }

    PyTuple *args = (PyTuple *) po->arguments();

    if(args->items.size() != 7)
    {
        sLog.Error("PyCachedObjectDecoder","Cache object's args tuple has %lu elements instead of 7", args->items.size());
        PyDecRef( ss );
        return false;
    }

    if(!args->items[0]->IsTuple())
    {
        sLog.Error("PyCachedObjectDecoder","Cache object's arg %d is not a Tuple: %s", 0, args->items[0]->TypeString());
        PyDecRef( ss );
        return false;
    }
    //ignore unknown [1]
    /*if(!args->items[1]->IsInt())
    {
        _log(CLIENT__ERROR, "Cache object's arg %d is not a None: %s", 1, args->items[1]->TypeString());
        PyDecRef( ss );
        return false;
    }*/
    if(!args->items[2]->IsInt())
    {
        sLog.Error("PyCachedObjectDecoder","Cache object's arg %d is not an Integer: %s", 2, args->items[2]->TypeString());
        PyDecRef( ss );
        return false;
    }
    if(!args->items[3]->IsInt())
    {
        sLog.Error("PyCachedObjectDecoder","Cache object's arg %d is not an Integer: %s", 3, args->items[3]->TypeString());
        PyDecRef( ss );
        return false;
    }
    if(!args->items[5]->IsInt())
    {
        sLog.Error("PyCachedObjectDecoder","Cache object's arg %d is not an Integer: %s", 5, args->items[5]->TypeString());
        PyDecRef( ss );
        return false;
    }

    PyTuple *objVt = (PyTuple *) args->items[0];
    if(!objVt->items[0]->IsInt())
    {
        sLog.Error("PyCachedObjectDecoder","Cache object's version tuple %d is not an Integer: %s", 0, objVt->items[0]->TypeString());
        PyDecRef( ss );
        return false;
    }
    if(!objVt->items[1]->IsInt())
    {
        sLog.Error("PyCachedObjectDecoder","Cache object's version tuple %d is not an Integer: %s", 1, objVt->items[1]->TypeString());
        PyDecRef( ss );
        return false;
    }

    PyInt *nodeidr = (PyInt *) args->items[2];
    PyInt *sharedr = (PyInt *) args->items[3];
    PyInt *compressedr = (PyInt *) args->items[5];
    PyInt *timer = (PyInt *) objVt->items[0];
    PyInt *versionr = (PyInt *) objVt->items[1];

    timestamp = timer->value();
    version = versionr->value();
    nodeID = nodeidr->value();
    shared = ( sharedr->value() != 0 );
    compressed = ( compressedr->value() != 0 );

    //content (do this as the last thing, since it's the heavy lifting):
    if(args->items[4]->IsSubStream())
    {
        cache = (PySubStream *) args->items[4];
        //take it
        args->items[4] = NULL;
    }
    else if(args->items[4]->IsBuffer())
    {
        //this is a data buffer, likely compressed.
        PyBuffer* buf = args->items[4]->AsBuffer();
        PyIncRef( buf );

        cache = new PySubStream( buf );
    }
    else if(args->items[4]->IsString())
    {
        //this is a data buffer, likely compressed, not sure why it comes through as a string...
        PyString* str = args->items[4]->AsString();

        cache = new PySubStream( new PyBuffer( *str ) );
    }
    else
    {
        sLog.Error("PyCachedObjectMgr", "Cache object's arg %d is not a substream or buffer: %s", 4, args->items[4]->TypeString());
        PyDecRef( ss );
        return false;
    }

    objectID = args->items[6];
    PyIncRef(objectID);

    PyDecRef( ss );
    return true;
}