/*
 * Internal static function for decoding of the single publication cell.
 *
 * Validates the cell layout and records the identifier and the position of
 * the publication imprint (hash algorithm byte + digest) inside the file.
 */
static int decodePublicationCell(
		const unsigned char *cell_addr, size_t cell_offset,
		size_t cell_size, GTPublicationsFile_Cell *cell)
{
	int algorithm;

	/* The cell must contain the identifier plus at least the one-byte
	 * hash algorithm id that starts the imprint. */
	if (cell_size <= GTPublicationsFile_CellOffset_publicationImprint) {
		return GT_INVALID_FORMAT;
	}

	cell->publication_identifier = readInt64(
			cell_addr + GTPublicationsFile_CellOffset_publicationIdentifier);

	/* The first imprint byte names the hash algorithm. */
	algorithm = cell_addr[GTPublicationsFile_CellOffset_publicationImprint];
	if (!GT_isSupportedHashAlgorithm(algorithm)) {
		return GT_UNTRUSTED_HASH_ALGORITHM;
	}

	/* Imprint = algorithm id byte followed by the digest. */
	cell->publication_imprint_size = GT_getHashSize(algorithm) + 1;
	if (cell->publication_imprint_size <= 1) {
		return GT_CRYPTO_FAILURE;
	}

	/* The whole imprint must fit inside the cell. */
	if (cell->publication_imprint_size +
			GTPublicationsFile_CellOffset_publicationImprint > cell_size) {
		return GT_INVALID_FORMAT;
	}

	cell->publication_imprint_offset =
			cell_offset + GTPublicationsFile_CellOffset_publicationImprint;

	return GT_OK;
}
/**
 * Function used in the new packets table to create suback packets.
 *
 * Fix over the original: both malloc results were dereferenced without a
 * NULL check; on allocation failure we now return NULL (or stop collecting
 * QoS entries) instead of crashing.
 *
 * @param aHeader the MQTT header byte
 * @param data the rest of the packet
 * @param datalen the length of the rest of the packet
 * @param handler network handle used to look up the client's MQTT version
 * @return pointer to the packet structure, or NULL on allocation failure
 */
void* MQTTPacket_suback(unsigned char aHeader, char* data, size_t datalen, networkHandles* handler)
{
	Suback* pack = NULL;
	char* curdata = data;
	int mqtt_version = get_client_mqtt_version_from_network_handler(handler);

	FUNC_ENTRY;
	pack = malloc(sizeof(Suback));
	if (pack == NULL)
		goto exit;
	pack->header.byte = aHeader;
	/* The Yunba 3.1 variant carries a 64-bit message id; otherwise the
	 * standard (16-bit) id is read. */
	if (mqtt_version == MQTTVERSION_YUNBA_3_1)
		pack->msgId = readInt64(&curdata);
	else
		pack->msgId = readInt(&curdata);
	pack->qoss = ListInitialize();
	/* One granted-QoS byte per subscribed topic until the payload ends. */
	while ((size_t)(curdata - data) < datalen)
	{
		int* newint = malloc(sizeof(int));
		if (newint == NULL)
			break; /* out of memory: stop collecting rather than crash */
		*newint = (int)readChar(&curdata);
		ListAppend(pack->qoss, newint, sizeof(int));
	}
exit:
	FUNC_EXIT;
	return pack;
}
/**
 * Reads a 64-bit-integer array from the stream.
 *
 * First reads the element count, then allocates the destination buffer and
 * fills it element by element with readInt64().
 *
 * Fix over the original: the malloc result was dereferenced unchecked; on
 * allocation failure *o is now left NULL and the function returns early.
 * NOTE(review): in that failure case the 'size' elements remain unread in
 * the stream — callers must treat a NULL *o as a hard error.
 *
 * @param o receives a malloc'd array of 'size' int64_t values (NULL if the
 *          allocation fails; may also be NULL for an empty array, since
 *          malloc(0) is implementation-defined).
 */
void BaseSerializedObj::readInt64Array(int64_t** o)
{
    uint64_t size = readUInt64();
    *o = (int64_t *) malloc(size * byteSize_writeInt64);
    if (*o == NULL) {
        return;
    }
    for (uint64_t i = 0; i < size; ++i) {
        (*o)[i] = readInt64();
    }
}
/*
 * Internal static function for decoding of the single key hash cell.
 *
 * cell_addr    first byte of the cell inside the loaded file data.
 * cell_offset  offset of the cell from the start of the file; used to record
 *              the absolute position of the key hash imprint.
 * cell_size    number of bytes available in this cell.
 * cell         output structure filled in on success.
 *
 * Returns GT_OK, or GT_INVALID_FORMAT / GT_UNTRUSTED_HASH_ALGORITHM /
 * GT_CRYPTO_FAILURE on error.
 */
static int decodeKeyHashCell(
		const unsigned char *cell_addr, size_t cell_offset,
		size_t cell_size, GTPublicationsFile_KeyHashCell *cell)
{
	int hash_alg;
	long long key_publication_time;

	/* The cell must hold the publication time plus at least the one-byte
	 * hash algorithm id that starts the imprint. */
	if (cell_size < GTPublicationsFile_KeyHashCellOffset_keyHashImprint + 1) {
		return GT_INVALID_FORMAT;
	}

	key_publication_time = readInt64(cell_addr +
			GTPublicationsFile_KeyHashCellOffset_keyPublicationTime);
	cell->key_publication_time = key_publication_time;
	/* The following condition checks for time_t overflows on 32-bit platforms
	 * and should be optimized away if time_t is at least 64 bits long. */
	if (sizeof(time_t) < 8 &&
			cell->key_publication_time != key_publication_time) {
		/* This error code assumes that no-one uses 32-bit time_t after the
		 * year of 2038, so it is safe to say that file format is invalid
		 * before that. */
		return GT_INVALID_FORMAT;
	}

	/* First byte of the imprint names the hash algorithm. */
	hash_alg = cell_addr[GTPublicationsFile_KeyHashCellOffset_keyHashImprint];
	if (!GT_isSupportedHashAlgorithm(hash_alg)) {
		return GT_UNTRUSTED_HASH_ALGORITHM;
	}

	/* Imprint = algorithm id byte + digest bytes. */
	cell->key_hash_imprint_size = GT_getHashSize(hash_alg) + 1;
	if (cell->key_hash_imprint_size <= 1) {
		return GT_CRYPTO_FAILURE;
	}

	/* The whole imprint must fit inside the cell. */
	if (cell_size < (cell->key_hash_imprint_size +
			GTPublicationsFile_KeyHashCellOffset_keyHashImprint)) {
		return GT_INVALID_FORMAT;
	}

	cell->key_hash_imprint_offset =
			cell_offset + GTPublicationsFile_KeyHashCellOffset_keyHashImprint;

	return GT_OK;
}
/**
 * Deserializes the supplied (wire) buffer into extended-command data
 * (Yunba extension: 8-byte packet id, command byte, status byte, 2-byte
 * payload length, payload).
 *
 * Fixes over the original: the fixed-size fields and the payload are now
 * bounds-checked against the remaining length before being read (the
 * original read up to 12 bytes unchecked), and a stray ';;' typo is removed.
 *
 * @param dup returned header dup flag
 * @param qos returned header qos
 * @param retained returned header retain flag
 * @param packetid returned 64-bit packet identifier
 * @param cmd returned extended command code
 * @param status returned status byte
 * @param payload returned pointer into buf at the payload start
 * @param payloadlen returned payload length
 * @param buf the raw buffer data
 * @param buflen the length in bytes of the supplied buffer
 * @return 1 on success, 0 on failure
 */
int MQTTDeserialize_extendedcmd(unsigned char* dup, int* qos, unsigned char* retained, uint64_t* packetid,
		EXTED_CMD* cmd, int *status, void** payload, int* payloadlen,
		unsigned char* buf, int buflen)
{
	MQTTHeader header = {0};
	unsigned char* curdata = buf;
	unsigned char* enddata = NULL;
	int rc = 0;
	int mylen = 0;

	header.byte = readChar(&curdata);
	if (header.bits.type != EXTCMD)
		goto exit;

	*dup = header.bits.dup;
	*qos = header.bits.qos;
	*retained = header.bits.retain;

	curdata += (rc = MQTTPacket_decodeBuf(curdata, &mylen)); /* read remaining length */
	enddata = curdata + mylen;

	/* Need at least: 8-byte packet id + command + status + 2-byte payload
	 * length before the variable payload. */
	if (enddata - curdata < 12)
	{
		rc = 0;
		goto exit;
	}

	*packetid = readInt64(&curdata);

	*cmd = (*curdata);
	curdata++;

	*status = (*curdata);
	curdata++;

	*payloadlen = readInt(&curdata);
	/* The advertised payload must fit in the remaining length. */
	if (*payloadlen < 0 || curdata + *payloadlen > enddata)
	{
		rc = 0;
		goto exit;
	}
	*payload = curdata;

	rc = 1;
exit:
	return rc;
}
/** * Deserializes the supplied (wire) buffer into suback data * @param packetid returned integer - the MQTT packet identifier * @param maxcount - the maximum number of members allowed in the grantedQoSs array * @param count returned integer - number of members in the grantedQoSs array * @param grantedQoSs returned array of integers - the granted qualities of service * @param buf the raw buffer data, of the correct length determined by the remaining length field * @param buflen the length in bytes of the data in the supplied buffer * @return error code. 1 is success, 0 is failure */ int MQTTDeserialize_suback(uint64_t* packetid, int maxcount, int* count, int grantedQoSs[], unsigned char* buf, int buflen) { MQTTHeader header = {0}; unsigned char* curdata = buf; unsigned char* enddata = NULL; int rc = 0; int mylen; FUNC_ENTRY; header.byte = readChar(&curdata); if (header.bits.type != SUBACK) goto exit; curdata += (rc = MQTTPacket_decodeBuf(curdata, &mylen)); /* read remaining length */ enddata = curdata + mylen; if (enddata - curdata < 2) goto exit; *packetid = readInt64(&curdata); *count = 0; while (curdata < enddata) { if (*count > maxcount) { rc = -1; goto exit; } grantedQoSs[(*count)++] = readChar(&curdata); } rc = 1; exit: FUNC_EXIT_RC(rc); return rc; }
double InputStream::readDouble() { union { int64 asInt; double asDouble; } n; n.asInt = readInt64(); return n.asDouble; }
/**
 * Reads 8 bytes from the stream and reinterprets the bit pattern as a
 * double.
 *
 * Fix over the original: the old code cast the int64 *value* to a pointer
 * and then read the pointer variable's own bytes as a double
 * (`*(double *)&a` where `a` was a pointer) — that only accidentally worked
 * on 64-bit targets and was broken on 32-bit ones. The bits are now punned
 * through a union of matching 8-byte types, the same idiom used by
 * InputStream::readDouble elsewhere in this file.
 *
 * @return the IEEE-754 double carried by the next 8 stream bytes.
 */
double BaseSerializedObj::readDouble()
{
    union {
        int64_t asInt;
        double asDouble;
    } bits;
    bits.asInt = readInt64();
    return bits.asDouble;
}
// Recursively inflates a WebCore::HistoryItem (and its children) from the
// flat byte stream produced by the matching flatten/serialize code.
//
// newItem  item to populate; must already have an AndroidWebHistoryBridge.
// pData    in/out pointer into the serialized buffer; advanced past the
//          bytes consumed by this item and its children.
// length   number of bytes available starting at *pData.
//
// Returns false as soon as the stream is truncated or malformed; in that
// case *pData may be left mid-item.
static bool readItemRecursive(WebCore::HistoryItem* newItem,
        const char** pData, int length)
{
    if (!pData || length < HISTORY_MIN_SIZE) {
        ALOGW("readItemRecursive() bad params; pData=%p length=%d", pData, length);
        return false;
    }
    const char* data = *pData;
    const char* end = data + length;
    String content;

    // Read the original url
    if (readString(data, end, content, "Original url"))
        newItem->setOriginalURLString(content);
    else
        return false;

    // Read the url
    if (readString(data, end, content, "Url"))
        newItem->setURLString(content);
    else
        return false;

    // Read the title
    if (readString(data, end, content, "Title"))
        newItem->setTitle(content);
    else
        return false;

    // Generate a new ResourceRequest object for populating form information.
    // Read the form content type
    WTF::String formContentType;
    if (!readString(data, end, formContentType, "Content type"))
        return false;

    // Read the form data size
    unsigned formDataSize;
    if (!readUnsigned(data, end, formDataSize, "Form data size"))
        return false;

    // Read the form data (only present when formDataSize is non-zero)
    WTF::RefPtr<WebCore::FormData> formData;
    if (formDataSize) {
        ALOGV("Reading Form data %d %.*s", formDataSize, formDataSize, data);
        // Guard against a size field that exceeds the remaining bytes.
        if ((end < data) || ((size_t)(end - data) < formDataSize)) {
            ALOGW("\tNot enough data to read form data; returning");
            return false;
        }
        formData = WebCore::FormData::create(data, formDataSize);
        data += formDataSize;
        // Read the identifier
        int64_t id;
        if (!readInt64(data, end, id, "Form id"))
            return false;
        if (id)
            formData->setIdentifier(id);
    }

    // Set up the form info as a POST request carrying the form data
    if (formData != NULL) {
        WebCore::ResourceRequest r;
        r.setHTTPMethod("POST");
        r.setHTTPContentType(formContentType);
        r.setHTTPBody(formData);
        newItem->setFormInfoFromRequest(r);
    }

    // Read the target
    if (readString(data, end, content, "Target"))
        newItem->setTarget(content);
    else
        return false;

    AndroidWebHistoryBridge* bridge = newItem->bridge();
    ALOG_ASSERT(bridge, "There should be a bridge object during inflate");

    // Read the screen scale
    float fValue;
    if (readFloat(data, end, fValue, "Screen scale"))
        bridge->setScale(fValue);
    else
        return false;

    // Read the text wrap scale
    if (readFloat(data, end, fValue, "Text wrap scale"))
        bridge->setTextWrapScale(fValue);
    else
        return false;

    // Read scroll position.
    int scrollX;
    if (!readInt(data, end, scrollX, "Scroll pos x"))
        return false;
    int scrollY;
    if (!readInt(data, end, scrollY, "Scroll pos y"))
        return false;
    newItem->setScrollPoint(IntPoint(scrollX, scrollY));

    // Read the document state
    unsigned docStateCount;
    if (!readUnsigned(data, end, docStateCount, "Doc state count"))
        return false;
    if (docStateCount) {
        // Create a new vector and reserve enough space for the document state.
        WTF::Vector<WTF::String> docState;
        docState.reserveCapacity(docStateCount);
        while (docStateCount--) {
            // Read a document state string
            if (readString(data, end, content, "Document state"))
                docState.append(content);
            else
                return false;
        }
        newItem->setDocumentState(docState);
    }

    // Read is target item
    bool c;
    if (readBool(data, end, c, "Target item"))
        newItem->setIsTargetItem(c);
    else
        return false;

    // Read the child count
    unsigned count;
    if (!readUnsigned(data, end, count, "Child count"))
        return false;
    // Publish our consumed position before recursing into children (the
    // recursive calls advance *pData themselves).
    *pData = data;
    if (count) {
        while (count--) {
            // No need to check the length each time because read_item_recursive
            // will return null if there isn't enough data left to parse.
            WTF::RefPtr<WebCore::HistoryItem> child = WebCore::HistoryItem::create();
            // Set a bridge that will not call into java.
            child->setBridge(new WebHistoryItem(static_cast<WebHistoryItem*>(bridge)));
            // Read the child item.
            // NOTE(review): local 'data' is not advanced by the recursive
            // calls (only *pData is), so 'end - data' overstates the bytes
            // remaining for the second and later children — the callee
            // re-checks bounds itself, but confirm this is intentional.
            if (!readItemRecursive(child.get(), pData, end - data))
                return false;
            child->bridge()->setActive();
            newItem->addChildItem(child);
        }
    }
    return true;
}
// Reads the given key from the given section as a boolean: any non-zero
// integer value counts as true. (Signature restored from mojibake: the
// first parameter is `const string &sectStr`.)
bool IniParser::readBool(const string &sectStr, const string &keyStr)
{
    return readInt64(sectStr, keyStr) != 0;
}
/**
 * Decodes this property's value from the bitstream, dispatching on the
 * sendprop type tag, and stores it via set().
 *
 * @param stream bitstream positioned at the start of this property's data.
 * @throws propertyInvalidType if prop->getType() is not a known type tag.
 */
void property::update(bitstream &stream) {
    #ifdef TEST_SKIPPING
    // Dry run: skip over the property once, record how many bits skip()
    // consumed, then rewind so the real read below starts clean.
    uint32_t cur = stream.position();
    property::skip(stream, prop);
    uint32_t diff1 = stream.position() - cur;
    stream.seekBackward(diff1);

    // assert that seeking backwards worked and we have a clean state
    assert ( cur == stream.position() );
    #endif // TEST_SKIPPING

    switch (prop->getType()) {
        // Read Integer
        // NOTE(review): unlike the other cases, readInt/readInt64 take
        // 'this' and are presumably responsible for calling set()
        // internally — confirm against their definitions.
        case sendprop::T_Int:
            readInt(stream, this);
            break;
        // Read Float
        case sendprop::T_Float:
            set(readFloat(stream, this));
            break;
        // Read 3D Vector
        case sendprop::T_Vector: {
            std::array<float, 3> vec;
            readVector(vec, stream, this);
            set(std::move(vec));
        } break;
        // Read 2D
        case sendprop::T_VectorXY: {
            std::array<float, 2> vec;
            readVectorXY(vec, stream, this);
            set(std::move(vec));
        } break;
        // Read String
        case sendprop::T_String: {
            // +1 leaves room for a terminator; readString returns the length.
            char str[PROPERTY_MAX_STRING_LENGTH + 1];
            uint32_t length = readString(str, stream);
            set(std::string(str, length));
        } break;
        // Read Array
        case sendprop::T_Array: {
            std::vector<property> vec;
            readArray(vec, stream, prop);
            set(std::move(vec));
        } break;
        // Read 64 bit Integer
        case sendprop::T_Int64:
            readInt64(stream, this);
            break;
        default:
            // Unknown type tag: report it via the exception's metadata.
            BOOST_THROW_EXCEPTION( propertyInvalidType()
                << (EArgT<1, uint32_t>::info(prop->getType()))
            );
            break;
    }

    #ifdef TEST_SKIPPING
    // The skip() estimate must consume exactly as many bits as the real read.
    uint32_t diff2 = stream.position() - cur;
    if (diff2 != diff1) {
        std::cout << "Skip failed: " << diff1 << " (skip) " << diff2 << " (read) " << std::endl;
        std::cout << "Type ID: " << prop->getType() << std::endl;
        exit(0);
    }
    #endif // TEST_SKIPPING
}
string psoCat::odbcKeyToStr( uint32_t length ) { psoKeyFieldDefinition * keyODBC; int numKeys, i; string outStr; keyODBC = (psoKeyFieldDefinition *) keyDef; numKeys = keyDefLength / sizeof(psoKeyFieldDefinition); psoaGetKeyOffsets( keyODBC, numKeys, keyOffsets ); for ( i = 0; i < numKeys; ++i ) { string s; switch( keyODBC[i].type ) { case PSO_KEY_INTEGER: s = readInt32( &key[keyOffsets[i]] ); break; case PSO_KEY_BIGINT: s = readInt64( &key[keyOffsets[i]] ); break; case PSO_KEY_BINARY: readBinary( s, keyODBC[i].length, &key[keyOffsets[i]] ); break; case PSO_KEY_CHAR: readString( s, keyODBC[i].length, &key[keyOffsets[i]] ); break; case PSO_KEY_VARCHAR: case PSO_KEY_LONGVARCHAR: readString( s, length - keyOffsets[i], &key[keyOffsets[i]] ); break; case PSO_KEY_VARBINARY: case PSO_KEY_LONGVARBINARY: readBinary( s, length - keyOffsets[i], &key[keyOffsets[i]] ); break; case PSO_KEY_DATE: s = readDate( &key[keyOffsets[i]] ); break; case PSO_KEY_TIME: s = readTime( &key[keyOffsets[i]] ); break; case PSO_KEY_TIMESTAMP: s = readTimeStamp( &key[keyOffsets[i]] ); break; } outStr += s; if ( i < numKeys-1) outStr += ", "; } return outStr; }
/*
 * Renders the current record's data fields as one comma-separated,
 * human-readable string, decoding each field according to its ODBC field
 * definition.
 *
 * length  total byte length of the record buffer; used to size the trailing
 *         variable-length (VARCHAR/VARBINARY/...) field.
 *
 * Returns the formatted, comma-separated field values.
 */
string psoCat::odbcFieldToStr( uint32_t length )
{
   psoFieldDefinition * fieldODBC;
   int numFields, i;
   string outStr;

   /* The raw definition blob is an array of psoFieldDefinition structs. */
   fieldODBC = (psoFieldDefinition *) fieldDef;
   numFields = fieldDefLength / sizeof(psoFieldDefinition);
   /* Populates fieldOffsets[] with the byte offset of each field in buffer. */
   psoaGetFieldOffsets( fieldODBC, numFields, fieldOffsets );

   for ( i = 0; i < numFields; ++i ) {
      string s;
      switch( fieldODBC[i].type ) {
      case PSO_TINYINT:
         s = readInt8( &buffer[fieldOffsets[i]] );
         break;
      case PSO_SMALLINT:
         s = readInt16( &buffer[fieldOffsets[i]] );
         break;
      case PSO_INTEGER:
         s = readInt32( &buffer[fieldOffsets[i]] );
         break;
      case PSO_BIGINT:
         s = readInt64( &buffer[fieldOffsets[i]] );
         break;
      case PSO_BINARY:
         readBinary( s, fieldODBC[i].vals.length, &buffer[fieldOffsets[i]] );
         break;
      case PSO_CHAR:
         readString( s, fieldODBC[i].vals.length, &buffer[fieldOffsets[i]] );
         break;
      case PSO_NUMERIC:
         /* Precision/scale come from the field definition itself. */
         s = readDecimal( fieldODBC[i].vals.decimal.precision,
            fieldODBC[i].vals.decimal.scale, &buffer[fieldOffsets[i]] );
         break;
      /* Variable-length fields run to the end of the buffer, so their size
       * is everything after the field's offset. */
      case PSO_VARCHAR:
      case PSO_LONGVARCHAR:
         readString( s, length - fieldOffsets[i], &buffer[fieldOffsets[i]] );
         break;
      case PSO_VARBINARY:
      case PSO_LONGVARBINARY:
         readBinary( s, length - fieldOffsets[i], &buffer[fieldOffsets[i]] );
         break;
      case PSO_REAL:
         s = readFloat32( &buffer[fieldOffsets[i]] );
         break;
      case PSO_DOUBLE:
         s = readFloat64( &buffer[fieldOffsets[i]] );
         break;
      case PSO_DATE:
         s = readDate( &buffer[fieldOffsets[i]] );
         break;
      case PSO_TIME:
         s = readTime( &buffer[fieldOffsets[i]] );
         break;
      case PSO_TIMESTAMP:
         s = readTimeStamp( &buffer[fieldOffsets[i]] );
         break;
      }
      outStr += s;
      /* Separate fields with ", " (no trailing separator). */
      if ( i < numFields-1) outStr += ", ";
   }

   return outStr;
}
/*
 * Internal static function for decoding of the header fields.
 *
 * Reads the fixed-size header from pubfile->data, stores the decoded values
 * in pubfile, and validates that the four sections appear in order and lie
 * inside the file.
 *
 * Fixes over the original:
 *  - the length precheck was `data_length < 1`, but the version field read
 *    by readUInt16 is two bytes — a one-byte file caused a one-byte
 *    out-of-bounds read; the check is now `< 2`.
 *  - the cell-count sanity checks divided unconditionally by the
 *    file-supplied cell sizes; a zero cell size caused a division by zero.
 *    A zero size with a non-zero count is now rejected explicitly.
 *
 * Returns GT_OK, GT_UNSUPPORTED_FORMAT for an unknown version, or
 * GT_INVALID_FORMAT for any structural inconsistency.
 */
static int decodeHeader(GTPublicationsFile *pubfile)
{
	size_t data_block_size;
	size_t hash_data_block_size;

	assert(sizeof(int) >= 4);
	assert(sizeof(long long) >= 8);

	/* The version field is two bytes; require both before reading it. */
	if (pubfile->data_length < 2) {
		return GT_INVALID_FORMAT;
	}
	pubfile->version = readUInt16(
			pubfile->data + GTPublicationsFile_HeaderOffset_version);
	if (pubfile->version != GTPublicationsFile_CurrentVersion) {
		return GT_UNSUPPORTED_FORMAT;
	}

	if (pubfile->data_length < GTPublicationsFile_HeaderLength) {
		return GT_INVALID_FORMAT;
	}

	pubfile->first_publication_ident = readInt64(
			pubfile->data +
			GTPublicationsFile_HeaderOffset_firstPublicationIdent);
	pubfile->data_block_begin = readInt32(
			pubfile->data + GTPublicationsFile_HeaderOffset_dataBlockBegin);
	pubfile->publication_cell_size = readUInt16(
			pubfile->data +
			GTPublicationsFile_HeaderOffset_publicationCellSize);
	pubfile->number_of_publications = readInt32(
			pubfile->data +
			GTPublicationsFile_HeaderOffset_numberOfPublications);
	pubfile->key_hashes_begin = readInt32(
			pubfile->data + GTPublicationsFile_HeaderOffset_keyHashesBegin);
	pubfile->key_hash_cell_size = readUInt16(
			pubfile->data + GTPublicationsFile_HeaderOffset_keyHashCellSize);
	pubfile->number_of_key_hashes = readUInt16(
			pubfile->data +
			GTPublicationsFile_HeaderOffset_numberOfKeyHashes);
	pubfile->pub_reference_begin = readInt32(
			pubfile->data +
			GTPublicationsFile_HeaderOffset_pubReferenceBegin);
	pubfile->signature_block_begin = readInt32(
			pubfile->data +
			GTPublicationsFile_HeaderOffset_signatureBlockBegin);

	/* The sections must appear in order and lie inside the file:
	 * header | publication cells | key hash cells | pub refs | signature. */
	if (pubfile->data_block_begin < GTPublicationsFile_HeaderLength ||
			pubfile->data_block_begin > pubfile->data_length) {
		return GT_INVALID_FORMAT;
	}
	if (pubfile->key_hashes_begin < pubfile->data_block_begin ||
			pubfile->key_hashes_begin > pubfile->data_length) {
		return GT_INVALID_FORMAT;
	}
	if (pubfile->pub_reference_begin < pubfile->key_hashes_begin ||
			pubfile->pub_reference_begin > pubfile->data_length) {
		return GT_INVALID_FORMAT;
	}
	if (pubfile->signature_block_begin < pubfile->pub_reference_begin ||
			pubfile->signature_block_begin > pubfile->data_length) {
		return GT_INVALID_FORMAT;
	}

	data_block_size = pubfile->key_hashes_begin - pubfile->data_block_begin;
	hash_data_block_size =
			pubfile->signature_block_begin - pubfile->key_hashes_begin;

	/* Using integer division instead of multiply ensures that there will
	 * be no overflows and thus no false positives in case of invalid values
	 * of publication_cell_size or number_of_publications. The division is
	 * skipped when the count is zero, so a zero cell size cannot divide by
	 * zero. */
	if (pubfile->number_of_publications > 0 &&
			(pubfile->publication_cell_size == 0 ||
			 data_block_size / pubfile->publication_cell_size <
			 pubfile->number_of_publications)) {
		return GT_INVALID_FORMAT;
	}
	if (pubfile->number_of_key_hashes > 0 &&
			(pubfile->key_hash_cell_size == 0 ||
			 hash_data_block_size / pubfile->key_hash_cell_size <
			 pubfile->number_of_key_hashes)) {
		return GT_INVALID_FORMAT;
	}

	return GT_OK;
}