void test_block_codec() { std::vector<size_t> sizes = {1, 16, BlockCodec::block_size - 1, BlockCodec::block_size}; for (auto size: sizes) { std::vector<uint32_t> values(size); std::generate(values.begin(), values.end(), []() { return (uint32_t)rand() % (1 << 12); }); for (size_t tcase = 0; tcase < 2; ++tcase) { // test both undefined and given sum_of_values uint32_t sum_of_values(-1); if (tcase == 1) { sum_of_values = std::accumulate(values.begin(), values.end(), 0); } std::vector<uint8_t> encoded; BlockCodec::encode(values.data(), sum_of_values, values.size(), encoded); std::vector<uint32_t> decoded(values.size()); uint8_t const* out = BlockCodec::decode(encoded.data(), decoded.data(), sum_of_values, values.size()); BOOST_REQUIRE_EQUAL(encoded.size(), out - encoded.data()); BOOST_REQUIRE_EQUAL_COLLECTIONS(values.begin(), values.end(), decoded.begin(), decoded.end()); } } }
void ViBenchMarker3::nextFile() { if(mFiles.isEmpty()) { quit(); } else { initParams(); for(int i = 0; i < mParamsStart.size(); ++i) mParamsCurrent[i] = mParamsStart[i]; mCurrentFile = mFiles.dequeue(); mBestMatthews = 0; printFileHeader(); mCurrentObject->clearBuffers(); mCurrentObject.setNull(); mCurrentObject = ViAudioObject::create(); mCurrentObject->setFilePath(ViAudio::Target, mCurrentFile); QObject::connect(mCurrentObject.data(), SIGNAL(decoded()), this, SLOT(process1())); mCurrentObject->decode(); } }
void test_block_codec() { std::vector<size_t> sizes = {1, 16, BlockCodec::block_size - 1, BlockCodec::block_size}; for(size_t mag=1;mag<25;mag++) { for (auto size: sizes) { std::vector<uint32_t> values(size); std::mt19937 gen(12345); std::uniform_int_distribution<uint32_t> dis(0, (1<<mag)); std::generate(values.begin(), values.end(), [&]() { return dis(gen); }); for (size_t tcase = 0; tcase < 2; ++tcase) { // test both undefined and given sum_of_values uint32_t sum_of_values(-1); if (tcase == 1) { sum_of_values = std::accumulate(values.begin(), values.end(), 0); } std::vector<uint8_t> encoded; BlockCodec::encode(values.data(), sum_of_values, values.size(), encoded); std::vector<uint32_t> decoded(values.size() + BlockCodec::overflow); uint8_t const* out = BlockCodec::decode(encoded.data(), decoded.data(), sum_of_values, values.size()); BOOST_REQUIRE_EQUAL(encoded.size(), out - encoded.data()); BOOST_REQUIRE_EQUAL_COLLECTIONS(values.begin(), values.end(), decoded.begin(), decoded.begin()+ values.size()); } } } }
void for_each (std::function <void (nodeobject::ptr)> f) { hyperleveldb::readoptions const options; std::unique_ptr <hyperleveldb::iterator> it (m_db->newiterator (options)); for (it->seektofirst (); it->valid (); it->next ()) { if (it->key ().size () == m_keybytes) { decodedblob decoded (it->key ().data (), it->value ().data (), it->value ().size ()); if (decoded.wasok ()) { f (decoded.createobject ()); } else { // uh oh, corrupted data! m_journal.fatal << "corrupt nodeobject #" << uint256::fromvoid (it->key ().data ()); } } else { // vfalco note what does it mean to find an // incorrectly sized key? corruption? m_journal.fatal << "bad key size = " << it->key ().size (); } } }
/* USAGE: decode(FILE) * */ bendecoded *decode(char *file) { char *buf; long long len; be_node *n; buf = read_file(file, &len); printf("DECODING: %s\n", file); n = be_decoden(buf, len); if(!n) { printf("\tparsing failed!\n"); } bendecoded *output = decoded(n); unsigned char *info_hash; get_info_hash(&info_hash); printf("announce: %s\n", output->announce); printf("creation date: %lli\n", output->creation_date); printf("created by: %s\n", output->created_by); printf("encoding: %s\n", output->encoding); printf("info -> piece length: %lli\n", output->info->piece_length); printf("info -> pieces: %s\n", output->info->pieces); printf("info -> name: %s\n", output->info->name); printf("(assuming single file mode for now)\n"); printf("info -> length: %lli\n", output->info->length); printf("info -> md5sum: %s\n", output->info->md5sum); printf("info_hash before sha1 is: %s\n", info_hash); free(buf); //be_dump(n); //be_free(n); return output; }
// Expands Code 39 "extended" shift sequences (+X, $X, %X, /X) into the
// full ASCII character set.  Throws ReaderException on any malformed
// sequence, including a shift character with no following character —
// the original indexed encoded[i + 1] without a bounds check, reading
// past the end of the string when the input ended in a shift char.
Ref<String> Code39Reader::decodeExtended(std::string encoded){
  int length = encoded.length();
  std::string tmpDecoded;
  for (int i = 0; i < length; i++) {
    char c = encoded[i];
    if (c == '+' || c == '$' || c == '%' || c == '/') {
      // A shift character must be followed by the character it modifies;
      // a trailing shift is invalid input.
      if (i + 1 >= length) {
        throw ReaderException("");
      }
      char next = encoded[i + 1];
      char decodedChar = '\0';
      switch (c) {
        case '+':
          // +A to +Z map to a to z
          if (next >= 'A' && next <= 'Z') {
            decodedChar = (char) (next + 32);
          } else {
            throw ReaderException("");
          }
          break;
        case '$':
          // $A to $Z map to control codes SH to SB
          if (next >= 'A' && next <= 'Z') {
            decodedChar = (char) (next - 64);
          } else {
            throw ReaderException("");
          }
          break;
        case '%':
          // %A to %E map to control codes ESC to US
          if (next >= 'A' && next <= 'E') {
            decodedChar = (char) (next - 38);
          } else if (next >= 'F' && next <= 'W') {
            decodedChar = (char) (next - 11);
          } else {
            throw ReaderException("");
          }
          break;
        case '/':
          // /A to /O map to ! to , and /Z maps to :
          if (next >= 'A' && next <= 'O') {
            decodedChar = (char) (next - 32);
          } else if (next == 'Z') {
            decodedChar = ':';
          } else {
            throw ReaderException("");
          }
          break;
      }
      tmpDecoded.append(1, decodedChar);
      // bump up i again since we read two characters
      i++;
    } else {
      tmpDecoded.append(1, c);
    }
  }
  Ref<String> decoded(new String(tmpDecoded));
  return decoded;
}
// Opens a Captain Comic 2 tileset: wraps the raw graphics stream in
// RLE filters so the tileset object only sees decoded pixel data
// (reads pass through the un-RLE filter, writes through the encoder).
TilesetPtr TilesetType_CComic2::open(stream::inout_sptr psGraphics, SuppData& suppData) const
{
    filter_sptr readFilter(new filter_ccomic2_unrle(CC2_FIRST_TILE_OFFSET));
    filter_sptr writeFilter(new filter_ccomic2_rle(CC2_FIRST_TILE_OFFSET));
    stream::filtered_sptr decoded(new stream::filtered());
    decoded->open(psGraphics, readFilter, writeFilter, NULL);
    return TilesetPtr(new Tileset_CComic2(decoded, NUMPLANES_TILES));
}
// Decodes a string token and installs it as the current value,
// recording the token's byte offsets within the input buffer.
// Returns false (current value untouched) if decoding fails.
bool Reader::decodeString(Token& token) {
  std::string text;
  if (!decodeString(token, text))
    return false;
  Value value(text);
  currentValue().swapPayload(value);
  currentValue().setOffsetStart(token.start_ - begin_);
  currentValue().setOffsetLimit(token.end_ - begin_);
  return true;
}
// Runs one detection pass over the freshly decoded file: generates noise
// into the buffers, runs the detector with the current parameter set,
// collects classification errors and timing, then moves on to the next file.
// NOTE(review): the do/while over nextParam() is commented out, so only a
// single parameter combination is evaluated per file — confirm intentional.
void ViNoiseBatcher::process()
{
    // Decode finished; detach so a re-decode doesn't re-enter this slot.
    QObject::disconnect(mCurrentObject.data(), SIGNAL(decoded()), this, SLOT(process()));

    qint64 time;

    // Synthesise corrupted audio and the ground-truth noise mask.
    ViNoiseCreator creator;
    creator.createNoise(mCurrentObject->buffer(ViAudio::Target), mCurrentObject->buffer(ViAudio::Corrupted), mCurrentObject->buffer(ViAudio::CustomMask), mCurrentObject->buffer(ViAudio::Custom));

    // Free buffers no longer needed for detection.
    mCurrentObject->clearBuffer(ViAudio::Target);
    mCurrentObject->clearBuffer(ViAudio::CustomMask);

    ViClassificationErrorCollection errors;

    //do
    //{
        mCurrentObject->clearBuffer(ViAudio::Noise);

        // Push the current parameter set into the detector.
        for(int i = 0; i < mParamsStart.size(); ++i) mDetector->setParameter(mParamsNames[i], mParamsCurrent[i]);

        if(mDetector->validParameters())
        {
            // Time the detection run only (not the error computation).
            mTime.restart();
            mDetector->detect(mCurrentObject->buffer(ViAudio::Corrupted), mCurrentObject->buffer(ViAudio::Noise));
            time = mTime.elapsed();
            errors = mDetector->error(mCurrentObject->buffer(ViAudio::Noise), mCurrentObject->buffer(ViAudio::Custom), mThreshold);

            // Write
            /* QObject::connect(mCurrentObject.data(), SIGNAL(encoded()), this, SLOT(quit()));
            mCurrentObject->encode(ViAudio::Noise);
            return;*/

            /*calculateThreshold(mCurrentObject->buffer(ViAudio::Noise), mCurrentObject->buffer(ViAudio::Custom), bestThreshold, bestErrors, bestMatthews, maxNoise);
            if(bestMatthews > maxMatthews) maxMatthews = bestMatthews;
            clearProgress();*/

            // Write
            /*mDetector->mask(mCurrentObject->buffer(ViAudio::Noise), mCurrentObject->buffer(ViAudio::NoiseMask), 0.15, 8);
            QObject::connect(mCurrentObject.data(), SIGNAL(encoded()), this, SLOT(quit()));
            mCurrentObject->encode(ViAudio::NoiseMask);
            return;*/
        }
        else
        {
            // Invalid parameter combination: record a zero-time entry.
            time = 0;
        }
        ++mDoneParamIterations;
        printFileData(errors, time);
        printTerminal(errors, time);
    //}
    //while(nextParam());

    nextFile();
}
int main() { std::string to_encode("WWWWWWWWWWWWBWWWWWWWWWWWWBBBWWWWWWWWWWWWWWWWWWWWWWWWBWWWWWWWWWWWWWW") ; std::cout << to_encode << std::endl << " encoded:" << std::endl ; std::string encoded(encode(to_encode)) ; std::cout << encoded << std::endl ; std::string decoded(decode(encoded)) ; std::cout << "Decoded again:\n" ; std::cout << decoded << std::endl ; if(to_encode == decoded) std::cout << "It must have worked!\n" ; return 0 ; }
bool MediaSession::start() { d->startTime = QTime::currentTime(); bool managerOk = d->mediaManager->addSession(this); //Tell the media manager the session is being started. bool pluginOk = d->plugin->start(); connect((QObject*) d->mediaManager->alsaIn(), SIGNAL(readyRead()), (QObject*) this, SLOT(slotReadyRead())); connect((QObject*) d->plugin, SIGNAL(encoded()), (QObject*) this, SLOT(slotEncoded())); connect((QObject*) d->plugin, SIGNAL(decoded()), (QObject*) this, SLOT(slotDecoded())); return managerOk && pluginOk; }
// Receives a decoded frame from the decoder thread.  The first frame
// after a stop flips the state to Play and announces the stream as
// opened; while playing, the frame is published and playback stops
// automatically within the final half second of the stream.
void VPlayer::emitFrame(QImage _frame)
{
    if (state == Stop) {
        state = Play;
        emit opened();
    }
    if (state != Play)
        return;
    frame = QPixmap::fromImage(_frame);
    emit decoded();
    if (getDuration() - getTime() < 500)
        stop();
}
// Verifies SkImage::encode round-trips: the full image decoded from its
// encoded bytes must equal the source, and decoding restricted to a
// subrect of the encoded data must equal that subset of the source.
static void test_encode(skiatest::Reporter* reporter, SkImage* image) {
    const SkIRect subset = SkIRect::MakeXYWH(5, 5, 10, 10);

    SkAutoTUnref<SkData> encodedData(image->encode());
    REPORTER_ASSERT(reporter, encodedData);
    REPORTER_ASSERT(reporter, encodedData->size() > 0);

    // Full-image round trip.
    SkAutoTUnref<SkImage> roundTrip(SkImage::NewFromEncoded(encodedData));
    REPORTER_ASSERT(reporter, roundTrip);
    assert_equal(reporter, image, nullptr, roundTrip);

    // Round trip restricted to a subset of the encoded data.
    roundTrip.reset(SkImage::NewFromEncoded(encodedData, &subset));
    REPORTER_ASSERT(reporter, roundTrip);
    assert_equal(reporter, image, &subset, roundTrip);
}
// Looks up the node object stored under `key`.  On success *pobject
// receives the decoded object; otherwise it stays null and the returned
// status describes the failure (notfound / datacorrupt / unknown).
//
// Fix: the original had `status = datacorrupt;` commented out in the
// decode-failure branch, so a corrupt-but-present record returned `ok`
// with a null object — inconsistent with for_each(), which treats the
// same condition as fatal corruption.
status fetch (void const* key, nodeobject::ptr* pobject)
{
    pobject->reset ();

    status status (ok);

    hyperleveldb::readoptions const options;
    hyperleveldb::slice const slice (static_cast <char const*> (key), m_keybytes);

    std::string string;
    hyperleveldb::status getstatus = m_db->get (options, slice, &string);

    if (getstatus.ok ())
    {
        decodedblob decoded (key, string.data (), string.size ());

        if (decoded.wasok ())
        {
            *pobject = decoded.createobject ();
        }
        else
        {
            // decoding failed, probably corrupted!
            status = datacorrupt;
        }
    }
    else
    {
        if (getstatus.iscorruption ())
        {
            status = datacorrupt;
        }
        else if (getstatus.isnotfound ())
        {
            status = notfound;
        }
        else
        {
            status = unknown;
        }
    }

    return status;
}
// GPU variant of the encode round-trip test: builds a texture-backed
// source image, encodes it, and checks both the full decode and the
// subrect-restricted decode against the source.
static void test_encode(skiatest::Reporter* reporter, GrContext* ctx) {
    const SkIRect subset = SkIRect::MakeXYWH(5, 5, 10, 10);

    SkAutoTUnref<SkImage> source(make_image(ctx, 20, 20, subset));
    SkAutoTUnref<SkData> encodedData(source->encode());
    REPORTER_ASSERT(reporter, encodedData);
    REPORTER_ASSERT(reporter, encodedData->size() > 0);

    // Full-image round trip.
    SkAutoTUnref<SkImage> roundTrip(SkImage::NewFromEncoded(encodedData));
    REPORTER_ASSERT(reporter, roundTrip);
    assert_equal(reporter, source, NULL, roundTrip);

    // Round trip restricted to a subset of the encoded data.
    roundTrip.reset(SkImage::NewFromEncoded(encodedData, &subset));
    REPORTER_ASSERT(reporter, roundTrip);
    assert_equal(reporter, source, &subset, roundTrip);
}
// Opens a URL in the hex editor.  Local file:// URLs are opened directly;
// anything else is downloaded to a temp file via KFM, and slotKFMFinished()
// completes the open once the transfer is done.  Only one KFM job may be
// in flight at a time.
void HexWidget::openURL(const char *_url, KIND_OF_OPEN _mode)
{
    /* This code is from KEDIT (torben's I guess) */
    netFile = _url;
    netFile.detach();
    KURL u( netFile.data() );
    if ( u.isMalformed())
    {
        QMessageBox::warning (0, i18n("Error"), i18n("Malformed URL"));
        return;
    }

    // Just a usual file ?
    if ( strcmp( u.protocol(), "file" ) == 0 && !u.hasSubProtocol() )
    {
        // Decode %xx escapes in the path before opening.
        QString decoded( u.path() );
        KURL::decodeURL( decoded );
        open( decoded, _mode );
        return;
    }

    // A KFM transfer is already running — refuse to start another.
    if ( kfm != 0L )
    {
        QMessageBox::warning (0, i18n("Error"), i18n("KHexdit is already waiting\nfor an internet job to finish\n\nWait until this one is finished\nor stop the running one."));
        return;
    }

    kfm = new KFM;
    if ( !kfm->isOK() )
    {
        QMessageBox::warning (0, i18n("Error"), i18n("Could not start KFM"));
        delete kfm;
        kfm = 0L;
        return;
    }

    // Download to a unique temp file; slotKFMFinished() opens it afterwards.
    tmpFile.sprintf( "file:/tmp/khexdit%li", time( 0L ) );
    connect( kfm, SIGNAL( finished() ), SLOT( slotKFMFinished() ) );
    kfm->copy( netFile.data(), tmpFile.data() );
    kfmAction = HexWidget::GET;
    //openMode = _mode;
}
// Completes a KFM transfer started by openURL(): after a download (GET)
// the temp copy is opened; after an upload (PUT) the temp copy is
// removed.  In both cases the KFM job object is destroyed.
void HexWidget::slotKFMFinished()
{
    const bool wasGet = ( kfmAction == HexWidget::GET );
    const bool wasPut = ( kfmAction == HexWidget::PUT );

    if ( wasGet )
    {
        // Decode the temp file URL's path and open the downloaded copy.
        KURL u(tmpFile);
        QString decoded( u.path() );
        KURL::decodeURL( decoded );
        open( decoded, netFile, READWRITE );
    }

    if ( wasPut )
        unlink( tmpFile.data() );

    if ( wasGet || wasPut )
    {
        delete kfm;
        kfm = 0L;
    }
}
void ViStatistician::nextFile() { if(mFiles.isEmpty()) { if(mMode == Pearson) printFileFinalPearson(); else printFileFinal(); quit(); } else { mCurrentFile = mFiles.dequeue(); mCurrentObject->clearBuffers(); mCurrentObject.setNull(); mCurrentObject = ViAudioObject::create(); mCurrentObject->setFilePath(ViAudio::Target, mCurrentFile); mCurrentObject->mDisbaleFinishSignal = true; // The finish signal sometimes gives a seg fault QObject::connect(mCurrentObject.data(), SIGNAL(decoded()), this, SLOT(process())); mCurrentObject->decode(); } }
void ViNoiseBatcher::nextFile() { if(mFiles.isEmpty()) { quit(); } else { initParams(); for(int i = 0; i < mParamsStart.size(); ++i) mParamsCurrent[i] = mParamsStart[i]; mCurrentFile = mFiles.dequeue(); mCurrentObject->clearBuffers(); mCurrentObject.setNull(); mCurrentObject = ViAudioObject::create(); mCurrentObject->setFilePath(ViAudio::Target, mCurrentFile); mCurrentObject->mDisbaleFinishSignal = true; // The finish signal sometimes gives a seg fault QObject::connect(mCurrentObject.data(), SIGNAL(decoded()), this, SLOT(process())); mCurrentObject->decode(); } }
// Expands XML character entities (&amp; etc.) in `encoded`.  Entity names
// and their replacements come from the file-level xmlEntity / xmlEntLen /
// rawEntity tables (null-terminated entity list).  Unrecognized '&'
// sequences are copied through unchanged.
std::string XMLUtils::Decode(const std::string& encoded)
{
    // Fast path: no ampersand means nothing to expand.
    std::string::size_type iAmp = encoded.find(AMP);
    if (iAmp == std::string::npos)
        return encoded;

    // Copy everything before the first '&' verbatim.
    std::string decoded(encoded, 0, iAmp);
    std::string::size_type iSize = encoded.size();
    decoded.reserve(iSize);

    const char* ens = encoded.c_str();
    while (iAmp != iSize)
    {
        if (encoded[iAmp] == AMP && iAmp+1 < iSize)
        {
            int iEntity;
            // Try each known entity name against the text after the '&'.
            for (iEntity=0; xmlEntity[iEntity] != 0; ++iEntity)
            {
                if (strncmp(ens+iAmp+1, xmlEntity[iEntity], xmlEntLen[iEntity]) == 0)
                {
                    decoded += rawEntity[iEntity];
                    iAmp += xmlEntLen[iEntity]+1; // skip '&' plus the entity name
                    break;
                }
            }
            if (xmlEntity[iEntity] == 0) // unrecognized sequence
            {
                decoded += encoded[iAmp++];
            }
        }
        else
        {
            decoded += encoded[iAmp++];
        }
    }
    return decoded;
}
// First stage after decoding: optionally generates artificial noise into
// the buffers, clears buffers not needed for the mask step, then kicks off
// noise-mask generation; process2() runs when noiseGenerated() fires.
void ViBenchMarker3::process1(bool generate)
{
    // Detach so a later decode doesn't re-enter this slot.
    QObject::disconnect(mCurrentObject.data(), SIGNAL(decoded()), this, SLOT(process1()));

    if(generate)
    {
        // Synthesise corrupted audio and the ground-truth noise mask.
        ViNoiseCreator creator;
        creator.createNoise(mCurrentObject->buffer(ViAudio::Target), mCurrentObject->buffer(ViAudio::Corrupted), mCurrentObject->buffer(ViAudio::CustomMask), mCurrentObject->buffer(ViAudio::Custom));
    }

    mCurrentObject->clearBuffer(ViAudio::Target);
    mCurrentObject->clearBuffer(ViAudio::Noise);
    mCurrentObject->clearBuffer(ViAudio::NoiseMask);

    /*if(mParamsStart.size() == 1) mDetector->setParameters(mParamsCurrent[0]);
    else if(mParamsStart.size() == 2) mDetector->setParameters(mParamsCurrent[0], mParamsCurrent[1]);
    else if(mParamsStart.size() == 3) mDetector->setParameters(mParamsCurrent[0], mParamsCurrent[1], mParamsCurrent[2]);
    else if(mParamsStart.size() == 4) mDetector->setParameters(mParamsCurrent[0], mParamsCurrent[1], mParamsCurrent[2], mParamsCurrent[3]);
    //else { cout << "Invalid parameter count of "<<mParamsStart.size()<<". Min: 1, Max: 4" << endl; quit(); }*/

    // Time the mask generation; process2() picks up the elapsed time.
    QObject::connect(mCurrentObject.data(), SIGNAL(noiseGenerated()), this, SLOT(process2()));
    mTime.restart();
    mCurrentObject->generateNoiseMask(mDetector);
}
// Tests Botan's all-or-nothing (AONT) package transform with AES-128:
// packaging must be reversible, and flipping any single bit of the
// package must make unpackaging produce different data.
std::vector<Test::Result> run() override
    {
    Test::Result result("Package transform");

    std::unique_ptr<Botan::BlockCipher> cipher(Botan::BlockCipher::create("AES-128"));

    // Random input of random length (0-255 bytes).
    std::vector<uint8_t> input = unlock(Test::rng().random_vec(Test::rng().next_byte()));

    // The packaged output is one block larger than the input.
    std::vector<uint8_t> output(input.size() + cipher->block_size());

    // aont_package owns/deletes the passed cipher object, kind of a bogus API
    Botan::aont_package(Test::rng(), cipher->clone(), input.data(), input.size(), output.data());

    std::vector<uint8_t> decoded(output.size() - cipher->block_size());
    Botan::aont_unpackage(cipher->clone(), output.data(), output.size(), decoded.data());
    result.test_eq("Package transform is reversible", decoded, input);

    // A single bit flip anywhere must scramble the decode...
    output[0] ^= 1;
    Botan::aont_unpackage(cipher->clone(), output.data(), output.size(), decoded.data());
    result.test_ne("Bitflip breaks package transform", decoded, input);

    // ...and undoing the flip must restore reversibility.
    output[0] ^= 1;
    Botan::aont_unpackage(cipher->clone(), output.data(), output.size(), decoded.data());
    result.test_eq("Package transform is still reversible", decoded, input);

    // More tests including KATs would be useful for these functions

    return std::vector<Test::Result>{result};
    }
// Decompresses the LZMA-packed distance-field data for glyph `index` and
// copies it row by row into `p`, whose rows are `pitch` bytes apart
// (pitch may exceed char_size_ for padded destinations).
void KFont::GetDistanceData(uint8_t* p, uint32_t pitch, int32_t index) const
{
    // Decompressed glyph data is a char_size_ x char_size_ byte square.
    std::vector<uint8_t> decoded(char_size_ * char_size_);

    // First call with a null buffer queries the compressed size only.
    uint32_t size;
    this->GetLZMADistanceData(nullptr, size, index);
    std::vector<uint8_t> in_data(size);
    this->GetLZMADistanceData(&in_data[0], size, index);

    // in_data = LZMA props header followed by the compressed payload.
    SizeT s_out_len = static_cast<SizeT>(decoded.size());
    SizeT s_src_len = static_cast<SizeT>(in_data.size() - LZMA_PROPS_SIZE);
    LZMALoader::Instance().LzmaUncompress(static_cast<Byte*>(&decoded[0]), &s_out_len, &in_data[LZMA_PROPS_SIZE], &s_src_len, &in_data[0], LZMA_PROPS_SIZE);

    // Copy one glyph row per destination row.
    uint8_t const * char_data = &decoded[0];
    for (uint32_t y = 0; y < char_size_; ++ y)
    {
        std::memcpy(p, char_data, char_size_);
        p += pitch;
        char_data += char_size_;
    }
}
// Fetches a single contact setting into *dbv.
// Lookup order: settings cache first, then the mmap'ed database blob;
// for a metacontact whose setting is missing, retries against the
// default sub-contact (goto LBL_Seek).
// Returns 0 on success, 1 on failure/not-found, 2 if the setting is
// marked deleted.  When isStatic is set, string/blob results are copied
// into the caller-provided buffer (dbv->pszVal/pbVal, sized by
// cchVal/cpbVal) instead of being mir_alloc'ed.
int CDb3Mmap::GetContactSettingWorker(MCONTACT contactID, LPCSTR szModule, LPCSTR szSetting, DBVARIANT *dbv, int isStatic)
{
    if (szSetting == NULL || szModule == NULL) return 1;

    // the db format can't tolerate more than 255 bytes of space (incl. null) for settings+module name
    int settingNameLen = (int)mir_strlen(szSetting);
    int moduleNameLen = (int)mir_strlen(szModule);
    if (settingNameLen > 0xFE) {
        #ifdef _DEBUG
        OutputDebugStringA("GetContactSettingWorker() got a > 255 setting name length. \n");
        #endif
        return 1;
    }
    if (moduleNameLen > 0xFE) {
        #ifdef _DEBUG
        OutputDebugStringA("GetContactSettingWorker() got a > 255 module name length. \n");
        #endif
        return 1;
    }

    mir_cslock lck(m_csDbAccess);

LBL_Seek:
    char *szCachedSettingName = m_cache->GetCachedSetting(szModule, szSetting, moduleNameLen, settingNameLen);
    log3("get [%08p] %s (%p)", hContact, szCachedSettingName, szCachedSettingName);

    // --- cache hit path ---------------------------------------------------
    DBVARIANT *pCachedValue = m_cache->GetCachedValuePtr(contactID, szCachedSettingName, 0);
    if (pCachedValue != NULL) {
        if (pCachedValue->type == DBVT_ASCIIZ || pCachedValue->type == DBVT_UTF8) {
            // Preserve the caller's buffer pointer/size before overwriting dbv.
            int cbOrigLen = dbv->cchVal;
            char *cbOrigPtr = dbv->pszVal;
            memcpy(dbv, pCachedValue, sizeof(DBVARIANT));
            if (isStatic) {
                int cbLen = 0;
                if (pCachedValue->pszVal != NULL)
                    cbLen = (int)mir_strlen(pCachedValue->pszVal);

                // Truncate to the caller's buffer, leaving room for the NUL.
                cbOrigLen--;
                dbv->pszVal = cbOrigPtr;
                if (cbLen < cbOrigLen)
                    cbOrigLen = cbLen;
                memcpy(dbv->pszVal, pCachedValue->pszVal, cbOrigLen);
                dbv->pszVal[cbOrigLen] = 0;
                dbv->cchVal = cbLen;    // report the full (untruncated) length
            }
            else {
                dbv->pszVal = (char*)mir_alloc(mir_strlen(pCachedValue->pszVal) + 1);
                mir_strcpy(dbv->pszVal, pCachedValue->pszVal);
            }
        }
        else memcpy(dbv, pCachedValue, sizeof(DBVARIANT));

        log2("get cached %s (%p)", printVariant(dbv), pCachedValue);
        // A cached DELETED tombstone means "known absent".
        return (pCachedValue->type == DBVT_DELETED) ? 1 : 0;
    }

    // never look db for the resident variable
    if (szCachedSettingName[-1] != 0)
        return 1;

    // --- database path ----------------------------------------------------
    DBCachedContact *cc;
    DWORD ofsContact = GetContactOffset(contactID, &cc);

    DWORD ofsModuleName = GetModuleNameOfs(szModule);
    DBContact dbc = *(DBContact*)DBRead(ofsContact, NULL);
    if (dbc.signature != DBCONTACT_SIGNATURE)
        return 1;

    DWORD ofsSettingsGroup = GetSettingsGroupOfsByModuleNameOfs(&dbc, ofsModuleName);
    if (ofsSettingsGroup) {
        int bytesRemaining;
        unsigned varLen;
        DWORD ofsBlobPtr = ofsSettingsGroup + offsetof(DBContactSettings, blob);
        PBYTE pBlob = DBRead(ofsBlobPtr, &bytesRemaining);
        // Walk the [len][name][type][value] records until a zero length byte.
        while (pBlob[0]) {
            NeedBytes(1 + settingNameLen);
            if (pBlob[0] == settingNameLen && !memcmp(pBlob + 1, szSetting, settingNameLen)) {
                MoveAlong(1 + settingNameLen);
                NeedBytes(5);
                // Static callers cannot receive a variable-length value of a
                // different variable-length type than they asked for.
                if (isStatic && (pBlob[0] & DBVTF_VARIABLELENGTH) && VLT(dbv->type) != VLT(pBlob[0]))
                    return 1;

                BYTE iType = dbv->type = pBlob[0];
                switch (iType) {
                case DBVT_DELETED: /* this setting is deleted */
                    dbv->type = DBVT_DELETED;
                    return 2;

                case DBVT_BYTE: dbv->bVal = pBlob[1]; break;
                case DBVT_WORD: memmove(&(dbv->wVal), (PWORD)(pBlob + 1), 2); break;
                case DBVT_DWORD: memmove(&(dbv->dVal), (PDWORD)(pBlob + 1), 4); break;

                case DBVT_UTF8:
                case DBVT_ASCIIZ:
                    varLen = *(PWORD)(pBlob + 1);
                    NeedBytes(int(3 + varLen));
                    if (isStatic) {
                        dbv->cchVal--;
                        if (varLen < dbv->cchVal)
                            dbv->cchVal = varLen;
                        memmove(dbv->pszVal, pBlob + 3, dbv->cchVal); // decode
                        dbv->pszVal[dbv->cchVal] = 0;
                        dbv->cchVal = varLen;
                    }
                    else {
                        dbv->pszVal = (char*)mir_alloc(1 + varLen);
                        memmove(dbv->pszVal, pBlob + 3, varLen);
                        dbv->pszVal[varLen] = 0;
                    }
                    break;

                case DBVT_BLOB:
                    varLen = *(PWORD)(pBlob + 1);
                    NeedBytes(int(3 + varLen));
                    if (isStatic) {
                        if (varLen < dbv->cpbVal)
                            dbv->cpbVal = varLen;
                        memmove(dbv->pbVal, pBlob + 3, dbv->cpbVal);
                    }
                    else {
                        dbv->pbVal = (BYTE *)mir_alloc(varLen);
                        memmove(dbv->pbVal, pBlob + 3, varLen);
                    }
                    dbv->cpbVal = varLen;
                    break;

                case DBVT_ENCRYPTED:
                    if (m_crypto == NULL)
                        return 1;
                    else {
                        varLen = *(PWORD)(pBlob + 1);
                        NeedBytes(int(3 + varLen));
                        size_t realLen;
                        // ptrA frees the decoded copy when it goes out of scope.
                        ptrA decoded(m_crypto->decodeString(pBlob + 3, varLen, &realLen));
                        if (decoded == NULL)
                            return 1;

                        // Decrypted settings surface to the caller as UTF-8.
                        varLen = (WORD)realLen;
                        dbv->type = DBVT_UTF8;
                        if (isStatic) {
                            dbv->cchVal--;
                            if (varLen < dbv->cchVal)
                                dbv->cchVal = varLen;
                            memmove(dbv->pszVal, decoded, dbv->cchVal);
                            dbv->pszVal[dbv->cchVal] = 0;
                            dbv->cchVal = varLen;
                        }
                        else {
                            dbv->pszVal = (char*)mir_alloc(1 + varLen);
                            memmove(dbv->pszVal, decoded, varLen);
                            dbv->pszVal[varLen] = 0;
                        }
                    }
                    break;
                }

                /**** add to cache **********************/
                // Blobs and encrypted values are intentionally not cached.
                if (iType != DBVT_BLOB && iType != DBVT_ENCRYPTED) {
                    DBVARIANT *pCachedValue = m_cache->GetCachedValuePtr(contactID, szCachedSettingName, 1);
                    if (pCachedValue != NULL) {
                        m_cache->SetCachedVariant(dbv, pCachedValue);
                        log3("set cached [%08p] %s (%p)", hContact, szCachedSettingName, pCachedValue);
                    }
                }
                return 0;
            }
            // Not this setting: skip name, then skip the value.
            NeedBytes(1);
            MoveAlong(pBlob[0] + 1);
            NeedBytes(3);
            MoveAlong(1 + GetSettingValueLength(pBlob));
            NeedBytes(1);
        }
    }

    // try to get the missing mc setting from the active sub
    if (cc && cc->IsMeta() && ValidLookupName(szModule, szSetting)) {
        if (contactID = db_mc_getDefault(contactID)) {
            if (szModule = GetContactProto(contactID)) {
                moduleNameLen = (int)mir_strlen(szModule);
                goto LBL_Seek;
            }
        }
    }

    logg();
    return 1;
}
// Key-value backend variant of the setting fetch: checks the settings
// cache, then looks the record up in the hamsterdb settings store; for a
// metacontact whose setting is missing, retries against the default
// sub-contact (goto LBL_Seek).
// Returns 0 on success, 1 on failure/not-found, 2 if the setting is
// marked deleted.  When isStatic is set, string/blob results are copied
// into the caller-provided buffer instead of being mir_alloc'ed.
int CDbxKV::GetContactSettingWorker(MCONTACT contactID, LPCSTR szModule, LPCSTR szSetting, DBVARIANT *dbv, int isStatic)
{
    if (szSetting == NULL || szModule == NULL) return 1;

    // the db format can't tolerate more than 255 bytes of space (incl. null) for settings+module name
    int settingNameLen = (int)strlen(szSetting);
    int moduleNameLen = (int)strlen(szModule);
    if (settingNameLen > 0xFE) {
        #ifdef _DEBUG
        OutputDebugStringA("GetContactSettingWorker() got a > 255 setting name length. \n");
        #endif
        return 1;
    }
    if (moduleNameLen > 0xFE) {
        #ifdef _DEBUG
        OutputDebugStringA("GetContactSettingWorker() got a > 255 module name length. \n");
        #endif
        return 1;
    }

    mir_cslock lck(m_csDbAccess);

LBL_Seek:
    char *szCachedSettingName = m_cache->GetCachedSetting(szModule, szSetting, moduleNameLen, settingNameLen);

    // --- cache hit path ---------------------------------------------------
    DBVARIANT *pCachedValue = m_cache->GetCachedValuePtr(contactID, szCachedSettingName, 0);
    if (pCachedValue != NULL) {
        if (pCachedValue->type == DBVT_ASCIIZ || pCachedValue->type == DBVT_UTF8) {
            // Preserve the caller's buffer pointer/size before overwriting dbv.
            int cbOrigLen = dbv->cchVal;
            char *cbOrigPtr = dbv->pszVal;
            memcpy(dbv, pCachedValue, sizeof(DBVARIANT));
            if (isStatic) {
                int cbLen = 0;
                if (pCachedValue->pszVal != NULL)
                    cbLen = (int)strlen(pCachedValue->pszVal);

                // Truncate to the caller's buffer, leaving room for the NUL.
                cbOrigLen--;
                dbv->pszVal = cbOrigPtr;
                if (cbLen < cbOrigLen)
                    cbOrigLen = cbLen;
                memcpy(dbv->pszVal, pCachedValue->pszVal, cbOrigLen);
                dbv->pszVal[cbOrigLen] = 0;
                dbv->cchVal = cbLen;    // report the full (untruncated) length
            }
            else {
                dbv->pszVal = (char*)mir_alloc(strlen(pCachedValue->pszVal) + 1);
                strcpy(dbv->pszVal, pCachedValue->pszVal);
            }
        }
        else memcpy(dbv, pCachedValue, sizeof(DBVARIANT));

        // A cached DELETED tombstone means "known absent".
        return (pCachedValue->type == DBVT_DELETED) ? 1 : 0;
    }

    // never look db for the resident variable
    if (szCachedSettingName[-1] != 0)
        return 1;

    // --- database path ----------------------------------------------------
    DBCachedContact *cc = (contactID) ? m_cache->GetCachedContact(contactID) : NULL;

    // Compose the sorting key: contact id + module offset + setting name.
    DBSettingSortingKey keySearch;
    keySearch.dwContactID = contactID;
    keySearch.dwOfsModule = GetModuleNameOfs(szModule);
    strncpy_s(keySearch.szSettingName, szSetting, _TRUNCATE);

    ham_key_t key = { 2 * sizeof(DWORD) + settingNameLen, &keySearch };
    ham_record_t rec = { 0 };
    if (ham_db_find(m_dbSettings, NULL, &key, &rec, 0)) {
        // try to get the missing mc setting from the active sub
        if (cc && cc->IsMeta() && ValidLookupName(szModule, szSetting)) {
            if (contactID = db_mc_getDefault(contactID)) {
                if (szModule = GetContactProto(contactID)) {
                    moduleNameLen = (int)strlen(szModule);
                    goto LBL_Seek;
                }
            }
        }
        return 1;
    }

    // Record layout: [type byte][value bytes...].
    BYTE *pBlob = (BYTE*)rec.data;
    // Static callers cannot receive a variable-length value of a different
    // variable-length type than they asked for.
    if (isStatic && (pBlob[0] & DBVTF_VARIABLELENGTH) && VLT(dbv->type) != VLT(pBlob[0]))
        return 1;

    int varLen;
    BYTE iType = dbv->type = pBlob[0];
    pBlob++;
    switch (iType) {
    case DBVT_DELETED: /* this setting is deleted */
        dbv->type = DBVT_DELETED;
        return 2;

    case DBVT_BYTE: dbv->bVal = *pBlob; break;
    case DBVT_WORD: dbv->wVal = *(WORD*)pBlob; break;
    case DBVT_DWORD: dbv->dVal = *(DWORD*)pBlob; break;

    case DBVT_UTF8:
    case DBVT_ASCIIZ:
        varLen = *(WORD*)pBlob;
        pBlob += 2;
        if (isStatic) {
            dbv->cchVal--;
            if (varLen < dbv->cchVal)
                dbv->cchVal = varLen;
            memmove(dbv->pszVal, pBlob, dbv->cchVal); // decode
            dbv->pszVal[dbv->cchVal] = 0;
            dbv->cchVal = varLen;
        }
        else {
            dbv->pszVal = (char*)mir_alloc(1 + varLen);
            memmove(dbv->pszVal, pBlob, varLen);
            dbv->pszVal[varLen] = 0;
        }
        break;

    case DBVT_BLOB:
        varLen = *(WORD*)pBlob;
        pBlob += 2;
        if (isStatic) {
            if (varLen < dbv->cpbVal)
                dbv->cpbVal = varLen;
            memmove(dbv->pbVal, pBlob, dbv->cpbVal);
        }
        else {
            dbv->pbVal = (BYTE *)mir_alloc(varLen);
            memmove(dbv->pbVal, pBlob, varLen);
        }
        dbv->cpbVal = varLen;
        break;

    case DBVT_ENCRYPTED:
        if (m_crypto == NULL)
            return 1;

        varLen = *(WORD*)pBlob;
        pBlob += 2;
        size_t realLen;
        // ptrA frees the decoded copy when it goes out of scope.
        ptrA decoded(m_crypto->decodeString(pBlob, varLen, &realLen));
        if (decoded == NULL)
            return 1;

        // Decrypted settings surface to the caller as UTF-8.
        varLen = (WORD)realLen;
        dbv->type = DBVT_UTF8;
        if (isStatic) {
            dbv->cchVal--;
            if (varLen < dbv->cchVal)
                dbv->cchVal = varLen;
            memmove(dbv->pszVal, decoded, dbv->cchVal);
            dbv->pszVal[dbv->cchVal] = 0;
            dbv->cchVal = varLen;
        }
        else {
            dbv->pszVal = (char*)mir_alloc(1 + varLen);
            memmove(dbv->pszVal, decoded, varLen);
            dbv->pszVal[varLen] = 0;
        }
        break;
    }

    /**** add to cache **********************/
    // Blobs and encrypted values are intentionally not cached.
    if (iType != DBVT_BLOB && iType != DBVT_ENCRYPTED) {
        DBVARIANT *pCachedValue = m_cache->GetCachedValuePtr(contactID, szCachedSettingName, 1);
        if (pCachedValue != NULL)
            m_cache->SetCachedVariant(dbv, pCachedValue);
    }
    return 0;
}
void receiveRequest(int clientSockfd, string dir){ // read/write data from/into the connection bool isEnd = false; char buf[1000] = { 0 }; stringstream ssOverall; stringstream ssIteration; const string endingStr = "\r\n\r\n"; unsigned int endingCount = 0; while (!isEnd) { memset(buf, '\0', sizeof(buf)); if (recv(clientSockfd, buf, 1000, 0) == -1) { perror("recv"); return;// 5; } ssOverall << buf; ssIteration << buf; string currString = ssIteration.str(); for(unsigned int i = 0; i < currString.length(); i++){ if(currString[i] == endingStr[endingCount]) endingCount++; else endingCount = 0; if(endingCount == 4){ string totalReqString = ssOverall.str(); // cout << "--------totalReqString--------" << endl << totalReqString << endl; vector<uint8_t> decoded(totalReqString.begin(), totalReqString.end()); HttpRequest req = HttpRequest::decode((ByteBlob)decoded); HttpResponse resp = processRequest(req, dir); //Process the request object ByteBlob respBB = resp.encode(); uint8_t* respBytes = &respBB[0]; int respBytesSize = sizeof(uint8_t) * respBB.size(); // cout << "Num bytes being sent total: " << respBB.size() << endl; // cout << "Num bytes being sent, data: " << resp.getData().size() << endl; std::ofstream os("asdfasdfasdf.jpg"); if (!os) { std::cerr<<"Error writing to ..."<<std::endl; } else { HttpResponse decodedResp = HttpResponse::decode(respBB); // ByteBlob data = resp.getData(); ByteBlob data = decodedResp.getData(); for(ByteBlob::iterator x=data.begin(); x<data.end(); x++){ os << *x; } os.close(); } if (send(clientSockfd, respBytes, respBytesSize, 0) == -1) { perror("send"); return;// 6; } //ssOverall.str(""); doesn't matter, we're closing connection //endingCount = 0; isEnd = true; break; } } } close(clientSockfd); cout << "Server closing" << endl;; }
/*
 *TODO:
 * if output is null or dummy, the use duration to wait
 */
// Audio decoding/playback loop.  Takes packets from the demuxer queue,
// synchronises against the shared clock (external clock mode sleeps to
// absorb A/V drift), decodes, applies volume scaling per sample format,
// and feeds fixed-size chunks to the audio output — or just advances the
// clock when no output device is available.
void AudioThread::run()
{
    DPTR_D(AudioThread);
    //No decoder or output. No audio output is ok, just display picture
    if (!d.dec || !d.dec->isAvailable() || !d.outputSet)
        return;
    resetState();
    Q_ASSERT(d.clock != 0);
    AudioDecoder *dec = static_cast<AudioDecoder*>(d.dec);
    AudioOutput *ao = 0;
    // first() is not null even if list empty
    if (!d.outputSet->outputs().isEmpty())
        ao = static_cast<AudioOutput*>(d.outputSet->outputs().first());
    //TODO: not here
    d.init();
    //TODO: bool need_sync in private class
    bool is_external_clock = d.clock->clockType() == AVClock::ExternalClock;
    Packet pkt;
    while (!d.stop) {
        processNextTask();
        //TODO: why put it at the end of loop then playNextFrame() not work?
        if (tryPause()) { //DO NOT continue, or playNextFrame() will fail
            if (d.stop)
                break; //the queue is empty and may block. should setBlocking(false) wake up cond empty?
        } else {
            if (isPaused())
                continue;
        }
        // Empty queue after demux end means the stream is exhausted.
        if (d.packets.isEmpty() && !d.stop) {
            d.stop = d.demux_end;
        }
        if (d.stop) {
            qDebug("audio thread stop before take packet");
            break;
        }
        if (!pkt.isValid()) {
            pkt = d.packets.take(); //wait to dequeue
        }
        // An invalid packet is the flush marker for the codec context.
        if (!pkt.isValid()) {
            qDebug("Invalid packet! flush audio codec context!!!!!!!! audio queue size=%d", d.packets.size());
            dec->flush();
            continue;
        }
        bool skip_render = pkt.pts < d.render_pts0;
        // audio has no key frame, skip rendering equals to skip decoding
        if (skip_render) {
            d.clock->updateValue(pkt.pts);
            /*
             * audio may be too fast than video if skip without sleep
             * a frame is about 20ms. sleep time must be << frame time
             */
            qreal a_v = pkt.pts - d.clock->videoPts();
            //qDebug("skip audio decode at %f/%f v=%f a-v=%f", pkt.pts, d.render_pts0, d.clock->videoPts(), a_v);
            if (a_v > 0)
                msleep(qMin((ulong)300, ulong(a_v*1000.0)));
            else
                msleep(2);
            pkt = Packet(); //mark invalid to take next
            continue;
        }
        d.render_pts0 = 0;
        if (is_external_clock) {
            d.delay = pkt.pts - d.clock->value();
            /*
             *after seeking forward, a packet may be the old, v packet may be
             *the new packet, then the d.delay is very large, omit it.
             *TODO: 1. how to choose the value
             * 2. use last delay when seeking
             */
            if (qAbs(d.delay) < 2.718) {
                if (d.delay < -kSyncThreshold) { //Speed up. drop frame?
                    //continue;
                }
                // Sleep off the delay in kSyncThreshold slices so a stop
                // request can interrupt the wait.
                while (d.delay > kSyncThreshold) { //Slow down
                    //d.delay_cond.wait(&d.mutex, d.delay*1000); //replay may fail. why?
                    //qDebug("~~~~~wating for %f msecs", d.delay*1000);
                    usleep(kSyncThreshold * 1000000UL);
                    if (d.stop)
                        d.delay = 0;
                    else
                        d.delay -= kSyncThreshold;
                }
                if (d.delay > 0)
                    usleep(d.delay * 1000000UL);
            } else { //when to drop off?
                if (d.delay > 0) {
                    msleep(64);
                } else {
                    //audio packet not cleaned up?
                    continue;
                }
            }
        } else {
            d.clock->updateValue(pkt.pts);
        }
        //DO NOT decode and convert if ao is not available or mute!
        bool has_ao = ao && ao->isAvailable();
        //if (!has_ao) {//do not decode?
        // TODO: move resampler to AudioFrame, like VideoFrame does
        // Keep the resampler in sync with the output's speed/format; the
        // d.resample flag defers the reconfiguration by one iteration.
        if (has_ao && dec->resampler()) {
            if (dec->resampler()->speed() != ao->speed() || dec->resampler()->outAudioFormat() != ao->audioFormat()) {
                //resample later to ensure thread safe. TODO: test
                if (d.resample) {
                    qDebug("decoder set speed: %.2f", ao->speed());
                    dec->resampler()->setOutAudioFormat(ao->audioFormat());
                    dec->resampler()->setSpeed(ao->speed());
                    dec->resampler()->prepare();
                    d.resample = false;
                } else {
                    d.resample = true;
                }
            }
        } else {
            if (dec->resampler() && dec->resampler()->speed() != d.clock->speed()) {
                if (d.resample) {
                    qDebug("decoder set speed: %.2f", d.clock->speed());
                    dec->resampler()->setSpeed(d.clock->speed());
                    dec->resampler()->prepare();
                    d.resample = false;
                } else {
                    d.resample = true;
                }
            }
        }
        if (d.stop) {
            qDebug("audio thread stop before decode()");
            break;
        }
        QMutexLocker locker(&d.mutex);
        Q_UNUSED(locker);
        if (!dec->decode(pkt.data)) {
            qWarning("Decode audio failed");
            // Sleep roughly one packet duration to keep pacing on failure.
            qreal dt = pkt.pts - d.last_pts;
            if (dt > 0.618 || dt < 0) {
                dt = 0;
            }
            //qDebug("sleep %f", dt);
            //TODO: avoid acummulative error. External clock?
            msleep((unsigned long)(dt*1000.0));
            pkt = Packet();
            d.last_pts = d.clock->value(); //not pkt.pts! the delay is updated!
            continue;
        }
        QByteArray decoded(dec->data());
        int decodedSize = decoded.size();
        int decodedPos = 0;
        qreal delay = 0;
        //AudioFormat.durationForBytes() calculates int type internally. not accurate
        AudioFormat &af = dec->resampler()->inAudioFormat();
        qreal byte_rate = af.bytesPerSecond();
        // Feed the decoded buffer to the output in bufferSize()-sized chunks.
        while (decodedSize > 0) {
            if (d.stop) {
                qDebug("audio thread stop after decode()");
                break;
            }
            // TODO: set to format.bytesPerFrame()*1024?
            const int chunk = qMin(decodedSize, has_ao ? ao->bufferSize() : 1024*4);//int(max_len*byte_rate)); //AudioFormat.bytesForDuration
            const qreal chunk_delay = (qreal)chunk/(qreal)byte_rate;
            pkt.pts += chunk_delay;
            QByteArray decodedChunk(chunk, 0); //volume == 0 || mute
            if (has_ao) {
                //TODO: volume filter and other filters!!!
                if (!ao->isMute()) {
                    decodedChunk = QByteArray::fromRawData(decoded.constData() + decodedPos, chunk);
                    qreal vol = ao->volume();
                    // Apply software volume in-place, per sample format.
                    if (vol != 1.0) {
                        int len = decodedChunk.size()/ao->audioFormat().bytesPerSample();
                        switch (ao->audioFormat().sampleFormat()) {
                        case AudioFormat::SampleFormat_Unsigned8:
                        case AudioFormat::SampleFormat_Unsigned8Planar: {
                            quint8 *data = (quint8*)decodedChunk.data(); //TODO: other format?
                            for (int i = 0; i < len; data[i++] *= vol) {}
                        }
                            break;
                        case AudioFormat::SampleFormat_Signed16:
                        case AudioFormat::SampleFormat_Signed16Planar: {
                            qint16 *data = (qint16*)decodedChunk.data(); //TODO: other format?
                            for (int i = 0; i < len; data[i++] *= vol) {}
                        }
                            break;
                        case AudioFormat::SampleFormat_Signed32:
                        case AudioFormat::SampleFormat_Signed32Planar: {
                            qint32 *data = (qint32*)decodedChunk.data(); //TODO: other format?
                            for (int i = 0; i < len; data[i++] *= vol) {}
                        }
                            break;
                        case AudioFormat::SampleFormat_Float:
                        case AudioFormat::SampleFormat_FloatPlanar: {
                            float *data = (float*)decodedChunk.data(); //TODO: other format?
                            for (int i = 0; i < len; data[i++] *= vol) {}
                        }
                            break;
                        case AudioFormat::SampleFormat_Double:
                        case AudioFormat::SampleFormat_DoublePlanar: {
                            double *data = (double*)decodedChunk.data(); //TODO: other format?
                            for (int i = 0; i < len; data[i++] *= vol) {}
                        }
                            break;
                        default:
                            break;
                        }
                    }
                }
                ao->waitForNextBuffer();
                ao->receiveData(decodedChunk, pkt.pts);
                ao->play();
                d.clock->updateValue(ao->timestamp());
            } else {
                d.clock->updateDelay(delay += chunk_delay);
                /*
                 * why need this even if we add delay? and usleep sounds weird
                 * the advantage is if no audio device, the play speed is ok too
                 * So is portaudio blocking the thread when playing?
                 */
                static bool sWarn_no_ao = true; //FIXME: no warning when replay. warn only once
                if (sWarn_no_ao) {
                    qDebug("Audio output not available! msleep(%lu)", (unsigned long)((qreal)chunk/(qreal)byte_rate * 1000));
                    sWarn_no_ao = false;
                }
                //TODO: avoid acummulative error. External clock?
                msleep((unsigned long)(chunk_delay * 1000.0));
            }
            decodedPos += chunk;
            decodedSize -= chunk;
        }
        // Keep any bytes the decoder could not consume for the next round.
        int undecoded = dec->undecodedSize();
        if (undecoded > 0) {
            pkt.data.remove(0, pkt.data.size() - undecoded);
        } else {
            pkt = Packet();
        }
        d.last_pts = d.clock->value(); //not pkt.pts! the delay is updated!
    }
    qDebug("Audio thread stops running...");
}
void KeyFinderWorkerThread::run(){ if(!haveParams){ emit failed("No parameters."); return; } // initialise stream and decode file into it AudioStream* astrm = NULL; AudioFileDecoder* dec = AudioFileDecoder::getDecoder(filePath.toUtf8().data()); try{ astrm = dec->decodeFile(filePath.toUtf8().data()); }catch(Exception){ delete astrm; delete dec; emit failed("Could not decode file."); return; } delete dec; emit decoded(); // make audio stream monaural astrm->reduceToMono(); emit madeMono(); // downsample if necessary if(prefs.getDFactor() > 1){ Downsampler* ds = Downsampler::getDownsampler(prefs.getDFactor(),astrm->getFrameRate(),prefs.getLastFreq()); try{ astrm = ds->downsample(astrm,prefs.getDFactor()); }catch(Exception){ delete astrm; delete ds; emit failed("Downsampler failed."); return; } delete ds; emit downsampled(); } // start spectrum analysis SpectrumAnalyser* sa = NULL; Chromagram* ch = NULL; sa = SpectrumAnalyserFactory::getInstance()->getSpectrumAnalyser(astrm->getFrameRate(),prefs); ch = sa->chromagram(astrm); delete astrm; // note we don't delete the spectrum analyser; it stays in the centralised factory for reuse. ch->reduceTuningBins(prefs); emit producedFullChromagram(*ch); // reduce chromagram ch->reduceToOneOctave(prefs); emit producedOneOctaveChromagram(*ch); // get energy level across track to weight segments std::vector<float> loudness(ch->getHops()); for(int h=0; h<ch->getHops(); h++) for(int b=0; b<ch->getBins(); b++) loudness[h] += ch->getMagnitude(h,b); // get harmonic change signal Segmentation* hcdf = Segmentation::getSegmentation(prefs); std::vector<double> harmonicChangeSignal = hcdf->getRateOfChange(ch,prefs); emit producedHarmonicChangeSignal(harmonicChangeSignal); // get track segmentation std::vector<int> changes = hcdf->getSegments(harmonicChangeSignal,prefs); changes.push_back(ch->getHops()); // It used to be getHops()-1. But this doesn't crash. So we like it. 
// batch output of keychange locations for Beatles experiment //for(int i=1; i<changes.size(); i++) // don't want the leading zero // std::cout << filePath.substr(53) << "\t" << std::fixed << std::setprecision(2) << changes[i]*(prefs.getHopSize()/(44100.0/prefs.getDFactor())) << std::endl; // end experiment output // get key estimates for segments KeyClassifier hc(prefs); std::vector<int> keys(0); std::vector<float> keyWeights(24); for(int i=0; i<(signed)changes.size()-1; i++){ std::vector<double> chroma(ch->getBins()); for(int j=changes[i]; j<changes[i+1]; j++) for(int k=0; k<ch->getBins(); k++) chroma[k] += ch->getMagnitude(j,k); int key = hc.classify(chroma); for(int j=changes[i]; j<changes[i+1]; j++){ keys.push_back(key); if(key < 24) // ignore parts that were classified as silent keyWeights[key] += loudness[j]; } } keys.push_back(keys[keys.size()-1]); // put last key on again to match length of track delete ch; emit producedKeyEstimates(keys); // get global key int mostCommonKey = 24; float mostCommonKeyWeight = 0.0; for(int i=0; i<(signed)keyWeights.size(); i++){ if(keyWeights[i] > mostCommonKeyWeight){ mostCommonKeyWeight = keyWeights[i]; mostCommonKey = i; } } emit producedGlobalKeyEstimate(mostCommonKey); return; }
/*
 * This callback is called when |AudioFileStreamParseBytes| has enough data to
 * extract one or more MP3 packets.
 *
 * Drives the AudioConverter in a loop, pulling decoded PCM out of it until it
 * signals (via kNeedMoreData) that it has consumed all of the parsed packets
 * handed to it through |userData|. Each decoded buffer is wrapped in an
 * AudioData and pushed onto mAudioQueue.
 */
void
AppleMP3Reader::AudioSampleCallback(UInt32 aNumBytes, UInt32 aNumPackets,
                                    const void *aData,
                                    AudioStreamPacketDescription *aPackets)
{
  LOGD("got %u bytes, %u packets\n", aNumBytes, aNumPackets);

  // 1 frame per packet * num channels * 32-bit float
  uint32_t decodedSize = MAX_AUDIO_FRAMES * mAudioChannels *
                         sizeof(AudioDataValue);

  // descriptions for _decompressed_ audio packets. ignored.
  nsAutoArrayPtr<AudioStreamPacketDescription>
    packets(new AudioStreamPacketDescription[MAX_AUDIO_FRAMES]);

  // This API insists on having MP3 packets spoon-fed to it from a callback.
  // This structure exists only to pass our state and the result of the parser
  // on to the callback above.
  PassthroughUserData userData =
    { this, aNumPackets, aNumBytes, aData, aPackets, false };

  do {
    // Decompressed audio buffer — a fresh allocation per iteration because
    // ownership is handed off to AudioData via decoded.forget() below.
    nsAutoArrayPtr<uint8_t> decoded(new uint8_t[decodedSize]);

    AudioBufferList decBuffer;
    decBuffer.mNumberBuffers = 1;
    decBuffer.mBuffers[0].mNumberChannels = mAudioChannels;
    decBuffer.mBuffers[0].mDataByteSize = decodedSize;
    decBuffer.mBuffers[0].mData = decoded.get();

    // in: the max number of packets we can handle from the decoder.
    // out: the number of packets the decoder is actually returning.
    UInt32 numFrames = MAX_AUDIO_FRAMES;

    OSStatus rv = AudioConverterFillComplexBuffer(mAudioConverter,
                                                  PassthroughInputDataCallback,
                                                  &userData,
                                                  &numFrames /* in/out */,
                                                  &decBuffer,
                                                  packets.get());

    // kNeedMoreData is not an error — it just ends this drain loop below.
    if (rv && rv != kNeedMoreData) {
      LOGE("Error decoding audio stream: %x\n", rv);
      break;
    }
    // If we decoded zero frames then AudioConverterFillComplexBuffer is out
    // of data to provide.  We drained its internal buffer completely on the
    // last pass.
    if (numFrames == 0 && rv == kNeedMoreData) {
      LOGD("FillComplexBuffer out of data exactly\n");
      break;
    }

    // Timestamps derived from the running frame counter, in microseconds.
    int64_t time = FramesToUsecs(mCurrentAudioFrame, mAudioSampleRate).value();
    int64_t duration = FramesToUsecs(numFrames, mAudioSampleRate).value();

    LOGD("pushed audio at time %lfs; duration %lfs\n",
         (double)time / USECS_PER_S, (double)duration / USECS_PER_S);

    // decoded.forget() transfers ownership of the PCM buffer to AudioData.
    AudioData *audio = new AudioData(mDecoder->GetResource()->Tell(),
                                     time, duration, numFrames,
                                     reinterpret_cast<AudioDataValue *>(decoded.forget()),
                                     mAudioChannels, mAudioSampleRate);
    mAudioQueue.Push(audio);

    mCurrentAudioFrame += numFrames;

    if (rv == kNeedMoreData) {
      // No error; we just need more data.
      LOGD("FillComplexBuffer out of data\n");
      break;
    }
  } while (true);
}
void run() { bool doDecode = false; static const int samples = 450*1024; static const int inputBufferSize = samples; static const int sampleSpaceBefore = 256; static const int sampleSpaceAfter = 256; FileHandle in = File("captured.zdr", true).openRead(); UInt64 inputFileSizeRemaining = in.size(); Array<Byte> inputBuffer(inputBufferSize); int inputBufferRemaining = 0; Byte* inputPointer = 0; z_stream zs; memset(&zs, 0, sizeof(z_stream)); if (inflateInit(&zs) != Z_OK) throw Exception("inflateInit failed"); Array<Byte> buffer(sampleSpaceBefore + samples + sampleSpaceAfter); Byte* b = &buffer[0] + sampleSpaceBefore; for (int i = 0; i < sampleSpaceBefore; ++i) b[i - sampleSpaceBefore] = 0; for (int i = 0; i < sampleSpaceAfter; ++i) b[i + samples] = 0; int outputBytesRemaining = samples; Vector outputSize; NTSCCaptureDecoder<UInt32> decoder; if (doDecode) outputSize = Vector(1280, 720); else outputSize = Vector(1824, 253); Bitmap<UInt32> decoded(outputSize); if (doDecode) decoder.setOutputBuffer( decoded.subBitmap(Vector(160, 0), Vector(960, 720))); else decoder.setOutputBuffer(decoded); decoded.fill(0); decoder.setInputBuffer(b); decoder.setOutputPixelsPerLine(1140); decoder.setYScale(3); decoder.setDoDecode(doDecode); _handle = fopen("u:\\captured2.avi","wb"); if (!_handle) throw Exception("Can't open file"); _VectorCount = 1; _VectorTable[0].x = _VectorTable[0].y = 0; for (int s = 1; s <= 10; ++s) { for (int y = -s; y <= s; ++y) for (int x = -s; x <= s; ++x) { if (abs(x) == s || abs(y) == s) { _VectorTable[_VectorCount].x = x; _VectorTable[_VectorCount].y = y; ++_VectorCount; } } } memset(&_zstream, 0, sizeof(_zstream)); _pitch = outputSize.x + 2*MAX_VECTOR; if (deflateInit(&_zstream, 4) != Z_OK) throw Exception("deflateInit failed"); _bufSize = 4*outputSize.x*outputSize.y + 2*(1+(outputSize.x/8)) * (1+(outputSize.y/8))+1024; _bufSize += _bufSize / 1000; _buf = malloc(_bufSize); if (!_buf) throw Exception("Out of memory"); _index = (UInt8*)malloc(16*4096); if (!_buf) 
throw Exception("Out of memory"); _indexsize = 16*4096; _indexused = 8; for (int i = 0; i < AVI_HEADER_SIZE; ++i) fputc(0, _handle); _frames = 0; _written = 0; _audioused = 0; _audiowritten = 0; int blockwidth = 16; int blockheight = 16; _pixelsize = 4; _bufsize = (outputSize.y + 2*MAX_VECTOR)*_pitch*_pixelsize+2048; _buf1.allocate(_bufsize); _buf2.allocate(_bufsize); _work.allocate(_bufsize); int xblocks = (outputSize.x/blockwidth); int xleft = outputSize.x % blockwidth; if (xleft) ++xblocks; int yblocks = (outputSize.y/blockheight); int yleft = outputSize.y % blockheight; if (yleft) ++yblocks; _blockcount = yblocks*xblocks; _blocks = new FrameBlock[_blockcount]; int i = 0; for (int y = 0; y < yblocks; ++y) { for (int x = 0; x < xblocks; ++x) { _blocks[i].start = ((y*blockheight) + MAX_VECTOR)*_pitch+ x*blockwidth + MAX_VECTOR; if (xleft && x == xblocks - 1) { _blocks[i].dx = xleft; } else { _blocks[i].dx = blockwidth; } if (yleft && y == yblocks - 1) { _blocks[i].dy = yleft; } else { _blocks[i].dy = blockheight; } ++i; } } memset(&_buf1[0], 0, _bufsize); memset(&_buf2[0], 0, _bufsize); memset(&_work[0], 0, _bufsize); _oldframe = &_buf1[0]; _newframe = &_buf2[0]; do { if (inputBufferRemaining == 0) { int bytesToRead = inputBufferSize; if (bytesToRead > inputFileSizeRemaining) bytesToRead = inputFileSizeRemaining; inputPointer = &inputBuffer[0]; in.read(inputPointer, bytesToRead); inputBufferRemaining = bytesToRead; inputFileSizeRemaining -= bytesToRead; } zs.avail_in = inputBufferRemaining; zs.next_in = inputPointer; zs.avail_out = outputBytesRemaining; zs.next_out = b + samples - outputBytesRemaining; int r = inflate(&zs, Z_SYNC_FLUSH); if (r != Z_STREAM_END && r != Z_OK) throw Exception("inflate failed"); outputBytesRemaining = zs.avail_out; inputPointer = zs.next_in; inputBufferRemaining = zs.avail_in; if (outputBytesRemaining == 0) { if (inflateReset(&zs) != Z_OK) throw Exception("inflateReset failed"); outputBytesRemaining = samples; console.write("."); 
decoder.decode(); bool keyFrame = false; if (_frames % 300 == 0) keyFrame = true; keyFrame = true; /* replace oldframe with new frame */ unsigned char* copyFrame = _newframe; _newframe = _oldframe; _oldframe = copyFrame; compress.linesDone = 0; compress.writeSize = _bufSize; compress.writeDone = 1; compress.writeBuf = (unsigned char *)_buf; /* Set a pointer to the first byte which will contain info about this frame */ unsigned char* firstByte = compress.writeBuf; *firstByte = 0; //Reset the work buffer _workUsed = 0; _workPos = 0; if (keyFrame) { /* Make a keyframe */ *firstByte |= Mask_KeyFrame; KeyframeHeader* header = (KeyframeHeader *)(compress.writeBuf + compress.writeDone); header->high_version = 0; // DBZV_VERSION_HIGH; header->low_version = 1; // DBZV_VERSION_LOW; header->compression = 1; // COMPRESSION_ZLIB header->format = 8; // ZMBV_FORMAT_32BPP header->blockwidth = 16; header->blockheight = 16; compress.writeDone += sizeof(KeyframeHeader); /* Copy the new frame directly over */ /* Restart deflate */ deflateReset(&_zstream); } for (int i = 0; i < outputSize.y; ++i) { void* rowPointer = decoded.data() + decoded.stride()*i; unsigned char* destStart = _newframe + _pixelsize*(MAX_VECTOR+(compress.linesDone+MAX_VECTOR)*_pitch); memcpy(destStart, rowPointer, outputSize.x * _pixelsize); destStart += _pitch * _pixelsize; compress.linesDone++; } if ((*compress.writeBuf) & Mask_KeyFrame) { /* Add the full frame data */ unsigned char* readFrame = _newframe + _pixelsize*(MAX_VECTOR+MAX_VECTOR*_pitch); for (int i = 0; i < outputSize.y; ++i) { memcpy(&_work[_workUsed], readFrame, outputSize.x*_pixelsize); readFrame += _pitch*_pixelsize; _workUsed += outputSize.x*_pixelsize; } } else { /* Add the delta frame data */ int written = 0; int lastvector = 0; signed char* vectors = (signed char*)&_work[_workUsed]; /* Align the following xor data on 4 byte boundary*/ _workUsed = (_workUsed + _blockcount*2 + 3) & ~3; int totalx = 0; int totaly = 0; for (int b = 0; b < 
_blockcount; ++b) { FrameBlock* block = &_blocks[b]; int bestvx = 0; int bestvy = 0; int bestchange = CompareBlock(0, 0, block); int possibles = 64; for (int v = 0; v < _VectorCount && possibles; ++v) { if (bestchange < 4) break; int vx = _VectorTable[v].x; int vy = _VectorTable[v].y; if (PossibleBlock(vx, vy, block) < 4) { --possibles; int testchange = CompareBlock(vx, vy, block); if (testchange < bestchange) { bestchange = testchange; bestvx = vx; bestvy = vy; } } } vectors[b*2+0] = (bestvx << 1); vectors[b*2+1] = (bestvy << 1); if (bestchange) { vectors[b*2+0] |= 1; long* pold=((long*)_oldframe) + block->start + bestvy*_pitch + bestvx; long* pnew=((long*)_newframe) + block->start; for (int y = 0; y < block->dy; ++y) { for (int x = 0; x < block->dx; ++x) { *((long*)&_work[_workUsed]) = pnew[x] ^ pold[x]; _workUsed += sizeof(long); } pold += _pitch; pnew += _pitch; } } } } /* Create the actual frame with compression */ _zstream.next_in = (Bytef *)&_work[0]; _zstream.avail_in = _workUsed; _zstream.total_in = 0; _zstream.next_out = (Bytef *)(compress.writeBuf + compress.writeDone); _zstream.avail_out = compress.writeSize - compress.writeDone; _zstream.total_out = 0; int res = deflate(&_zstream, Z_SYNC_FLUSH); int written = compress.writeDone + _zstream.total_out; CAPTURE_AddAviChunk( "00dc", written, _buf, keyFrame ? 
0x10 : 0x0); ++_frames; //void CAPTURE_AddWave(UInt32 freq, UInt32 len, SInt16 * data) //{ // UInt left = WAVE_BUF - _audioused; // if (left > len) // left = len; // memcpy( &_audiobuf[_audioused], data, left*4); // _audioused += left; // _audiorate = freq; //} //if ( capture.video.audioused ) { // CAPTURE_AddAviChunk( "01wb", _audioused * 4, _audiobuf, 0); // _audiowritten = _audioused*4; // _audioused = 0; //} } } while (inputFileSizeRemaining != 0); if (inflateEnd(&zs) != Z_OK) throw Exception("inflateEnd failed"); int main_list; _header_pos = 0; /* Try and write an avi header */ AVIOUT4("RIFF"); // Riff header AVIOUTd(AVI_HEADER_SIZE + _written - 8 + _indexused); AVIOUT4("AVI "); AVIOUT4("LIST"); // List header main_list = _header_pos; AVIOUTd(0); // TODO size of list AVIOUT4("hdrl"); AVIOUT4("avih"); AVIOUTd(56); /* # of bytes to follow */ AVIOUTd((11*912*262*2)/315); /* Microseconds per frame */ // 1752256/105 ~= 16688 AVIOUTd(0); AVIOUTd(0); /* PaddingGranularity (whatever that might be) */ AVIOUTd(0x110); /* Flags,0x10 has index, 0x100 interleaved */ AVIOUTd(_frames); /* TotalFrames */ AVIOUTd(0); /* InitialFrames */ AVIOUTd(2); /* Stream count */ AVIOUTd(0); /* SuggestedBufferSize */ AVIOUTd(outputSize.x); /* Width */ AVIOUTd(outputSize.y); /* Height */ AVIOUTd(0); /* TimeScale: Unit used to measure time */ AVIOUTd(0); /* DataRate: Data rate of playback */ AVIOUTd(0); /* StartTime: Starting time of AVI data */ AVIOUTd(0); /* DataLength: Size of AVI data chunk */ /* Video stream list */ AVIOUT4("LIST"); AVIOUTd(4 + 8 + 56 + 8 + 40); /* Size of the list */ AVIOUT4("strl"); /* video stream header */ AVIOUT4("strh"); AVIOUTd(56); /* # of bytes to follow */ AVIOUT4("vids"); /* Type */ AVIOUT4(CODEC_4CC); /* Handler */ AVIOUTd(0); /* Flags */ AVIOUTd(0); /* Reserved, MS says: wPriority, wLanguage */ AVIOUTd(0); /* InitialFrames */ AVIOUTd(82137); /* Scale */ // 11*912*262 AVIOUTd(4921875); /* Rate: Rate/Scale == samples/second */ // 157500000 AVIOUTd(0); /* 
Start */ AVIOUTd(_frames); /* Length */ AVIOUTd(0); /* SuggestedBufferSize */ AVIOUTd(~0); /* Quality */ AVIOUTd(0); /* SampleSize */ AVIOUTd(0); /* Frame */ AVIOUTd(0); /* Frame */ /* The video stream format */ AVIOUT4("strf"); AVIOUTd(40); /* # of bytes to follow */ AVIOUTd(40); /* Size */ AVIOUTd(outputSize.x); /* Width */ AVIOUTd(outputSize.y); /* Height */ // OUTSHRT(1); OUTSHRT(24); /* Planes, Count */ AVIOUTd(0); AVIOUT4(CODEC_4CC); /* Compression */ AVIOUTd(outputSize.x*outputSize.y*4); /* SizeImage (in bytes?) */ AVIOUTd(0); /* XPelsPerMeter */ AVIOUTd(0); /* YPelsPerMeter */ AVIOUTd(0); /* ClrUsed: Number of colors used */ AVIOUTd(0); /* ClrImportant: Number of colors important */ /* Audio stream list */ AVIOUT4("LIST"); AVIOUTd(4 + 8 + 56 + 8 + 16); /* Length of list in bytes */ AVIOUT4("strl"); /* The audio stream header */ AVIOUT4("strh"); AVIOUTd(56); /* # of bytes to follow */ AVIOUT4("auds"); AVIOUTd(0); /* Format (Optionally) */ AVIOUTd(0); /* Flags */ AVIOUTd(0); /* Reserved, MS says: wPriority, wLanguage */ AVIOUTd(0); /* InitialFrames */ AVIOUTd(4); /* Scale */ AVIOUTd(_audiorate*4); /* Rate, actual rate is scale/rate */ AVIOUTd(0); /* Start */ if (!_audiorate) _audiorate = 1; AVIOUTd(_audiowritten/4); /* Length */ AVIOUTd(0); /* SuggestedBufferSize */ AVIOUTd(~0); /* Quality */ AVIOUTd(4); /* SampleSize */ AVIOUTd(0); /* Frame */ AVIOUTd(0); /* Frame */ /* The audio stream format */ AVIOUT4("strf"); AVIOUTd(16); /* # of bytes to follow */ AVIOUTw(1); /* Format, WAVE_ZMBV_FORMAT_PCM */ AVIOUTw(2); /* Number of channels */ AVIOUTd(_audiorate); /* SamplesPerSec */ AVIOUTd(_audiorate*4); /* AvgBytesPerSec*/ AVIOUTw(4); /* BlockAlign */ AVIOUTw(16); /* BitsPerSample */ int nmain = _header_pos - main_list - 4; /* Finish stream list, i.e. 
put number of bytes in the list to proper pos */ int njunk = AVI_HEADER_SIZE - 8 - 12 - _header_pos; AVIOUT4("JUNK"); AVIOUTd(njunk); /* Fix the size of the main list */ _header_pos = main_list; AVIOUTd(nmain); _header_pos = AVI_HEADER_SIZE - 12; AVIOUT4("LIST"); AVIOUTd(_written + 4); /* Length of list in bytes */ AVIOUT4("movi"); /* First add the index table to the end */ memcpy(_index, "idx1", 4); host_writed(_index+4, _indexused - 8 ); fwrite(_index, 1, _indexused, _handle); fseek(_handle, 0, SEEK_SET); fwrite(&_avi_header, 1, AVI_HEADER_SIZE, _handle); fclose(_handle); free(_index); free(_buf); _handle = 0; }