HIR::TypeRef HIR::TypeRef::get_field(size_t idx, size_t& ofs) const
{
    if( const auto* w = this->get_wrapper() )
    {
        if( w->type == TypeWrapper::Ty::Slice )
        {
            // TODO
            throw "TODO";
        }
        else if( w->type == TypeWrapper::Ty::Array )
        {
            LOG_ASSERT(idx < w->size, "Getting field on array with OOB index - " << idx << " >= " << w->size << " - " << *this);
            auto ity = this->get_inner();
            ofs = ity.get_size() * idx;
            return ity;
        }
        else
        {
            throw "ERROR";
        }
    }
    else
    {
        if( this->inner_type == RawType::Composite )
        {
            LOG_ASSERT(idx < this->composite_type->fields.size(), "Field " << idx << " out of bounds in type " << *this);
            ofs = this->composite_type->fields.at(idx).first;
            return this->composite_type->fields.at(idx).second;
        }
        else
        {
            ::std::cerr << *this << " doesn't have fields" << ::std::endl;
            throw "ERROR";
        }
    }
}
int registerWebHistory(JNIEnv* env)
{
    // Get notified of all changes to history items.
    WebCore::notifyHistoryItemChanged = historyItemChanged;
#ifdef UNIT_TEST
    unit_test();
#endif
    // Find WebHistoryItem, its constructor, and the update method.
    jclass clazz = env->FindClass("android/webkit/WebHistoryItem");
    LOG_ASSERT(clazz, "Unable to find class android/webkit/WebHistoryItem");
    gWebHistoryItem.mInit = env->GetMethodID(clazz, "<init>", "()V");
    LOG_ASSERT(gWebHistoryItem.mInit, "Could not find WebHistoryItem constructor");
    gWebHistoryItem.mUpdate = env->GetMethodID(clazz, "update",
            "(Ljava/lang/String;Ljava/lang/String;Ljava/lang/String;Landroid/graphics/Bitmap;[B)V");
    LOG_ASSERT(gWebHistoryItem.mUpdate, "Could not find method update in WebHistoryItem");

    // Find the field ids for mTitle and mUrl.
    gWebHistoryItem.mTitle = env->GetFieldID(clazz, "mTitle", "Ljava/lang/String;");
    LOG_ASSERT(gWebHistoryItem.mTitle, "Could not find field mTitle in WebHistoryItem");
    gWebHistoryItem.mUrl = env->GetFieldID(clazz, "mUrl", "Ljava/lang/String;");
    LOG_ASSERT(gWebHistoryItem.mUrl, "Could not find field mUrl in WebHistoryItem");
    env->DeleteLocalRef(clazz);

    // Find the WebBackForwardList object and method.
    clazz = env->FindClass("android/webkit/WebBackForwardList");
    LOG_ASSERT(clazz, "Unable to find class android/webkit/WebBackForwardList");
    gWebBackForwardList.mAddHistoryItem = env->GetMethodID(clazz, "addHistoryItem",
            "(Landroid/webkit/WebHistoryItem;)V");
    LOG_ASSERT(gWebBackForwardList.mAddHistoryItem, "Could not find method addHistoryItem");
    gWebBackForwardList.mRemoveHistoryItem = env->GetMethodID(clazz, "removeHistoryItem", "(I)V");
    LOG_ASSERT(gWebBackForwardList.mRemoveHistoryItem, "Could not find method removeHistoryItem");
    gWebBackForwardList.mSetCurrentIndex = env->GetMethodID(clazz, "setCurrentIndex", "(I)V");
    LOG_ASSERT(gWebBackForwardList.mSetCurrentIndex, "Could not find method setCurrentIndex");
    env->DeleteLocalRef(clazz);

    int result = jniRegisterNativeMethods(env, "android/webkit/WebBackForwardList",
            gWebBackForwardListMethods, NELEM(gWebBackForwardListMethods));
    return (result < 0) ? result : jniRegisterNativeMethods(env, "android/webkit/WebHistoryItem",
            gWebHistoryItemMethods, NELEM(gWebHistoryItemMethods));
}
ssize_t VectorImpl::replaceAt(const void* prototype, size_t index)
{
    LOG_ASSERT(index < size(), "[%p] replace: index=%d, size=%d", this, (int)index, (int)size());

    void* item = editItemLocation(index);
    if (item == 0)
        return NO_MEMORY;
    _do_destroy(item, 1);
    if (prototype == 0) {
        _do_construct(item, 1);
    } else {
        _do_copy(item, prototype, 1);
    }
    return ssize_t(index);
}
void WebCoreResourceLoader::Error(JNIEnv* env, jobject obj, jint id, jstring description,
        jstring failingUrl)
{
#ifdef ANDROID_INSTRUMENT
    TimeCounterAuto counter(TimeCounter::ResourceTimeCounter);
#endif
    LOGV("webcore_resourceloader error");
    WebCore::ResourceHandle* handle = GET_NATIVE_HANDLE(env, obj);
    LOG_ASSERT(handle, "nativeError must take a valid handle!");
    // ResourceLoader::didFail() can set handle to be NULL, we need to check
    if (!handle)
        return;

    handle->client()->didFail(handle, WebCore::ResourceError("", id,
            to_string(env, failingUrl), to_string(env, description)));
}
static char* allocFromUTF8(const char* in, size_t len)
{
    if (len > 0) {
        SharedBuffer* buf = SharedBuffer::alloc(len+1);
        LOG_ASSERT(buf, "Unable to allocate shared buffer");
        if (buf) {
            char* str = (char*)buf->data();
            memcpy(str, in, len);
            str[len] = 0;
            return str;
        }
        return NULL;
    }
    return getEmptyString();
}
void Loading::loadTheBackgroundTexture()
{
    LOG_ASSERT(NULL != gfx);

    if (!lp_CONFIG->skin_name.empty() && TA3D::Paths::Exists(lp_CONFIG->skin_name))
    {
        SKIN skin;
        skin.load_tdf(lp_CONFIG->skin_name);
        if (!skin.prefix.empty())
            pBackgroundTexture = gfx->load_texture_mask("gfx" + Paths::SeparatorAsString + "load.jpg", 7);
        else
            pBackgroundTexture = gfx->load_texture_mask("gfx" + Paths::SeparatorAsString + "load.jpg", 7);
    }
    else
        pBackgroundTexture = gfx->load_texture_mask("gfx" + Paths::SeparatorAsString + "load.jpg", 7);
}
OkState RpcServerManager::removeRemote(const std::string &name, const std::string &spec)
{
    const NamedService *old = _rpcsrvmap.lookup(name);
    if (old == nullptr) {
        // was alright already, remove any reservation too
        _rpcsrvmap.removeReservation(name);
        return OkState(0, "already done");
    }
    if (old->getSpec() != spec) {
        return OkState(1, "name registered, but with different spec");
    }
    std::unique_ptr<NamedService> td = _rpcsrvmap.remove(name);
    LOG_ASSERT(td.get() == old);
    return OkState(0, "done");
}
VectorImpl& VectorImpl::operator = (const VectorImpl& rhs)
{
    LOG_ASSERT(mItemSize == rhs.mItemSize,
        "Vector<> have different types (this=%p, rhs=%p)", this, &rhs);
    if (this != &rhs) {
        release_storage();
        if (rhs.mCount) {
            mStorage = rhs.mStorage;
            mCount = rhs.mCount;
            SharedBuffer::sharedBuffer(mStorage)->acquire();
        } else {
            mStorage = 0;
            mCount = 0;
        }
    }
    return *this;
}
Data FileUtils::readDataFromZip(const std::string& zipFilePath, const std::string& filename, size_t *size)
{
    Data retData;
    unzFile file = nullptr;
    *size = 0;

    do
    {
        BREAK_IF(zipFilePath.empty());

        file = unzOpen(zipFilePath.c_str());
        BREAK_IF(!file);

        // FIXME: Other platforms should use upstream minizip like mingw-w64
#ifdef MINIZIP_FROM_SYSTEM
        int ret = unzLocateFile(file, filename.c_str(), NULL);
#else
        int ret = unzLocateFile(file, filename.c_str(), 1);
#endif
        BREAK_IF(UNZ_OK != ret);

        char filePathA[260];
        unz_file_info fileInfo;
        ret = unzGetCurrentFileInfo(file, &fileInfo, filePathA, sizeof(filePathA), nullptr, 0, nullptr, 0);
        BREAK_IF(UNZ_OK != ret);

        ret = unzOpenCurrentFile(file);
        BREAK_IF(UNZ_OK != ret);

        unsigned char* buffer = (unsigned char*)malloc(fileInfo.uncompressed_size);
        int readedSize = unzReadCurrentFile(file, buffer, static_cast<unsigned>(fileInfo.uncompressed_size));
        LOG_ASSERT(readedSize == 0 || readedSize == (int)fileInfo.uncompressed_size, "the file size is wrong");
        UNUSED_ARG(readedSize);

        *size = fileInfo.uncompressed_size;
        unzCloseCurrentFile(file);

        retData.fastSet(buffer, *size, true);
    } while (0);

    if (file)
    {
        unzClose(file);
    }

    return retData;
}
size_t GenericHeader::Tag::getSize() const
{
    size_t ret = _name.size() + 2;
    switch (_type) {
    case TYPE_FLOAT:
    case TYPE_INTEGER:
        ret += 8;
        break;
    case TYPE_STRING:
        ret += _sVal.size() + 1;
        break;
    default:
        LOG_ASSERT(false);
    }
    return ret;
}
void RefBase::decStrong(const void* id) const
{
    weakref_impl* const refs = mRefs;
    refs->removeStrongRef(id);
    const int32_t c = android_atomic_dec(&refs->mStrong);
#if PRINT_REFS
    LOGD("decStrong of %p from %p: cnt=%d\n", this, id, c);
#endif
    LOG_ASSERT(c >= 1, "decStrong() called on %p too many times", refs);
    if (c == 1) {
        refs->mBase->onLastStrongRef(id);
        if ((refs->mFlags & OBJECT_LIFETIME_MASK) == OBJECT_LIFETIME_STRONG) {
            delete this;
        }
    }
    refs->decWeak(id);
}
//
// BaseSocketManager::GetHostByName - Chapter 19, page 683
//
unsigned int BaseSocketManager::GetHostByName(const std::string &hostName)
{
    // This will retrieve the ip details and put it into pHostEnt structure
    struct hostent *pHostEnt = gethostbyname(hostName.c_str());
    struct sockaddr_in tmpSockAddr; // placeholder for the ip address

    if (pHostEnt == NULL)
    {
        LOG_ASSERT(0 && _T("Error occurred"));
        return 0;
    }

    tmpSockAddr.sin_addr.s_addr = *(u_long *)pHostEnt->h_addr;
    //char* test = inet_ntoa(tmpSockAddr.sin_addr);
    //memcpy(&tmpSockAddr.sin_addr, pHostEnt->h_addr, pHostEnt->h_length);
    return ntohl(tmpSockAddr.sin_addr.s_addr);
}
bool FileAllocator::attach(FILE *f, FileOffset reserved_space, bool init)
{
#ifdef DEBUG_FA
    printf("FileAllocator::attach()\n");
#endif
    LOG_ASSERT(f != 0);
    this->f = f;
    this->reserved_space = reserved_space;
    if (fseek(this->f, 0, SEEK_END))
        throw GenericException(__FILE__, __LINE__, "fseek error");
    file_size = ftell(this->f);
    if (file_size < 0)
        throw GenericException(__FILE__, __LINE__, "ftell error");
    // File size should be
    // 0 for new file or at least reserved_space + list headers
    if (file_size == 0)
    {
        if (init == false)
            throw GenericException(__FILE__, __LINE__,
                                   "FileAllocator in read-only mode found empty file");
        // create empty list headers
        memset(&allocated_head, 0, list_node_size);
        memset(&free_head, 0, list_node_size);
        write_node(this->reserved_space, &allocated_head);
        write_node(this->reserved_space + list_node_size, &free_head);
        file_size = ftell(this->f);
        FileOffset expected = reserved_space + 2*list_node_size;
        if (file_size != expected)
            throw GenericException(__FILE__, __LINE__,
                                   "Initialization error: "
                                   "Expected file size %ld, found %ld",
                                   (long) expected, (long) file_size);
        return true;
    }
    FileOffset expected = this->reserved_space + 2*list_node_size;
    if (file_size < expected)
        throw GenericException(__FILE__, __LINE__,
                               "FileAllocator: Broken file header, "
                               "expected at least %ld bytes",
                               (long) expected);
    // read existing list headers
    read_node(this->reserved_space, &allocated_head);
    read_node(this->reserved_space + list_node_size, &free_head);
    return false; // Didn't initialize the file
}
bool FileUtils::writeDataToFile(Data retData, const std::string& fullPath)
{
    LOG_ASSERT(!fullPath.empty() && retData.getSize() != 0, "Invalid parameters.");

    do
    {
        size_t size = 0;
        const char* mode = "wb";

        // Write the data out to the file
        FILE *fp = fopen(fullPath.c_str(), mode);
        BREAK_IF(!fp);
        size = retData.getSize();

        fwrite(retData.getBytes(), size, 1, fp);
        fclose(fp);

        return true;
    } while (0);

    return false;
}
vespalib::asciistream & operator<<(vespalib::asciistream &out, const GenericHeader::Tag &tag)
{
    switch (tag.getType()) {
    case GenericHeader::Tag::TYPE_FLOAT:
        out << tag.asFloat();
        break;
    case GenericHeader::Tag::TYPE_INTEGER:
        out << tag.asInteger();
        break;
    case GenericHeader::Tag::TYPE_STRING:
        out << tag.asString();
        break;
    default:
        LOG_ASSERT(false);
    }
    return out;
}
void RefBase::incStrong(const void* id) const
{
    weakref_impl* const refs = mRefs;
    refs->incWeak(id);

    refs->addStrongRef(id);
    const int32_t c = android_atomic_inc(&refs->mStrong);
    LOG_ASSERT(c > 0, "incStrong() called on %p after last strong ref", refs);
#if PRINT_REFS
    LOGD("incStrong of %p from %p: cnt=%d\n", this, id, c);
#endif
    if (c != INITIAL_STRONG_VALUE) {
        return;
    }

    android_atomic_add(-INITIAL_STRONG_VALUE, &refs->mStrong);
    refs->mBase->onFirstRef();
}
static char* allocFromUTF16(const char16_t* in, size_t len)
{
    if (len == 0)
        return getEmptyString();

    const size_t bytes = utf8_length_from_utf16(in, len);

    SharedBuffer* buf = SharedBuffer::alloc(bytes+1);
    LOG_ASSERT(buf, "Unable to allocate shared buffer");
    if (buf) {
        char* str = (char*)buf->data();
        utf16_to_utf8(in, len, str, bytes+1);
        return str;
    }
    return getEmptyString();
}
ScanQueryProcessor::~ScanQueryProcessor()
{
    if (!mData) {
        return;
    }

    std::error_code ec;
    if (mBuffer) {
        mData->writeLast(mBuffer, mBufferWriter.data(), ec);
    } else {
        LOG_ASSERT(mTupleCount == 0, "Invalid buffer containing tuples");
        mData->writeLast(ec);
    }
    if (ec) {
        // TODO FIXME This leads to a leak (the ServerSocket does not notice that the scan has finished)
        LOG_ERROR("Error while flushing buffer [error = %1% %2%]", ec, ec.message());
    }

    LOG_DEBUG("Scan processor done [totalWritten = %1%]", mTotalWritten);
}
void Descriptor::updateBaseVersion()
{
    auto index = blockIndex(mBaseVersion + 1);

    // Process version blocks where all versions are marked as committed
    // Release the block and increase base version so that it is aligned to the next block
    for (; mDescriptor[index] == std::numeric_limits<BlockType>::max(); index = ((index + 1) % CAPACITY)) {
        mBaseVersion += ((mBaseVersion % BITS_PER_BLOCK != 0)
                ? (BITS_PER_BLOCK - (mBaseVersion % BITS_PER_BLOCK))
                : BITS_PER_BLOCK);
        mDescriptor[index] = 0x0u;
    }

    // Process the version block where the versions are only partially marked as committed
    // Check the block bit by bit and increase base version until the first uncommitted version is encountered
    for (; (mDescriptor[index] & (0x1u << (mBaseVersion % BITS_PER_BLOCK))) != 0x0u; ++mBaseVersion) {
    }

    LOG_ASSERT(blockIndex(mBaseVersion + 1) == index, "Base version and block index do not match");
}
void Client::populate(int16_t lower, int16_t upper, bool useCH)
{
    mCmds.execute<Command::POPULATE_WAREHOUSE>(
        [this, lower, upper, useCH](const err_code &ec, const std::tuple<bool, crossbow::string> &res) {
            if (ec) {
                LOG_ERROR(ec.message());
                return;
            }
            LOG_ASSERT(std::get<0>(res), std::get<1>(res));
            LOG_INFO(("Populated Warehouse " + crossbow::to_string(lower)));
            if (lower == upper) {
                mSocket.shutdown(Socket::shutdown_both);
                mSocket.close();
                return; // population done
            }
            populate(lower + 1, upper, useCH);
        },
        std::make_tuple(lower, useCH));
}
bool RefBase::weakref_type::attemptIncWeak(const void* id)
{
    weakref_impl* const impl = static_cast<weakref_impl*>(this);

    int32_t curCount = impl->mWeak;
    LOG_ASSERT(curCount >= 0, "attemptIncWeak called on %p after underflow", this);
    while (curCount > 0) {
        if (android_atomic_cmpxchg(curCount, curCount+1, &impl->mWeak) == 0) {
            break;
        }
        curCount = impl->mWeak;
    }

    if (curCount > 0) {
        impl->addWeakRef(id);
    }

    return curCount > 0;
}
void PhysicsComponent::BuildRigidBodyTransform(tinyxml2::XMLElement* pTransformElement)
{
    // FUTURE WORK Mrmike - this should be exactly the same as the TransformComponent - maybe factor into a helper method?
    LOG_ASSERT(pTransformElement);

    tinyxml2::XMLElement* pPositionElement = pTransformElement->FirstChildElement("Position");
    if (pPositionElement)
    {
        double x = 0;
        double y = 0;
        double z = 0;
        x = std::stod(pPositionElement->Attribute("x"));
        y = std::stod(pPositionElement->Attribute("y"));
        z = std::stod(pPositionElement->Attribute("z"));
        m_RigidBodyLocation = glm::vec3(x, y, z);
    }

    tinyxml2::XMLElement* pOrientationElement = pTransformElement->FirstChildElement("Orientation");
    if (pOrientationElement)
    {
        double yaw = 0;
        double pitch = 0;
        double roll = 0;
        yaw = std::stod(pOrientationElement->Attribute("yaw"));
        pitch = std::stod(pOrientationElement->Attribute("pitch"));
        roll = std::stod(pOrientationElement->Attribute("roll"));
        m_RigidBodyOrientation = glm::vec3((float)DEGREES_TO_RADIANS(yaw),
                                           (float)DEGREES_TO_RADIANS(pitch),
                                           (float)DEGREES_TO_RADIANS(roll));
    }

    tinyxml2::XMLElement* pScaleElement = pTransformElement->FirstChildElement("Scale");
    if (pScaleElement)
    {
        double x = 0;
        double y = 0;
        double z = 0;
        x = std::stod(pScaleElement->Attribute("x"));
        y = std::stod(pScaleElement->Attribute("y"));
        z = std::stod(pScaleElement->Attribute("z"));
        m_RigidBodyScale = glm::vec3((float)x, (float)y, (float)z);
    }
}
size_t GenericHeader::Tag::write(DataBuffer &buf) const
{
    int pos = buf.getDataLen();
    buf.writeBytes(_name.c_str(), _name.size() + 1);
    buf.writeInt8(_type);
    switch (_type) {
    case TYPE_FLOAT:
        buf.writeDouble(_fVal);
        break;
    case TYPE_INTEGER:
        buf.writeInt64(_iVal);
        break;
    case TYPE_STRING:
        buf.writeBytes(_sVal.c_str(), _sVal.size() + 1);
        break;
    default:
        LOG_ASSERT(false);
    }
    return buf.getDataLen() - pos;
}
ChainConfig(size_t n, const ParameterConfig& initialParamConf, const Proposal* propFunc = nullptr) :
    // in case of parallel tempering, setup more than one chain
    fPtChains( n, Chain() ),
    // prepare parameter configurations
    fDynamicParamConfigs( n, initialParamConf ),
    // clone the default proposal function
    fProposalFunctions( n )
{
    LOG_ASSERT( n > 0, "A Metropolis chain set requires at least 1 chain"
                << " (and corresponding beta value).");

    if (propFunc)
        for (auto& p : fProposalFunctions)
            p.reset( propFunc->Clone() );

    if (n > 0) {
        fNProposedSwaps.assign( n-1, 0 );
        fNAcceptedSwaps.assign( n-1, 0 );
    }
}
void RefBase::forceIncStrong(const void* id) const
{
    weakref_impl* const refs = mRefs;
    refs->incWeak(id);

    refs->addStrongRef(id);
    const int32_t c = android_atomic_inc(&refs->mStrong);
    LOG_ASSERT(c >= 0, "forceIncStrong called on %p after ref count underflow", refs);
#if PRINT_REFS
    LOGD("forceIncStrong of %p from %p: cnt=%d\n", this, id, c);
#endif

    switch (c) {
    case INITIAL_STRONG_VALUE:
        android_atomic_add(-INITIAL_STRONG_VALUE, &refs->mStrong);
        // fall through...
    case 0:
        refs->mBase->onFirstRef();
    }
}
Record::Record(Schema schema)
        : mSchema(std::move(schema)),
          mVariableOffset(0u)
{
    auto count = mSchema.fixedSizeFields().size() + mSchema.varSizeFields().size();
    mIdMap.reserve(count);
    mFieldMetaData.reserve(count);

    mStaticSize = crossbow::align(mSchema.nullFields(), 8u);

#ifndef NDEBUG
    auto lastAlignment = std::numeric_limits<size_t>::max();
    for (const auto& field : mSchema.fixedSizeFields()) {
        auto alignment = field.alignOf();
        LOG_ASSERT(lastAlignment >= alignment, "Alignment not in descending order");
        lastAlignment = alignment;
    }
#endif

    size_t idx = 0;
    uint16_t nullIdx = 0;
    for (const auto& field : mSchema.fixedSizeFields()) {
        mIdMap.insert(std::make_pair(field.name(), idx));
        mFieldMetaData.emplace_back(field, mStaticSize, field.isNotNull() ? 0 : nullIdx++);
        mStaticSize += field.staticSize();
        ++idx;
    }

    if (!mSchema.varSizeFields().empty()) {
        mStaticSize = crossbow::align(mStaticSize, 4u);
        mVariableOffset = mStaticSize;
        for (const auto& field : mSchema.varSizeFields()) {
            mIdMap.insert(std::make_pair(field.name(), idx));
            mFieldMetaData.emplace_back(field, mStaticSize, field.isNotNull() ? 0 : nullIdx++);
            mStaticSize += sizeof(uint32_t);
            ++idx;
        }
        // Allocate an additional entry for the last offset
        mStaticSize += sizeof(uint32_t);
    }
}
static void location_callback(GpsLocation* location)
{
    JNIEnv* env = AndroidRuntime::getJNIEnv();

    jbyteArray byteArray = env->NewByteArray(location->rawDataSize);
    LOG_ASSERT(byteArray, "Native could not create new byte[]");
    env->SetByteArrayRegion(byteArray, 0, location->rawDataSize, (const jbyte *) location->rawData);

    jstring java_string_map_url = NULL;
    if ((location->flags & GPS_LOCATION_HAS_MAP_URL) == GPS_LOCATION_HAS_MAP_URL) {
        java_string_map_url = env->NewStringUTF(location->map_url);
    }

    jstring java_string_map_index = NULL;
    if ((location->flags & GPS_LOCATION_HAS_MAP_INDEX) == GPS_LOCATION_HAS_MAP_INDEX) {
        char uuid_string_buf[UUID_STRING_LENGTH];
        convert_uuid_from_byte_array_to_string(location->map_index, uuid_string_buf);
        java_string_map_index = env->NewStringUTF(uuid_string_buf);
    }

    env->CallVoidMethod(mCallbacksObj, method_reportLocation, location->flags,
            (jdouble)location->latitude, (jdouble)location->longitude,
            (jdouble)location->altitude, (jfloat)location->speed,
            (jfloat)location->bearing, (jfloat)location->accuracy,
            (jlong)location->timestamp, location->position_source, byteArray,
            (jboolean)location->is_indoor, (jfloat)location->floor_number,
            java_string_map_url, java_string_map_index);

    env->DeleteLocalRef(byteArray);
    if (java_string_map_url != NULL) {
        env->DeleteLocalRef(java_string_map_url);
    }
    if (java_string_map_index != NULL) {
        env->DeleteLocalRef(java_string_map_index);
    }
    checkAndClearExceptionFromCallback(env, __FUNCTION__);
}
NTSTATUS Ext_CryptBlocks(PEXTENSION_CONTEXT ExtContext, PMDL pSourceMdl, PMDL pTargetMdl,
        SIZE_T size, SIZE_T sector, BOOLEAN Encrypt)
{
    NTSTATUS status = STATUS_SUCCESS;
    PVOID pSource = NULL, pTarget = NULL;
    CONST SIZE_T SectorSize = 512;
    SIZE_T SectorOffset = 0;

    if (!ExtContext || !pSourceMdl || !pTargetMdl)
        return STATUS_INVALID_PARAMETER;
    if (!ExtContext->pCipherContext)
        return STATUS_SUCCESS;

    pSource = MmGetSystemAddressForMdlSafe(pSourceMdl, NormalPagePriority);
    pTarget = MmGetSystemAddressForMdlSafe(pTargetMdl, NormalPagePriority);
    if (!pSource || !pTarget)
        return STATUS_INSUFFICIENT_RESOURCES;

    LOG_ASSERT(0 == size % SectorSize);
    EXTLOG(LL_VERBOSE, "VHD: %s 0x%X bytes\n", Encrypt ? "Encrypting" : "Decrypting", size);

    for (SectorOffset = 0; SectorOffset < size; SectorOffset += SectorSize)
    {
        PUCHAR pSourceSector = (PUCHAR)pSource + SectorOffset;
        PUCHAR pTargetSector = (PUCHAR)pTarget + SectorOffset;
        status = (Encrypt ? ExtContext->pCipherEngine->pfnEncrypt : ExtContext->pCipherEngine->pfnDecrypt)(
            ExtContext->pCipherContext, pSourceSector, pTargetSector, SectorSize, sector++);
        if (!NT_SUCCESS(status))
            break;
    }

    if (pSourceMdl && 0 != (pSourceMdl->MdlFlags & MDL_MAPPED_TO_SYSTEM_VA))
        MmUnmapLockedPages(pSource, pSourceMdl);
    if (pTargetMdl && 0 != (pTargetMdl->MdlFlags & MDL_MAPPED_TO_SYSTEM_VA))
        MmUnmapLockedPages(pTarget, pTargetMdl);

    return status;
}
jbyteArray WebHistory::Flatten(JNIEnv* env, WTF::Vector<char>& v, WebCore::HistoryItem* item)
{
    if (!item)
        return NULL;

    // Reserve a vector of chars with an initial size of HISTORY_MIN_SIZE.
    v.reserveCapacity(HISTORY_MIN_SIZE);

    // Write the top-level history item and then write all the children
    // recursively.
    LOG_ASSERT(item->bridge(), "Why don't we have a bridge object here?");
    write_item(v, item);
    write_children_recursive(v, item);

    // Try to create a new java byte array.
    jbyteArray b = env->NewByteArray(v.size());
    if (!b)
        return NULL;

    // Write our flattened data to the java array.
    env->SetByteArrayRegion(b, 0, v.size(), (const jbyte*)v.data());
    return b;
}
uint64_t RemoteCounter::incrementAndGet(store::ClientHandle& handle)
{
    if (mCounter == 0x0u && !mInit) {
        mInit = true;
        requestNewBatch(handle);
    }

    mFreshKeys.wait(handle.fiber(), [this] () {
        return (mCounter != mReserved) || (mNextCounter != 0x0u);
    });

    if (mCounter == mReserved) {
        LOG_ASSERT(mNextCounter != 0x0u, "Next counter must be non 0");
        mCounter = mNextCounter;
        mReserved = mNextCounter + RESERVED_BATCH;
        mNextCounter = 0x0u;
    }

    auto key = ++mCounter;
    if (mCounter + THRESHOLD == mReserved) {
        requestNewBatch(handle);
    }
    return key;
}