static bool canCallGetUserMedia(Document& document, String& errorMessage)
{
    bool requiresSecureConnection = document.frame()->settings().mediaCaptureRequiresSecureConnection();
    if (requiresSecureConnection && !isSecure(*document.loader())) {
        errorMessage = "Trying to call getUserMedia from an insecure document.";
        return false;
    }

    auto& topDocument = document.topDocument();
    if (&document != &topDocument) {
        auto& topOrigin = *topDocument.topOrigin();

        if (!document.securityOrigin()->isSameSchemeHostPort(&topOrigin)) {
            errorMessage = "Trying to call getUserMedia from a document with a different security origin than its top-level frame.";
            return false;
        }

        for (auto* ancestorDocument = document.parentDocument(); ancestorDocument != &topDocument; ancestorDocument = ancestorDocument->parentDocument()) {
            if (requiresSecureConnection && !isSecure(*ancestorDocument->loader())) {
                errorMessage = "Trying to call getUserMedia from a document with an insecure parent frame.";
                return false;
            }

            if (!ancestorDocument->securityOrigin()->isSameSchemeHostPort(&topOrigin)) {
                errorMessage = "Trying to call getUserMedia from a document with a different security origin than its top-level frame.";
                return false;
            }
        }
    }

    return true;
}
void Layer::setGeometry(
    const sp<const DisplayDevice>& hw,
    HWComposer::HWCLayerInterface& layer)
{
    LayerBaseClient::setGeometry(hw, layer);

    // enable this layer
    layer.setSkip(false);

    // we can't do alpha-fade with the hwc HAL
    const State& s(drawingState());
    if (s.alpha < 0xFF) {
        layer.setSkip(true);
    }

    if (isSecure() && !hw->isSecure()) {
        layer.setSkip(true);
    }

    /*
     * Transformations are applied in this order:
     * 1) buffer orientation/flip/mirror
     * 2) state transformation (window manager)
     * 3) layer orientation (screen orientation)
     * (NOTE: the matrices are multiplied in reverse order)
     */

    const Transform bufferOrientation(mCurrentTransform);
    const Transform tr(hw->getTransform() * s.transform * bufferOrientation);

    // this gives us only the "orientation" component of the transform
    const uint32_t finalTransform = tr.getOrientation();

    // we can only handle simple transformation
    if (finalTransform & Transform::ROT_INVALID) {
        layer.setSkip(true);
    } else {
        layer.setTransform(finalTransform);
    }
    layer.setCrop(computeBufferCrop());

    // [MTK] {{{
    hwc_color_t color;
    color.a = s.alpha;
    layer.setFillColor(color);
    layer.setTransform(finalTransform);
    layer.setIdentity(getIdentity());
    layer.setMatrix(tr);
    layer.setSecure((isSecure() || isProtected()));
    // [MTK] }}}
}
Sint32 MP_Socket::accept()
{
#ifndef PEGASUS_OS_ZOS
    return 1;
#else
    PEG_METHOD_ENTER(TRC_SSL, "MP_Socket::accept()");
    // ************************************************************************
    // This is a z/OS specific section. No other platform can port this.
    // Pegasus on z/OS has no OpenSSL but can use a transparent layer called
    // AT-TLS (Application Transparent Transport Layer Security) to handle
    // HTTPS connections.
    // ************************************************************************
    int rc;
    if (isSecure())
    {
        PEG_TRACE_CSTRING(TRC_SSL, Tracer::LEVEL4, "---> HTTPS processing.");
        rc = ATTLS_zOS_query();
    }
    else
    {
        // ********************************************************************
        // If the socket is a UNIX Domain socket on z/OS, the local security
        // credentials are read from the socket.
        // ********************************************************************
        LocalSocket_zOS_query();
        PEG_TRACE_CSTRING(TRC_SSL, Tracer::LEVEL4, "---> Normal HTTP processing.");
        rc = 1;
    }
    PEG_METHOD_EXIT();
    return rc;
#endif
}
Socket::Status TcpSocket::receive(void* data, std::size_t size, std::size_t& received)
{
    // First clear the variables to fill
    received = 0;

    // Check the destination buffer
    if (!data)
    {
        err() << "Cannot receive data from the network (the destination buffer is invalid)" << std::endl;
        return Error;
    }

    // Receive a chunk of bytes
    int sizeReceived;
    if (isSecure())
        sizeReceived = mbedtls_ssl_read(&getSecureData().ssl, static_cast<unsigned char*>(data), size);
    else
        sizeReceived = recv(getHandle(), static_cast<char*>(data), static_cast<int>(size), flags);

    // Check the number of bytes received
    if (sizeReceived > 0)
    {
        received = static_cast<std::size_t>(sizeReceived);
        return Done;
    }
    else if (sizeReceived == 0)
    {
        return Socket::Disconnected;
    }
    else
    {
        return priv::SocketImpl::getErrorStatus();
    }
}
Socket::Status TcpSocket::send(const void* data, std::size_t size, std::size_t& sent)
{
    // Check the parameters
    if (!data || (size == 0))
    {
        err() << "Cannot send data over the network (no data to send)" << std::endl;
        return Error;
    }

    // Loop until every byte has been sent
    int result = 0;
    for (sent = 0; sent < size; sent += result)
    {
        // Send a chunk of data
        if (isSecure())
            result = mbedtls_ssl_write(&getSecureData().ssl, static_cast<const unsigned char*>(data) + sent, size - sent);
        else
            result = ::send(getHandle(), static_cast<const char*>(data) + sent, size - sent, flags);

        // Check for errors
        if (result < 0)
        {
            Status status = priv::SocketImpl::getErrorStatus();

            if ((status == NotReady) && sent)
                return Partial;

            return status;
        }
    }

    return Done;
}
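Because this send() reports Partial when a non-blocking socket accepts only part of the buffer, a caller is expected to resume from the offset reported in the sent out-parameter. A minimal sketch, assuming only the SFML-style TcpSocket/Socket::Status API shown above; the sendAll helper name is hypothetical:

// Hypothetical helper: keep calling send() until the whole buffer is out,
// resuming from the offset reported in 'sent' after a partial send.
Socket::Status sendAll(TcpSocket& socket, const char* data, std::size_t size)
{
    std::size_t total = 0;
    while (total < size)
    {
        std::size_t sent = 0;
        Socket::Status status = socket.send(data + total, size - total, sent);
        total += sent;
        if (status == Socket::Partial || status == Socket::NotReady)
            continue; // try again from the new offset (a real caller would wait for writability)
        if (status != Socket::Done)
            return status; // Disconnected or Error: give up
    }
    return Socket::Done;
}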
int CtZrtpSession::startIfNotStarted(unsigned int uiSSRC, int streamNm)
{
    if (!(streamNm >= 0 && streamNm < AllStreams && streams[streamNm] != NULL))
        return 0;

    // Do not start the video stream before the audio stream is secure,
    // and never start a stream that is already running.
    if ((streamNm == VideoStream && !isSecure(AudioStream)) || streams[streamNm]->started)
        return 0;

    start(uiSSRC, streamNm == VideoStream ? CtZrtpSession::VideoStream : CtZrtpSession::AudioStream);
    return 0;
}
// Decode header and check parameters/validity for inbound short secureable frame.
// The buffer starts with the fl frame length byte.
//
// Parameters:
//  * buf     buffer to decode header from, of at least length buflen; never NULL
//  * buflen  available length in buf; if too small for encoded header routine will fail (return 0)
//
// Performs as many as possible of the 'Quick Integrity Checks' from the spec, eg SecureBasicFrame-V0.1-201601.txt
//  1) fl >= 4 (type, seq/il, bl, trailer bytes)
//  2) fl may be further constrained by system limits, typically to <= 63
//  3) type (the first frame byte) is never 0x00, 0x80, 0x7f, 0xff.
//  4) il <= 8 for initial implementations (internal node ID is 8 bytes)
//  5) il <= fl - 4 (ID length; minimum of 4 bytes of other overhead)
//  6) bl <= fl - 4 - il (body length; minimum of 4 bytes of other overhead)
//  7) the final frame byte (the final trailer byte) is never 0x00 nor 0xff (if whole frame available)
//  8) tl == 1 for non-secure, tl >= 1 for secure (tl = fl - 3 - il - bl)
// Note: fl = hl-1 + bl + tl = 3+il + bl + tl
//
// (If the header is invalid or the buffer too small, 0 is returned to indicate an error.)
// The fl byte in the structure is set to the frame length, else 0 in case of any error.
// Returns number of bytes of decoded header including nominally-leading fl length byte; 0 in case of error.
uint8_t SecurableFrameHeader::checkAndDecodeSmallFrameHeader(const uint8_t *const buf, uint8_t buflen)
{
    // Make frame 'invalid' until everything is finished and checks out.
    fl = 0;

    // If buf is NULL or clearly too small to contain a valid header then return an error.
    if(NULL == buf) { return(0); } // ERROR
    if(buflen < 4) { return(0); } // ERROR

    // Quick integrity checks from spec.
    //
    // 1) fl >= 4 (type, seq/il, bl, trailer bytes)
    const uint8_t fl_ = buf[0];
    if(fl_ < 4) { return(0); } // ERROR
    // 2) fl may be further constrained by system limits, typically to < 64, eg for 'small' frame.
    if(fl_ > maxSmallFrameSize) { return(0); } // ERROR
    // 3) type (the first frame byte) is never 0x00, 0x80, 0x7f, 0xff.
    fType = buf[1];
    const bool secure_ = isSecure();
    const FrameType_Secureable fType_ = (FrameType_Secureable)(fType & 0x7f);
    if((FTS_NONE == fType_) || (fType_ >= FTS_INVALID_HIGH)) { return(0); } // ERROR
    // 4) il <= 8 for initial implementations (internal node ID is 8 bytes)
    seqIl = buf[2];
    const uint8_t il_ = getIl();
    if(il_ > maxIDLength) { return(0); } // ERROR
    // 5) il <= fl - 4 (ID length; minimum of 4 bytes of other overhead)
    if(il_ > fl_ - 4) { return(0); } // ERROR
    // Header length including frame length byte.
    const uint8_t hlifl = 4 + il_;
    // If buffer doesn't contain enough data for the full header then return an error.
    if(hlifl > buflen) { return(0); } // ERROR
    // Capture the ID bytes, in the storage in the instance, if any.
    if(il_ > 0) { memcpy(id, buf+3, il_); }
    // 6) bl <= fl - 4 - il (body length; minimum of 4 bytes of other overhead)
    const uint8_t bl_ = buf[hlifl - 1];
    if(bl_ > fl_ - hlifl) { return(0); } // ERROR
    bl = bl_;
    // 7) ONLY CHECKED IF FULL FRAME AVAILABLE: the final frame byte (the final trailer byte) is never 0x00 nor 0xff.
    if(buflen > fl_)
    {
        const uint8_t lastByte = buf[fl_];
        if((0x00 == lastByte) || (0xff == lastByte)) { return(0); } // ERROR
    }
    // 8) tl == 1 for non-secure, tl >= 1 for secure (tl = fl - 3 - il - bl)
    const uint8_t tl_ = fl_ - 3 - il_ - bl; // Same calc, but getTl() can't be used as fl not yet set.
    if(!secure_) { if(1 != tl_) { return(0); } } // ERROR
    else if(0 == tl_) { return(0); } // ERROR

    // Set fl field to valid value as last action / side-effect.
    fl = fl_;

    // Return decoded header length including frame-length byte; body should immediately follow.
    return(hlifl); // SUCCESS!
}
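For context, a minimal sketch of how a receive path might drive this decoder, assuming the SecurableFrameHeader members visible above (fl, bl, id, getIl()) are accessible to the caller; the onFrameReceived/handleFrameBody names and rxBuf/rxLen parameters are illustrative only:

// Hypothetical receive-side use of checkAndDecodeSmallFrameHeader().
void onFrameReceived(const uint8_t *rxBuf, uint8_t rxLen)
{
    SecurableFrameHeader sfh;
    // Run the 'Quick Integrity Checks'; 0 means the header (and thus the frame) is invalid.
    const uint8_t hl = sfh.checkAndDecodeSmallFrameHeader(rxBuf, rxLen);
    if(0 == hl) { return; } // Drop malformed frame.
    // The whole frame occupies fl+1 bytes (the length byte itself plus fl further bytes),
    // so insist on that much data before touching the body or trailer.
    if(rxLen < sfh.fl + 1) { return; } // Truncated frame.
    // The body starts immediately after the decoded header and is bl bytes long.
    handleFrameBody(sfh.id, sfh.getIl(), rxBuf + hl, sfh.bl);
}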
ConnectionData* UserConnection::getPluginObject() noexcept
{
    resetEntity();

    pod.ip = pluginString(getRemoteIp());
    pod.object = this;
    pod.port = Util::toInt(port);
    pod.protocol = isSet(UserConnection::FLAG_NMDC) ? PROTOCOL_NMDC : PROTOCOL_ADC; // TODO: isSet(...) not practical if more than two protocols
    pod.isOp = isSet(UserConnection::FLAG_OP) ? True : False;
    pod.isSecure = isSecure() ? True : False;

    return &pod;
}
/*!
    Returns the raw form of this QNetworkCookie. The QByteArray
    returned by this function is suitable for an HTTP header, either
    in a server response (the Set-Cookie header) or the client
    request (the Cookie header). You can choose from one of two
    formats, using \a form.

    \sa parseCookies()
*/
QByteArray QNetworkCookie::toRawForm(RawForm form) const
{
    QByteArray result;
    if (d->name.isEmpty())
        return result;          // not a valid cookie

    result = d->name;
    result += '=';
    if ((d->value.contains(';') ||
         d->value.contains(',') ||
         d->value.contains(' ') ||
         d->value.contains('"')) &&
        (!d->value.startsWith('"') &&
         !d->value.endsWith('"'))) {
        result += '"';

        QByteArray value = d->value;
        value.replace('"', "\\\"");
        result += value;

        result += '"';
    } else {
        result += d->value;
    }

    if (form == Full) {
        // same as above, but encoding everything back
        if (isSecure())
            result += "; secure";
        if (isHttpOnly())
            result += "; HttpOnly";
        if (!isSessionCookie()) {
            result += "; expires=";
            result += QLocale::c().toString(d->expirationDate.toUTC(),
                                            QLatin1String("ddd, dd-MMM-yyyy hh:mm:ss 'GMT")).toLatin1();
        }
        if (!d->domain.isEmpty()) {
            result += "; domain=";
            QString domainNoDot = d->domain;
            if (domainNoDot.startsWith(QLatin1Char('.'))) {
                result += '.';
                domainNoDot = domainNoDot.mid(1);
            }
            result += QUrl::toAce(domainNoDot);
        }
        if (!d->path.isEmpty()) {
            result += "; path=";
            result += QUrl::toPercentEncoding(d->path, "/");
        }
    }
    return result;
}
ConnectionInfo CWizardIrcConnection::connection() const
{
    ConnectionInfo connection;
    connection.nick = nickName();
    connection.real = realName();
    connection.host = hostName();
    connection.port = port();
    connection.secure = isSecure();
    connection.user = userName();
    connection.pass = password();
    connection.name = connectionName();
    return connection;
}
ConnectionInfo Session::toConnection() const
{
    ConnectionInfo connection;
    connection.name = name();
    connection.secure = isSecure();
    connection.host = host();
    connection.port = port();
    connection.user = userName();
    connection.nick = nickName();
    connection.real = realName();
    connection.pass = password();
    connection.channels = channels();
    connection.quit = m_quit;
    return connection;
}
/*!
    Returns the raw form of this QNetworkCookie. The QByteArray
    returned by this function is suitable for an HTTP header, either
    in a server response (the Set-Cookie header) or the client
    request (the Cookie header). You can choose from one of two
    formats, using \a form.

    \sa parseCookies()
*/
QByteArray QNetworkCookie::toRawForm(RawForm form) const
{
    QByteArray result;
    if (d->name.isEmpty())
        return result;          // not a valid cookie

    result = d->name;
    result += '=';
    result += d->value;

    if (form == Full) {
        // same as above, but encoding everything back
        if (isSecure())
            result += "; secure";
        if (isHttpOnly())
            result += "; HttpOnly";
        if (!isSessionCookie()) {
            result += "; expires=";
            result += QLocale::c().toString(d->expirationDate.toUTC(),
                                            QLatin1String("ddd, dd-MMM-yyyy hh:mm:ss 'GMT")).toLatin1();
        }
        if (!d->domain.isEmpty()) {
            result += "; domain=";
            if (d->domain.startsWith(QLatin1Char('.'))) {
                result += '.';
                result += QUrl::toAce(d->domain.mid(1));
            } else {
                QHostAddress hostAddr(d->domain);
                if (hostAddr.protocol() == QAbstractSocket::IPv6Protocol) {
                    result += '[';
                    result += d->domain.toUtf8();
                    result += ']';
                } else {
                    result += QUrl::toAce(d->domain);
                }
            }
        }
        if (!d->path.isEmpty()) {
            result += "; path=";
            result += d->path.toUtf8();
        }
    }
    return result;
}
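As the doc comment says, the \a form argument selects between the bare name=value pair and the full Set-Cookie attribute list. A small usage sketch of the public QNetworkCookie API; the cookie name, value, and attribute values are arbitrary examples:

#include <QNetworkCookie>
#include <QDateTime>
#include <QDebug>

// Build a cookie and serialize it in both raw forms.
void dumpCookieForms()
{
    QNetworkCookie cookie("sessionid", "38afes7a8");
    cookie.setDomain(".example.com");
    cookie.setPath("/");
    cookie.setSecure(true);
    cookie.setHttpOnly(true);
    cookie.setExpirationDate(QDateTime::currentDateTimeUtc().addDays(7));

    // "sessionid=38afes7a8"
    qDebug() << cookie.toRawForm(QNetworkCookie::NameAndValueOnly);
    // "sessionid=38afes7a8; secure; HttpOnly; expires=...; domain=.example.com; path=/"
    qDebug() << cookie.toRawForm(QNetworkCookie::Full);
}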
api_return SessionApi::handleSocketConnect(ApiRequest& aRequest, bool aIsSecure, const WebSocketPtr& aSocket)
{
    auto sessionToken = JsonUtil::getField<string>("authorization", aRequest.getRequestBody(), false);

    auto session = WebServerManager::getInstance()->getUserManager().getSession(sessionToken);
    if (!session) {
        aRequest.setResponseErrorStr("Invalid session token");
        return websocketpp::http::status_code::bad_request;
    }

    if (session->isSecure() != aIsSecure) {
        aRequest.setResponseErrorStr("Invalid protocol");
        return websocketpp::http::status_code::bad_request;
    }

    session->onSocketConnected(aSocket);
    aSocket->setSession(session);

    return websocketpp::http::status_code::ok;
}
virtual void render(std::ostream& out) const
{
    if (!del) {
        cgicc::HTTPCookie::render(out);
    } else {
        out << "Set-Cookie:" << getName() << "=";
        string domain = getDomain();
        if (!domain.empty()) {
            out << "; Domain=" << domain;
        }
        string path = getPath();
        if (!path.empty()) {
            out << "; Path=" << path;
        }
        if (isSecure()) {
            out << "; Secure";
        }
        out << "; Expires=Fri, 01-Jan-1971 01:00:00 GMT; Version=1";
    }
}
void SipTransport::stateCallback(pjsip_transport_state state, const pjsip_transport_state_info *info)
{
    connected_ = state == PJSIP_TP_STATE_CONNECTED;

#if HAVE_TLS
    auto extInfo = static_cast<const pjsip_tls_state_info*>(info->ext_info);
    if (isSecure() && extInfo && extInfo->ssl_sock_info && extInfo->ssl_sock_info->established) {
        auto tlsInfo = extInfo->ssl_sock_info;
        tlsInfos_.proto = (pj_ssl_sock_proto)tlsInfo->proto;
        tlsInfos_.cipher = tlsInfo->cipher;
        tlsInfos_.verifyStatus = (pj_ssl_cert_verify_flag_t)tlsInfo->verify_status;
        const auto& peers = tlsInfo->remote_cert_info->raw_chain;
        std::vector<std::pair<const uint8_t*, const uint8_t*>> bits;
        bits.resize(peers.cnt);
        std::transform(peers.cert_raw, peers.cert_raw + peers.cnt, std::begin(bits),
                       [](const pj_str_t& crt) {
                           return std::make_pair((uint8_t*)crt.ptr,
                                                 (uint8_t*)(crt.ptr + crt.slen));
                       });
        tlsInfos_.peerCert = std::make_shared<dht::crypto::Certificate>(bits);
    } else {
        tlsInfos_ = {};
    }
#endif

    std::vector<SipTransportStateCallback> cbs;
    {
        std::lock_guard<std::mutex> lock(stateListenersMutex_);
        cbs.reserve(stateListeners_.size());
        for (auto& l : stateListeners_)
            cbs.push_back(l.second);
    }
    for (auto& cb : cbs)
        cb(state, info);
}
void think()
{
    title();
    draw();

    if (turn == COMPUTER) {
        printf("\nGood, now it's my turn, may I play? (1:yes) ");
        get();

        int sum;
        bool isSec = false;

        // Look for a move (remove x sticks from row y) that leaves the
        // computer in a "secure" position.
        for (int x = 1; x < 7 && !isSec; x++) {
            for (int y = 0; y < 4 && !isSec; y++) {
                if ((comb[y] == 0) || (comb[y] - x < 0))
                    continue;

                sum = newSum(y) + nBin[comb[y] - x];
                if (isSecure(sum) == true) {
                    isSec = true;
                    comb[y] -= x;
                    sticksCount -= x;
                }
            }
        }

        // No securing move found: remove a single stick from the first
        // non-empty row.
        if (isSec == false) {
            for (int i = 0; i < 4; i++) {
                if (comb[i] != 0) {
                    comb[i] -= 1;
                    sticksCount--;
                    break;
                }
            }
        }

        turn = PLAYER;
    } else if (turn == PLAYER) {
        int row, count;
        printf("Ok, now enter the row you want to play on (1-4): ");
        row = get() - 48;
        printf("Ok, now tell me how many pieces you want to remove: ");
        count = get() - 48;

        comb[row - 1] -= count;
        sticksCount -= count;

        turn = COMPUTER;
    }
}
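The computer's "secure position" test above appears to implement the classic Nim criterion via a precomputed binary table (nBin) and isSecure(sum). The standard, equivalent formulation is that a position is safe for the player who just moved exactly when the XOR (nim-sum) of all row sizes is zero; a minimal sketch of that check, with a hypothetical isSafePosition helper:

#include <stdbool.h>

// Standard Nim safety test: nim-sum of the rows is zero, i.e. every binary
// column contains an even number of 1s across the rows.
bool isSafePosition(const int rows[], int rowCount)
{
    int nimSum = 0;
    for (int i = 0; i < rowCount; i++)
        nimSum ^= rows[i];
    return nimSum == 0;
}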
/*!
    \internal
 */
QTextStream &QWebSocketHandshakeRequest::readFromStream(QTextStream &textStream)
{
    m_isValid = false;
    clear();
    if (Q_UNLIKELY(textStream.status() != QTextStream::Ok))
        return textStream;
    const QString requestLine = textStream.readLine();
    const QStringList tokens = requestLine.split(' ', QString::SkipEmptyParts);
    if (Q_UNLIKELY(tokens.length() < 3)) {
        m_isValid = false;
        clear();
        return textStream;
    }
    const QString verb(tokens.at(0));
    const QString resourceName(tokens.at(1));
    const QString httpProtocol(tokens.at(2));
    bool conversionOk = false;
#if QT_VERSION >= QT_VERSION_CHECK(5, 0, 2)
    const float httpVersion = httpProtocol.midRef(5).toFloat(&conversionOk);
#else
    const float httpVersion = httpProtocol.midRef(5).toString().toFloat(&conversionOk);
#endif
    if (Q_UNLIKELY(!conversionOk)) {
        clear();
        m_isValid = false;
        return textStream;
    }

    QString headerLine = textStream.readLine();
    m_headers.clear();
    while (!headerLine.isEmpty()) {
        const QStringList headerField = headerLine.split(QStringLiteral(": "),
                                                         QString::SkipEmptyParts);
        if (Q_UNLIKELY(headerField.length() < 2)) {
            clear();
            return textStream;
        }
        m_headers.insertMulti(headerField.at(0), headerField.at(1));
        headerLine = textStream.readLine();
    }

    const QString host = m_headers.value(QStringLiteral("Host"), QStringLiteral(""));
    m_requestUrl = QUrl::fromEncoded(resourceName.toLatin1());
    if (m_requestUrl.isRelative())
        m_requestUrl.setHost(host);
    if (m_requestUrl.scheme().isEmpty()) {
        const QString scheme = isSecure() ? QStringLiteral("wss") : QStringLiteral("ws");
        m_requestUrl.setScheme(scheme);
    }

    const QStringList versionLines = m_headers.values(QStringLiteral("Sec-WebSocket-Version"));
    for (QStringList::const_iterator v = versionLines.begin(); v != versionLines.end(); ++v) {
        const QStringList versions = (*v).split(QStringLiteral(","), QString::SkipEmptyParts);
        for (QStringList::const_iterator i = versions.begin(); i != versions.end(); ++i) {
            bool ok = false;
            (void)(*i).toUInt(&ok);
            if (!ok) {
                clear();
                return textStream;
            }
            const QWebSocketProtocol::Version ver =
                    QWebSocketProtocol::versionFromString((*i).trimmed());
            m_versions << ver;
        }
    }
    //sort in descending order
    std::sort(m_versions.begin(), m_versions.end(), std::greater<QWebSocketProtocol::Version>());

    m_key = m_headers.value(QStringLiteral("Sec-WebSocket-Key"), QStringLiteral(""));
    //must contain "Upgrade", case-insensitive
    const QString upgrade = m_headers.value(QStringLiteral("Upgrade"), QStringLiteral(""));
    //must be equal to "websocket", case-insensitive
    const QString connection = m_headers.value(QStringLiteral("Connection"), QStringLiteral(""));
    const QStringList connectionLine = connection.split(QStringLiteral(","), QString::SkipEmptyParts);
    QStringList connectionValues;
    for (QStringList::const_iterator c = connectionLine.begin(); c != connectionLine.end(); ++c)
        connectionValues << (*c).trimmed();

    //optional headers
    m_origin = m_headers.value(QStringLiteral("Sec-WebSocket-Origin"), QStringLiteral(""));
    const QStringList protocolLines = m_headers.values(QStringLiteral("Sec-WebSocket-Protocol"));
    for (QStringList::const_iterator pl = protocolLines.begin(); pl != protocolLines.end(); ++pl) {
        QStringList protocols = (*pl).split(QStringLiteral(","), QString::SkipEmptyParts);
        for (QStringList::const_iterator p = protocols.begin(); p != protocols.end(); ++p)
            m_protocols << (*p).trimmed();
    }
    const QStringList extensionLines = m_headers.values(QStringLiteral("Sec-WebSocket-Extensions"));
    for (QStringList::const_iterator el = extensionLines.begin(); el != extensionLines.end(); ++el) {
        QStringList extensions = (*el).split(QStringLiteral(","), QString::SkipEmptyParts);
        for (QStringList::const_iterator e = extensions.begin(); e != extensions.end(); ++e)
            m_extensions << (*e).trimmed();
    }

    //TODO: authentication field

    m_isValid = !(host.isEmpty() ||
                  resourceName.isEmpty() ||
                  m_versions.isEmpty() ||
                  m_key.isEmpty() ||
                  (verb != QStringLiteral("GET")) ||
                  (!conversionOk || (httpVersion < 1.1f)) ||
                  (upgrade.toLower() != QStringLiteral("websocket")) ||
                  (!connectionValues.contains(QStringLiteral("upgrade"), Qt::CaseInsensitive)));
    if (Q_UNLIKELY(!m_isValid))
        clear();
    return textStream;
}
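For reference, this is the shape of client handshake the validity expression above is looking for: a GET request over HTTP/1.1 with Host, Sec-WebSocket-Key, Sec-WebSocket-Version, Upgrade: websocket, and a Connection header containing "upgrade". The sketch below just spells that wire format out (the key is the RFC 6455 sample nonce, the path and host are placeholders); it is not taken from the Qt sources:

// Minimal RFC 6455 opening handshake satisfying the checks above.
static const char exampleHandshake[] =
    "GET /chat HTTP/1.1\r\n"
    "Host: server.example.com\r\n"
    "Upgrade: websocket\r\n"
    "Connection: Upgrade\r\n"
    "Sec-WebSocket-Key: dGhlIHNhbXBsZSBub25jZQ==\r\n"
    "Sec-WebSocket-Version: 13\r\n"
    "\r\n";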
void SIPCall::startAllMedia()
{
    if (isSecure() && not transport_->isSecure()) {
        RING_ERR("[call:%s] Can't perform secure call over insecure SIP transport",
                 getCallId().c_str());
        onFailure(EPROTONOSUPPORT);
        return;
    }
    auto slots = sdp_->getMediaSlots();
    unsigned ice_comp_id = 0;
    bool peer_holding {true};
    int slotN = -1;

    for (const auto& slot : slots) {
        ++slotN;
        const auto& local = slot.first;
        const auto& remote = slot.second;

        if (local.type != remote.type) {
            RING_ERR("[call:%s] [SDP:slot#%u] Inconsistent media types between local and remote",
                     getCallId().c_str(), slotN);
            continue;
        }

        RtpSession* rtp = local.type == MEDIA_AUDIO
            ? static_cast<RtpSession*>(avformatrtp_.get())
#ifdef RING_VIDEO
            : static_cast<RtpSession*>(&videortp_);
#else
            : nullptr;
#endif
        if (not rtp)
            continue;

        if (!local.codec) {
            RING_WARN("[call:%s] [SDP:slot#%u] Missing local codec", getCallId().c_str(), slotN);
            continue;
        }
        if (!remote.codec) {
            RING_WARN("[call:%s] [SDP:slot#%u] Missing remote codec", getCallId().c_str(), slotN);
            continue;
        }

        peer_holding &= remote.holding;

        if (isSecure() && (not local.crypto || not remote.crypto)) {
            RING_ERR("[call:%s] [SDP:slot#%u] Can't perform secure call over insecure RTP transport",
                     getCallId().c_str(), slotN);
            continue;
        }

#ifdef RING_VIDEO
        if (local.type == MEDIA_VIDEO)
            videortp_.switchInput(videoInput_);
#endif

        rtp->updateMedia(remote, local);
        if (isIceRunning()) {
            rtp->start(newIceSocket(ice_comp_id + 0), newIceSocket(ice_comp_id + 1));
            ice_comp_id += 2;
        } else
            rtp->start();

        switch (local.type) {
#ifdef RING_VIDEO
            case MEDIA_VIDEO:
                isVideoMuted_ = videoInput_.empty();
                break;
#endif
            case MEDIA_AUDIO:
                isAudioMuted_ = not rtp->isSending();
                break;
            default:
                break;
        }
    }
}
void Layer::onDraw(const sp<const DisplayDevice>& hw, const Region& clip) const
{
#ifdef STE_HARDWARE
    // Convert the texture to a native format if need be.
    // convert() returns immediately if no conversion is necessary.
    if (mSurfaceTexture != NULL) {
        status_t res = mSurfaceTexture->convert();
        if (res != NO_ERROR) {
            ALOGE("Layer::onDraw: texture conversion failed. "
                "Texture content for this layer will not be initialized.");
        }
    }
#endif

    ATRACE_CALL();

    if (CC_UNLIKELY(mActiveBuffer == 0)) {
        // the texture has not been created yet, this Layer has
        // in fact never been drawn into. This happens frequently with
        // SurfaceView because the WindowManager can't know when the client
        // has drawn the first time.

        // If there is nothing under us, we paint the screen in black, otherwise
        // we just skip this update.

        // figure out if there is something below us
        Region under;
        const SurfaceFlinger::LayerVector& drawingLayers(
                mFlinger->mDrawingState.layersSortedByZ);
        const size_t count = drawingLayers.size();
        for (size_t i=0 ; i<count ; ++i) {
            const sp<LayerBase>& layer(drawingLayers[i]);
            if (layer.get() == static_cast<LayerBase const*>(this))
                break;
            under.orSelf( hw->getTransform().transform(layer->visibleRegion) );
        }
        // if not everything below us is covered, we plug the holes!
        Region holes(clip.subtract(under));
        if (!holes.isEmpty()) {
            clearWithOpenGL(hw, holes, 0, 0, 0, 1);
        }
        return;
    }

    status_t err = mSurfaceTexture->doGLFenceWait();
    if (err != OK) {
        ALOGE("onDraw: failed waiting for fence: %d", err);
        // Go ahead and draw the buffer anyway; no matter what we do the screen
        // is probably going to have something visibly wrong.
    }

    bool blackOutLayer = isProtected() || (isSecure() && !hw->isSecure());

    if (!blackOutLayer) {
        // TODO: we could be more subtle with isFixedSize()
        const bool useFiltering = getFiltering() || needsFiltering(hw) || isFixedSize();

        // Query the texture matrix given our current filtering mode.
        float textureMatrix[16];
        mSurfaceTexture->setFilteringEnabled(useFiltering);
        mSurfaceTexture->getTransformMatrix(textureMatrix);

        // Set things up for texturing.
        glBindTexture(GL_TEXTURE_EXTERNAL_OES, mTextureName);
        GLenum filter = GL_NEAREST;
        if (useFiltering) {
            filter = GL_LINEAR;
        }
        glTexParameterx(GL_TEXTURE_EXTERNAL_OES, GL_TEXTURE_MAG_FILTER, filter);
        glTexParameterx(GL_TEXTURE_EXTERNAL_OES, GL_TEXTURE_MIN_FILTER, filter);
        glMatrixMode(GL_TEXTURE);
        glLoadMatrixf(textureMatrix);
        glMatrixMode(GL_MODELVIEW);
        glDisable(GL_TEXTURE_2D);
        glEnable(GL_TEXTURE_EXTERNAL_OES);
    } else {
        glBindTexture(GL_TEXTURE_2D, mFlinger->getProtectedTexName());
        glMatrixMode(GL_TEXTURE);
        glLoadIdentity();
        glMatrixMode(GL_MODELVIEW);
        glDisable(GL_TEXTURE_EXTERNAL_OES);
        glEnable(GL_TEXTURE_2D);
    }

    drawWithOpenGL(hw, clip);

    glDisable(GL_TEXTURE_EXTERNAL_OES);
    glDisable(GL_TEXTURE_2D);
}
void Layer::setGeometry(
    const sp<const DisplayDevice>& hw,
    HWComposer::HWCLayerInterface& layer)
{
    layer.setDefaultState();

    // enable this layer
    layer.setSkip(false);

    if (isSecure() && !hw->isSecure()) {
        layer.setSkip(true);
    }

    // this gives us only the "orientation" component of the transform
    const State& s(getDrawingState());
    if (!isOpaque() || s.alpha != 0xFF) {
        layer.setBlending(mPremultipliedAlpha ?
                HWC_BLENDING_PREMULT :
                HWC_BLENDING_COVERAGE);
    }

    // apply the layer's transform, followed by the display's global transform
    // here we're guaranteed that the layer's transform preserves rects
    Rect frame(s.transform.transform(computeBounds()));
    frame.intersect(hw->getViewport(), &frame);
    const Transform& tr(hw->getTransform());
    layer.setFrame(tr.transform(frame));

#ifdef QCOM_BSP
    // set dest_rect to display width and height, if external_only flag
    // for the layer is enabled or if its yuvLayer in extended mode.
    uint32_t x = 0, y = 0;
    uint32_t w = hw->getWidth();
    uint32_t h = hw->getHeight();
    bool extendedMode = SurfaceFlinger::isExtendedMode();
    if(isExtOnly()) {
        // Position: fullscreen for ext_only
        Rect r(0, 0, w, h);
        layer.setFrame(r);
    } else if(hw->getDisplayType() > 0 && (extendedMode && isYuvLayer())) {
        // Need to position the video full screen on external with aspect ratio
        Rect r = getAspectRatio(hw, s.active.w, s.active.h);
        layer.setFrame(r);
    }
#endif

    layer.setCrop(computeCrop(hw));
    layer.setPlaneAlpha(s.alpha);

    /*
     * Transformations are applied in this order:
     * 1) buffer orientation/flip/mirror
     * 2) state transformation (window manager)
     * 3) layer orientation (screen orientation)
     * (NOTE: the matrices are multiplied in reverse order)
     */

    const Transform bufferOrientation(mCurrentTransform);
    Transform transform(tr * s.transform * bufferOrientation);

    if (mSurfaceFlingerConsumer->getTransformToDisplayInverse()) {
        /*
         * the code below applies the display's inverse transform to the buffer
         */
        uint32_t invTransform = hw->getOrientationTransform();
        // calculate the inverse transform
        if (invTransform & NATIVE_WINDOW_TRANSFORM_ROT_90) {
            invTransform ^= NATIVE_WINDOW_TRANSFORM_FLIP_V |
                    NATIVE_WINDOW_TRANSFORM_FLIP_H;
        }
        // and apply to the current transform
        transform = transform * Transform(invTransform);
    }

    // this gives us only the "orientation" component of the transform
    const uint32_t orientation = transform.getOrientation();
    if (orientation & Transform::ROT_INVALID) {
        // we can only handle simple transformation
        layer.setSkip(true);
    } else {
        layer.setTransform(orientation);
    }
}
Socket::Status TcpSocket::connect(const IpAddress& remoteAddress, unsigned short remotePort, Time timeout)
{
    // Create the internal socket if it doesn't exist
    create();

    if (isSecure())
    {
        // Ports go up to 65535: five digits plus the null terminator
        char port[6];
        sprintf(port, "%d", remotePort);
        if (mbedtls_net_connect(&getSecureData().socket, remoteAddress.toString().c_str(), port, MBEDTLS_NET_PROTO_TCP) != 0)
            return priv::SocketImpl::getErrorStatus();

        int ret;
        while ((ret = mbedtls_ssl_handshake(&getSecureData().ssl)) != 0)
            if (ret != MBEDTLS_ERR_SSL_WANT_READ && ret != MBEDTLS_ERR_SSL_WANT_WRITE)
                return Error;
    }
    else
    {
        // Create the remote address
        sockaddr_in address = priv::SocketImpl::createAddress(remoteAddress.toInteger(), remotePort);

        if (timeout <= Time::Zero)
        {
            // ----- We're not using a timeout: just try to connect -----

            // Connect the socket
            if (::connect(getHandle(), reinterpret_cast<sockaddr*>(&address), sizeof(address)) == -1)
                return priv::SocketImpl::getErrorStatus();

            // Connection succeeded
            return Done;
        }
        else
        {
            // ----- We're using a timeout: we'll need a few tricks to make it work -----

            // Save the previous blocking state
            bool blocking = isBlocking();

            // Switch to non-blocking to enable our connection timeout
            if (blocking)
                setBlocking(false);

            // Try to connect to the remote address
            if (::connect(getHandle(), reinterpret_cast<sockaddr*>(&address), sizeof(address)) >= 0)
            {
                // We got instantly connected! (it may not happen a lot...)
                setBlocking(blocking);
                return Done;
            }

            // Get the error status
            Status status = priv::SocketImpl::getErrorStatus();

            // If we were in non-blocking mode, return immediately
            if (!blocking)
                return status;

            // Otherwise, wait until something happens to our socket (success, timeout or error)
            if (status == Socket::NotReady)
            {
                // Setup the selector
                fd_set selector;
                FD_ZERO(&selector);
                FD_SET(getHandle(), &selector);

                // Setup the timeout
                timeval time;
                time.tv_sec  = static_cast<long>(timeout.asMicroseconds() / 1000000);
                time.tv_usec = static_cast<long>(timeout.asMicroseconds() % 1000000);

                // Wait for something to write on our socket (which means that the connection request has returned)
                if (select(static_cast<int>(getHandle() + 1), NULL, &selector, NULL, &time) > 0)
                {
                    // At this point the connection may have been either accepted or refused.
                    // To know whether it's a success or a failure, we must check the address of the connected peer
                    if (getRemoteAddress() != cpp3ds::IpAddress::None)
                    {
                        // Connection accepted
                        status = Done;
                    }
                    else
                    {
                        // Connection refused
                        status = priv::SocketImpl::getErrorStatus();
                    }
                }
                else
                {
                    // Failed to connect before timeout is over
                    status = priv::SocketImpl::getErrorStatus();
                }
            }

            // Switch back to blocking mode
            setBlocking(true);

            return status;
        }
    }
}
void Layer::onDraw(const sp<const DisplayDevice>& hw, const Region& clip) const
{
    ATRACE_CALL();

    if (CC_UNLIKELY(mActiveBuffer == 0)) {
        // the texture has not been created yet, this Layer has
        // in fact never been drawn into. This happens frequently with
        // SurfaceView because the WindowManager can't know when the client
        // has drawn the first time.

        // If there is nothing under us, we paint the screen in black, otherwise
        // we just skip this update.

        // figure out if there is something below us
        Region under;
        const SurfaceFlinger::LayerVector& drawingLayers(
                mFlinger->mDrawingState.layersSortedByZ);
        const size_t count = drawingLayers.size();
        for (size_t i=0 ; i<count ; ++i) {
            const sp<LayerBase>& layer(drawingLayers[i]);
            if (layer.get() == static_cast<LayerBase const*>(this))
                break;
            under.orSelf( hw->getTransform().transform(layer->visibleRegion) );
        }
        // if not everything below us is covered, we plug the holes!
        Region holes(clip.subtract(under));
        if (!holes.isEmpty()) {
            // [MTK] {{{
            // add debug log
            XLOGI("[%s] %s(i:%d): clear screen hole (l:%d, t:%d, w:%d, h:%d)",
                __func__, getName().string(), getIdentity(),
                holes.getBounds().left, holes.getBounds().top,
                holes.getBounds().width(), holes.getBounds().height());
            // [MTK] }}}
            clearWithOpenGL(hw, holes, 0, 0, 0, 1);
        }
        return;
    }

    status_t err = mSurfaceTexture->doGLFenceWait();
    if (err != OK) {
        ALOGE("onDraw: failed waiting for fence: %d", err);
        // Go ahead and draw the buffer anyway; no matter what we do the screen
        // is probably going to have something visibly wrong.
    }

    bool blackOutLayer = isProtected() || (isSecure() && !hw->isSecure());

    if (!blackOutLayer) {
        // TODO: we could be more subtle with isFixedSize()
        const bool useFiltering = getFiltering() || needsFiltering(hw) || isFixedSize();

        // Query the texture matrix given our current filtering mode.
        float textureMatrix[16];
        mSurfaceTexture->setFilteringEnabled(useFiltering);
        mSurfaceTexture->getTransformMatrix(textureMatrix);

        // Set things up for texturing.
        // [MTK] {{{
        // buffer conversion here for SF layer
        if (true == mSurfaceTexture->isAuxSlotNeedConvert()) {
            mSurfaceTexture->convertToAuxSlot(true);
        }

        if (true == mSurfaceTexture->isAuxSlotDirty()) {
            // bind to aux buffer if converted successfully
            mSurfaceTexture->bindToAuxSlot();
        } else {
            // bind to original buffer
            glBindTexture(GL_TEXTURE_EXTERNAL_OES, mTextureName);
        }
        // [MTK] }}}
        GLenum filter = GL_NEAREST;
        if (useFiltering) {
            filter = GL_LINEAR;
        }
        glTexParameterx(GL_TEXTURE_EXTERNAL_OES, GL_TEXTURE_MAG_FILTER, filter);
        glTexParameterx(GL_TEXTURE_EXTERNAL_OES, GL_TEXTURE_MIN_FILTER, filter);
        glMatrixMode(GL_TEXTURE);
        glLoadMatrixf(textureMatrix);
        glMatrixMode(GL_MODELVIEW);
        glDisable(GL_TEXTURE_2D);
        glEnable(GL_TEXTURE_EXTERNAL_OES);
    } else {
        glBindTexture(GL_TEXTURE_2D, mFlinger->getProtectedTexName());
        glMatrixMode(GL_TEXTURE);
        glLoadIdentity();
        glMatrixMode(GL_MODELVIEW);
        glDisable(GL_TEXTURE_EXTERNAL_OES);
        glEnable(GL_TEXTURE_2D);
    }

    drawWithOpenGL(hw, clip);

    glDisable(GL_TEXTURE_EXTERNAL_OES);
    glDisable(GL_TEXTURE_2D);
}
void Layer::onDraw(const sp<const DisplayDevice>& hw, const Region& clip) const
{
    ATRACE_CALL();

    if (CC_UNLIKELY(mActiveBuffer == 0)) {
        // the texture has not been created yet, this Layer has
        // in fact never been drawn into. This happens frequently with
        // SurfaceView because the WindowManager can't know when the client
        // has drawn the first time.

        // If there is nothing under us, we paint the screen in black, otherwise
        // we just skip this update.

        // figure out if there is something below us
        Region under;
        const SurfaceFlinger::LayerVector& drawingLayers(
                mFlinger->mDrawingState.layersSortedByZ);
        const size_t count = drawingLayers.size();
        for (size_t i=0 ; i<count ; ++i) {
            const sp<Layer>& layer(drawingLayers[i]);
            if (layer.get() == static_cast<Layer const*>(this))
                break;
            under.orSelf( hw->getTransform().transform(layer->visibleRegion) );
        }
        // if not everything below us is covered, we plug the holes!
        Region holes(clip.subtract(under));
        if (!holes.isEmpty()) {
            clearWithOpenGL(hw, holes, 0, 0, 0, 1);
        }
        return;
    }

    // Bind the current buffer to the GL texture, and wait for it to be
    // ready for us to draw into.
    status_t err = mSurfaceFlingerConsumer->bindTextureImage();
    if (err != NO_ERROR) {
        ALOGW("onDraw: bindTextureImage failed (err=%d)", err);
        // Go ahead and draw the buffer anyway; no matter what we do the screen
        // is probably going to have something visibly wrong.
    }

    bool canAllowGPU = false;
#ifdef QCOM_BSP
    if(isProtected()) {
        char property[PROPERTY_VALUE_MAX];
        if ((property_get("persist.gralloc.cp.level3", property, NULL) > 0) &&
                (atoi(property) == 1)) {
            canAllowGPU = true;
        }
    }
#endif

    bool blackOutLayer = isProtected() || (isSecure() && !hw->isSecure());

    RenderEngine& engine(mFlinger->getRenderEngine());

    if (!blackOutLayer || (canAllowGPU)) {
        // TODO: we could be more subtle with isFixedSize()
        const bool useFiltering = getFiltering() || needsFiltering(hw) || isFixedSize();

        // Query the texture matrix given our current filtering mode.
        float textureMatrix[16];
        mSurfaceFlingerConsumer->setFilteringEnabled(useFiltering);
        mSurfaceFlingerConsumer->getTransformMatrix(textureMatrix);

        if (mSurfaceFlingerConsumer->getTransformToDisplayInverse()) {

            /*
             * the code below applies the display's inverse transform to the texture transform
             */

            // create a 4x4 transform matrix from the display transform flags
            const mat4 flipH(-1,0,0,0,  0,1,0,0, 0,0,1,0, 1,0,0,1);
            const mat4 flipV( 1,0,0,0, 0,-1,0,0, 0,0,1,0, 0,1,0,1);
            const mat4 rot90( 0,1,0,0, -1,0,0,0, 0,0,1,0, 1,0,0,1);

            mat4 tr;
            uint32_t transform = hw->getOrientationTransform();
            if (transform & NATIVE_WINDOW_TRANSFORM_ROT_90)
                tr = tr * rot90;
            if (transform & NATIVE_WINDOW_TRANSFORM_FLIP_H)
                tr = tr * flipH;
            if (transform & NATIVE_WINDOW_TRANSFORM_FLIP_V)
                tr = tr * flipV;

            // calculate the inverse
            tr = inverse(tr);

            // and finally apply it to the original texture matrix
            const mat4 texTransform(mat4(static_cast<const float*>(textureMatrix)) * tr);
            memcpy(textureMatrix, texTransform.asArray(), sizeof(textureMatrix));
        }

        // Set things up for texturing.
        mTexture.setDimensions(mActiveBuffer->getWidth(), mActiveBuffer->getHeight());
        mTexture.setFiltering(useFiltering);
        mTexture.setMatrix(textureMatrix);

        engine.setupLayerTexturing(mTexture);
    } else {
        engine.setupLayerBlackedOut();
    }
    drawWithOpenGL(hw, clip);
    engine.disableTexturing();
}