//---------------------------------------------------------------------------- void LayMap::addCriticalPoints(Clst &c,short *mtx) // // adds points belonging to critical area and laying under mirror axis // if it's really critical { int ver=imageMap->rows, hor=imageMap->cols; List& list=c.criticalPoints; int x=c.cX,y=c.cY; ArrayIterator aIter(imageMap->criLines); for(;(Item&)aIter != NOITEM;aIter++) { List &criLine = (List&)(Item&)aIter; ListIterator criIter(criLine); for(;(Item&)criIter != NOITEM;criIter++) { CriCandidate &cp = (CriCandidate&)(Item&)criIter; if (cp.neighbors == 0 || (cp.neighbors & c.pattern)!= cp.neighbors) { // we don't have to check points laying // on mirror axis between two sectors // taken by this cell or belonging // to critical universal feeds laying // inside this cell // cp.neighbors in this place contains set of // bits on positions of sectors on // both sides of axis for restricted feed // or sectors to which belong critical // universal feed int net=findNetId(x*hor+cp.x,y*ver+cp.y,layerType(cp.layer),mtx); if (net != 0) // this point is connected // to one of terminals { CriPoint &newCp = * new CriPoint(cp.x,cp.y,layerType(cp.layer),net); if ( ! list.hasItem( newCp) ) list.add( newCp ); else delete (void*)&newCp; } } } } }// LayMap::addCriticalPoints //
// Collects the names of blobs worth visualizing: the network's first input
// blob, followed by the first output blob of every Convolution, InnerProduct,
// and Pooling layer, in network order.
//
//   net               - the loaded Caffe network (not modified logically).
//   blobsToVisualize  - output; blob names are appended in traversal order.
void getBlobsToVisualize(caffe::Net<float> & net, std::vector<std::string> & blobsToVisualize)
{
    // Start from the network's first input blob.
    const int nInputBlobs = net.input_blob_indices().size();
    if (nInputBlobs == 0)
    {
        std::cerr << "there are no input blobs - where to start?" << std::endl;
        return;
    }
    blobsToVisualize.push_back(net.blob_names()[net.input_blob_indices()[0]]);

    // Layer types whose first output blob should be visualized. Only the keys
    // matter; the vector values are placeholders.
    // FIX: removed the unused local `layersWhoOutputVisualizableBlobs` set —
    // it was populated with a single entry ("Convolution"), inconsistent with
    // this map, and never read anywhere in the function.
    std::map<std::string,std::vector<int> > outputBlobsToVisualizeByLayerType;
    outputBlobsToVisualizeByLayerType["Convolution"] = std::vector<int>(1,0);
    outputBlobsToVisualizeByLayerType["InnerProduct"] = std::vector<int>(1,0);
    outputBlobsToVisualizeByLayerType["Pooling"] = std::vector<int>(1,0);

    const int nLayers = net.layers().size();
    for (int i=0; i<nLayers; ++i)
    {
        boost::shared_ptr<caffe::Layer<float> > layer = net.layers()[i];
        std::string layerType(layer->type());
        if (outputBlobsToVisualizeByLayerType.find(layerType) != outputBlobsToVisualizeByLayerType.end())
        {
            // First top (output) blob of this layer; resolve it back to its
            // global blob index so we can report its name.
            caffe::Blob<float> * outputBlob = net.top_vecs()[i][0];
            int blobNum = getBlobNumber(net,outputBlob);
            assert(blobNum >= 0);
            blobsToVisualize.push_back(net.blob_names()[blobNum]);
        }
    }
}
// Releases per-frame texture resources acquired in drawTextures(): gives the
// plugin front buffer back (if we hold one) and unlocks the front-buffer
// mutex for texture-backed and WebGL layers.
void LayerCompositingThread::releaseTextureResources()
{
    // Hand the plugin's front buffer back if we currently hold it.
    if (m_pluginBuffer && m_pluginView) {
        BlackBerry::Platform::Graphics::releaseBufferGLTexture(m_pluginBuffer);
        m_pluginBuffer = 0;
        m_pluginView->unlockFrontBuffer();
    }

    // Drop the front-buffer mutex taken in drawTextures(). Check the lock
    // pointer first so layerType() is only consulted when a lock exists.
    if (m_frontBufferLock) {
        if (m_texID || layerType() == LayerData::WebGLLayer)
            pthread_mutex_unlock(m_frontBufferLock);
    }
}
// Draws this layer's content for the current frame. Exactly one of four
// content sources is used, tried in order: plugin buffer, media player
// (video), WebGL texture, plain texture; otherwise drawing is delegated to
// m_client. For plugin/WebGL/texture paths, a lock is taken here and the
// layer is queued for releaseTextureResources() to unlock later.
//
//   scale            - passed through to the client fallback only.
//   positionLocation - GL attribute location for vertex positions.
//   texCoordLocation - GL attribute location for texture coordinates.
//   visibleRect      - content region covered by the viewport (used to place
//                      the media player in contents coordinates).
void LayerCompositingThread::drawTextures(double scale, int positionLocation, int texCoordLocation, const FloatRect& visibleRect)
{
    // Default quad texture coordinates (fan order: BL, TL, TR, BR).
    static float texcoords[4 * 2] = { 0, 0, 0, 1, 1, 1, 1, 0 };

    if (m_pluginView) {
        if (m_isVisible) {
            // The layer contains Flash, video, or other plugin contents.
            m_pluginBuffer = m_pluginView->lockFrontBufferForRead();
            if (!m_pluginBuffer)
                return;
            if (!BlackBerry::Platform::Graphics::lockAndBindBufferGLTexture(m_pluginBuffer, GL_TEXTURE_2D)) {
                // Could not bind the buffer as a GL texture; give it back.
                m_pluginView->unlockFrontBuffer();
                return;
            }
            // Buffer stays locked until releaseTextureResources() runs.
            m_layerRenderer->addLayerToReleaseTextureResourcesList(this);
            glVertexAttribPointer(positionLocation, 2, GL_FLOAT, GL_FALSE, 0, &m_transformedBounds);
            glVertexAttribPointer(texCoordLocation, 2, GL_FLOAT, GL_FALSE, 0, texcoords);
            glDrawArrays(GL_TRIANGLE_FAN, 0, 4);
        }
        return;
    }
#if ENABLE(VIDEO)
    if (m_mediaPlayer) {
        if (m_isVisible) {
            // We need to specify the media player location in contents coordinates. The 'visibleRect'
            // specifies the content region covered by our viewport. So we transform from our
            // normalized device coordinates [-1, 1] to the 'visibleRect'.
            float vrw2 = visibleRect.width() / 2.0;
            float vrh2 = visibleRect.height() / 2.0;
            // NDC -> contents coordinates; the Y axis is flipped.
            float x = m_transformedBounds.p1().x() * vrw2 + vrw2 + visibleRect.x();
            float y = -m_transformedBounds.p1().y() * vrh2 + vrh2 + visibleRect.y();
            // Round to the nearest pixel when converting to an IntRect.
            m_mediaPlayer->paint(0, IntRect((int)(x + 0.5), (int)(y + 0.5), m_bounds.width(), m_bounds.height()));
            MediaPlayerPrivate* mpp = static_cast<MediaPlayerPrivate*>(m_mediaPlayer->platformMedia().media.qnxMediaPlayer);
            mpp->drawBufferingAnimation(m_drawTransform, positionLocation, texCoordLocation);
        }
        return;
    }
#endif
#if ENABLE(WEBGL)
    if (layerType() == LayerData::WebGLLayer) {
        // Lock is released later via releaseTextureResources().
        m_layerRenderer->addLayerToReleaseTextureResourcesList(this);
        pthread_mutex_lock(m_frontBufferLock);
        glVertexAttribPointer(positionLocation, 2, GL_FLOAT, GL_FALSE, 0, &m_transformedBounds);
        // Ratios are currently fixed at 1.0; presumably placeholders for a
        // partially-filled canvas texture — TODO confirm intended use.
        float canvasWidthRatio = 1.0f;
        float canvasHeightRatio = 1.0f;
        float upsideDown[4 * 2] = { 0, 1, 0, 1 - canvasHeightRatio, canvasWidthRatio, 1 - canvasHeightRatio, canvasWidthRatio, 1 };
        // Flip the texture Y axis because OpenGL and Skia have different origins
        glVertexAttribPointer(texCoordLocation, 2, GL_FLOAT, GL_FALSE, 0, upsideDown);
        glBindTexture(GL_TEXTURE_2D, m_texID);
        glDrawArrays(GL_TRIANGLE_FAN, 0, 4);
        // FIXME: If the canvas/texture is larger than 2048x2048, then we'll die here
        return;
    }
#endif
    if (m_texID) {
        // Plain texture-backed layer; lock released in releaseTextureResources().
        m_layerRenderer->addLayerToReleaseTextureResourcesList(this);
        pthread_mutex_lock(m_frontBufferLock);
        glDisable(GL_SCISSOR_TEST);
        glBindTexture(GL_TEXTURE_2D, m_texID);
        glVertexAttribPointer(positionLocation, 2, GL_FLOAT, GL_FALSE, 0, &m_transformedBounds);
        // Y-flipped texture coordinates for the full quad.
        float upsideDown[4 * 2] = { 0, 1, 0, 0, 1, 0, 1, 1 };
        glVertexAttribPointer(texCoordLocation, 2, GL_FLOAT, GL_FALSE, 0, upsideDown);
        glDrawArrays(GL_TRIANGLE_FAN, 0, 4);
        return;
    }
    // No direct content: let the client (if any) draw on our behalf.
    if (m_client)
        m_client->drawTextures(this, scale, positionLocation, texCoordLocation);
}
// Propagates per-blob stride and receptive-field size through the network,
// starting from the first input blob (stride 1, field 1x1). Convolution and
// Pooling layers grow the field by strideIn*(kernel-1) and multiply the
// stride by the layer stride; an InnerProduct layer keeps the stride and sets
// the field to the full input size.
//
//   net              - the loaded Caffe network.
//   blobsToVisualize - currently unused here; presumably kept for signature
//                      parity with related helpers — TODO confirm.
//   strides          - output map: blob name -> cumulative stride in pixels.
//   receptiveFields  - output map: blob name -> receptive field (w, h).
void getBlobStridesAndReceptiveFields(caffe::Net<float> & net, const std::vector<std::string> & blobsToVisualize, std::map<std::string,int> & strides, std::map<std::string,int2> & receptiveFields)
{
    const int nInputBlobs = net.input_blob_indices().size();
    if (nInputBlobs == 0)
    {
        std::cerr << "there are no input blobs - where to start?" << std::endl;
        return;
    }
    // Seed the propagation at the first input blob.
    std::string inputBlobName(net.blob_names()[net.input_blob_indices()[0]]);
    strides[inputBlobName] = 1;
    receptiveFields[inputBlobName] = make_int2(1,1);
    boost::shared_ptr<caffe::Blob<float> > inputBlob = net.blob_by_name(inputBlobName);
    int2 inputSize = make_int2(inputBlob->width(),inputBlob->height());

    const int nLayers = net.layers().size();
    for (int i=0; i<nLayers; ++i)
    {
        boost::shared_ptr<caffe::Layer<float> > layer = net.layers()[i];
        std::string layerType(layer->type());
        bool isConv = layerType == std::string("Convolution");
        bool isPool = layerType == std::string("Pooling");
        if (isConv || isPool)
        {
            // NOTE: these shadow the outer inputBlob/inputBlobName on purpose —
            // they refer to this layer's bottom blob, not the network input.
            caffe::Blob<float> * inputBlob = net.bottom_vecs()[i][0];
            int inputBlobNum = getBlobNumber(net,inputBlob);
            assert(inputBlobNum >= 0);
            std::string inputBlobName = net.blob_names()[inputBlobNum];
            caffe::Blob<float> * outputBlob = net.top_vecs()[i][0];
            int outputBlobNum = getBlobNumber(net,outputBlob);
            assert(outputBlobNum >= 0);
            std::string outputBlobName = net.blob_names()[outputBlobNum];
            // Square kernel/stride when the scalar field is set; otherwise
            // fall back to the explicit w/h fields.
            // NOTE(review): if neither form is set, the *_w/*_h fallbacks
            // return proto defaults — verify those defaults are sane (a
            // default of 0 would zero the stride product below).
            int2 kernelSize, stride;
            if (isConv)
            {
                caffe::ConvolutionParameter convParam = layer->layer_param().convolution_param();
                kernelSize = convParam.has_kernel_size() ? make_int2(convParam.kernel_size()) : make_int2(convParam.kernel_w(),convParam.kernel_h());
                stride = convParam.has_stride() ? make_int2(convParam.stride()) : make_int2(convParam.stride_w(),convParam.stride_h());
            }
            else if (isPool)
            {
                caffe::PoolingParameter poolParam = layer->layer_param().pooling_param();
                kernelSize = poolParam.has_kernel_size() ? make_int2(poolParam.kernel_size()) : make_int2(poolParam.kernel_w(),poolParam.kernel_h());
                stride = poolParam.has_stride() ? make_int2(poolParam.stride()) : make_int2(poolParam.stride_w(),poolParam.stride_h());
            }
            // Only propagate if the input blob has known stride/field
            // (i.e. it is reachable from the seeded input blob).
            if (strides.find(inputBlobName) != strides.end())
            {
                const int strideIn = strides[inputBlobName];
                const int2 fieldIn = receptiveFields[inputBlobName];
                // Cumulative stride multiplies; field grows by the kernel
                // extent scaled by the incoming stride.
                // NOTE(review): only stride.x is used for the cumulative
                // stride — assumes square strides; confirm acceptable.
                strides[outputBlobName] = strideIn*stride.x;
                receptiveFields[outputBlobName] = strideIn*(kernelSize - make_int2(1)) + fieldIn;
            }
        }
        else if (layerType == std::string("InnerProduct"))
        {
            caffe::Blob<float> * inputBlob = net.bottom_vecs()[i][0];
            int inputBlobNum = getBlobNumber(net,inputBlob);
            assert(inputBlobNum >= 0);
            std::string inputBlobName = net.blob_names()[inputBlobNum];
            caffe::Blob<float> * outputBlob = net.top_vecs()[i][0];
            int outputBlobNum = getBlobNumber(net,outputBlob);
            assert(outputBlobNum >= 0);
            std::string outputBlobName = net.blob_names()[outputBlobNum];
            if (strides.find(inputBlobName) != strides.end())
            {
                // A fully-connected layer sees the entire input: stride is
                // carried through and the field becomes the whole input size.
                strides[outputBlobName] = strides[inputBlobName];
                receptiveFields[outputBlobName] = inputSize;
            }
        }
    }
}