// Returns true when `shape`, placed at world position (x, y), overlaps
// any solid tile of this map. Only tiles covered by the shape's
// bounding box are tested.
// TODO: verify whether the bounding box should instead be translated
// by (xoffset(), yoffset()) rather than shifting x/y (question carried
// over from the original implementation).
bool TileMap::collision(const od::Shape * shape, float x, float y) const
{
    const od::Rect bounds = shape->boundingBox();

    // Work in map-local coordinates.
    x -= xoffset();
    y -= yoffset();

    // Tile-index range touched by the bounding box (x2/y2 are treated
    // as exclusive edges, hence the -1).
    int firstCol = (x + bounds.x1    ) / tileWidth();
    int lastCol  = (x + bounds.x2 - 1) / tileWidth();
    int firstRow = (y + bounds.y1    ) / tileHeight();
    int lastRow  = (y + bounds.y2 - 1) / tileHeight();

    // Clip the range to the map; an empty range skips the loops below.
    if (firstCol < 0) firstCol = 0;
    if (firstRow < 0) firstRow = 0;
    if (lastCol > width()  - 1) lastCol = width()  - 1;
    if (lastRow > height() - 1) lastRow = height() - 1;

    for (int col = firstCol; col <= lastCol; ++col) {
        for (int row = firstRow; row <= lastRow; ++row) {
            const od::Shape * mask = tileCollisionMask(col, row);
            if (!mask || !tileSolid(col, row))
                continue;
            // The tile mask is tested in the shape's local frame.
            if (shape->collide(*mask,
                               col * tileWidth()  - x,
                               row * tileHeight() - y))
                return true;
        }
    }
    return false;
}
// Makes sure the scaled image data for tile (tileX, tileY) reflects the
// current contents of the parent (unscaled) plane, then pushes the
// refreshed image into the on-screen pixmap `tile`.
//
// Per-line version counters are compared against the parent's counters so
// that only scanlines whose source actually changed are re-scaled.
void ScaledImagePlane::ensureUpToDate(unsigned int tileX, unsigned int tileY, PixmapTile* tile)
{
    ImageTile& imageTile = tiles.at(tileX, tileY);

    //Create the image if need be.
    if (imageTile.image.isNull()) {
        imageTile.image = parent->format.makeImage(tileWidth (tileX), tileHeight(tileY));
        ImageManager::imageCache()->addEntry(&imageTile);
        // Zero all line-version counters so every line is (re)scaled below.
        // NOTE(review): memset length is Tile::TileSize *bytes* — assumes the
        // elements of `versions` are one byte each; confirm against the
        // ImageTile declaration.
        std::memset(imageTile.versions, 0, Tile::TileSize);
    } else
        // Keep the existing entry warm in the image cache (LRU touch).
        ImageManager::imageCache()->touchEntry(&imageTile);

    //Pull in updates to the image.
    for (unsigned int line = 0; line < tileHeight(tileY); ++line) {
        // Map this scaled scanline back to its source line in the parent.
        int origLine = yScaleTable[line + tileY*Tile::TileSize];
        if (imageTile.versions[line] < parent->versions[origLine]) {
            imageTile.versions[line] = parent->versions[origLine];
            // Depth-1 images use 8-bit pixels; everything else 32-bit.
            if (parent->format.depth() == 1)
                scaleLoop<quint8>(&imageTile.image, xScaleTable, line, parent->image, origLine, tileX, tileY);
            else
                scaleLoop<quint32>(&imageTile.image, xScaleTable, line, parent->image, origLine, tileX, tileY);
        }
    }

    //Now, push stuff into the pixmap.
    updatePixmap(tile, imageTile.image, tileX, tileY, 0, 0, imageTile.versions);
}
void TileSet::resizeTiles(int maxWidth, int maxHeight) { // calculate largest tile size that will fit in maxWidth/maxHeight // and maintain the tile's height-to-width ratio double ratio = static_cast<double>(unscaledTileHeight()) / unscaledTileWidth(); if(maxWidth * ratio < maxHeight) maxHeight = qRound(maxWidth * ratio); else maxWidth = qRound(maxHeight / ratio); if(maxHeight == tileHeight() && maxWidth == tileWidth()) return; //kdDebug() << "tile size: " << maxWidth << "x" << maxHeight << endl; QImage img; for(int i = 0; i < nTiles; i++) { if(maxHeight == unscaledTileHeight()) img = unscaledTiles[i].copy();//.convertDepth(32); else img = unscaledTiles[i].smoothScale(maxWidth, maxHeight); scaledTiles[i].convertFromImage(img); } }
void TilesManager::allocateTextures() { int nbTexturesToAllocate = m_currentTextureCount - m_textures.size(); ALOGV("%d tiles to allocate (%d textures planned)", nbTexturesToAllocate, m_currentTextureCount); int nbTexturesAllocated = 0; for (int i = 0; i < nbTexturesToAllocate; i++) { TileTexture* texture = new TileTexture( tileWidth(), tileHeight()); // the atomic load ensures that the texture has been fully initialized // before we pass a pointer for other threads to operate on TileTexture* loadedTexture = reinterpret_cast<TileTexture*>( android_atomic_acquire_load(reinterpret_cast<int32_t*>(&texture))); m_textures.append(loadedTexture); nbTexturesAllocated++; } int nbLayersTexturesToAllocate = m_currentLayerTextureCount - m_tilesTextures.size(); ALOGV("%d layers tiles to allocate (%d textures planned)", nbLayersTexturesToAllocate, m_currentLayerTextureCount); int nbLayersTexturesAllocated = 0; for (int i = 0; i < nbLayersTexturesToAllocate; i++) { TileTexture* texture = new TileTexture( tileWidth(), tileHeight()); // the atomic load ensures that the texture has been fully initialized // before we pass a pointer for other threads to operate on TileTexture* loadedTexture = reinterpret_cast<TileTexture*>( android_atomic_acquire_load(reinterpret_cast<int32_t*>(&texture))); m_tilesTextures.append(loadedTexture); nbLayersTexturesAllocated++; } ALOGV("allocated %d textures for base (total: %d, %d Mb), %d textures for layers (total: %d, %d Mb)", nbTexturesAllocated, m_textures.size(), m_textures.size() * TILE_WIDTH * TILE_HEIGHT * 4 / 1024 / 1024, nbLayersTexturesAllocated, m_tilesTextures.size(), m_tilesTextures.size() * tileWidth() * tileHeight() * 4 / 1024 / 1024); }
// Constructs a painter that renders the audio waveform preview for one
// segment. Caches the segment's on-screen rectangle and the preview
// colour, and sizes the first image slice.
//
// model       : composition model used for geometry and studio lookups
// apData      : precomputed audio preview sample values for the segment
// composition : composition the segment belongs to
// segment     : the audio segment being previewed
AudioPreviewPainter::AudioPreviewPainter(CompositionModelImpl& model,
        CompositionModelImpl::AudioPreviewData* apData,
        const Composition &composition,
        const Segment* segment)
    : m_model(model),
      m_apData(apData),
      m_composition(composition),
      m_segment(segment),
      m_rect(model.computeSegmentRect(*(segment))),
      m_defaultCol(CompositionColourCache::getInstance()->SegmentAudioPreview),
      // Half the grid row height: vertical span of one waveform half.
      m_height(model.grid().getYSnap()/2)
{
    // Wide previews are painted in tileWidth()-sized slices; the image
    // only ever needs to be one slice wide.
    int pixWidth = std::min(m_rect.getBaseWidth(), tileWidth());

    //NB. m_image used to be created as an 8-bit image with 4 bits per pixel.
    // QImage::Format_Indexed8 seems to be close enough, since we manipulate the
    // pixels directly by index, rather than employ drawing tools.
    m_image = QImage(pixWidth, m_rect.height(), QImage::Format_Indexed8);

    m_penWidth = (std::max(1U, (unsigned int)m_rect.getPen().width()) * 2);
    // Usable height per channel: half a grid row minus half the border
    // pen and a 2-pixel margin.
    m_halfRectHeight = m_model.grid().getYSnap()/2 - m_penWidth / 2 - 2;
}
void AudioPreviewPainter::paintPreviewImage() { const CompositionModelImpl::AudioPreviewData::Values &values = m_apData->values; if (values.empty()) return; float gain[2] = { 1.0, 1.0 }; int instrumentChannels = 2; TrackId trackId = m_segment->getTrack(); Track *track = m_model.getComposition().getTrackById(trackId); if (track) { Instrument *instrument = m_model.getStudio().getInstrumentById(track->getInstrument()); if (instrument) { float level = AudioLevel::dB_to_multiplier(instrument->getLevel()); float pan = instrument->getPan() - 100.0; gain[0] = level * ((pan > 0.0) ? (1.0 - (pan / 100.0)) : 1.0); gain[1] = level * ((pan < 0.0) ? ((pan + 100.0) / 100.0) : 1.0); instrumentChannels = instrument->getAudioChannels(); } } // This was always false. bool showMinima = false; //m_apData->showsMinima(); unsigned int channels = m_apData->channels; if (channels == 0) { RG_DEBUG << "AudioPreviewPainter::paintPreviewImage : problem with audio file for segment " << m_segment->getLabel().c_str() << endl; return; } int samplePoints = int(values.size()) / (channels * (showMinima ? 2 : 1)); float h1, h2, l1 = 0, l2 = 0; double sampleScaleFactor = samplePoints / double(m_rect.getBaseWidth()); m_sliceNb = 0; initializeNewSlice(); int centre = m_image.height() / 2; RG_DEBUG << "AudioPreviewPainter::paintPreviewImage width = " << m_rect.getBaseWidth() << ", height = " << m_rect.height() << ", halfRectHeight = " << m_halfRectHeight << endl; RG_DEBUG << "AudioPreviewPainter::paintPreviewImage: channels = " << channels << ", gain left = " << gain[0] << ", right = " << gain[1] << endl; // double audioDuration = double(m_segment->getAudioEndTime().sec) + // double(m_segment->getAudioEndTime().nsec) / 1000000000.0; // We need to take each pixel value and map it onto a point within // the preview. We have samplePoints preview points in a known // duration of audioDuration. Thus each point spans a real time // of audioDuration / samplePoints. 
We need to convert the // accumulated real time back into musical time, and map this // proportionately across the segment width. RealTime startRT = m_model.getComposition().getElapsedRealTime(m_segment->getStartTime()); double startTime = double(startRT.sec) + double(startRT.nsec) / 1000000000.0; RealTime endRT = m_model.getComposition().getElapsedRealTime(m_segment->getEndMarkerTime()); double endTime = double(endRT.sec) + double(endRT.nsec) / 1000000000.0; bool haveTempoChange = false; int finalTempoChangeNumber = m_model.getComposition().getTempoChangeNumberAt (m_segment->getEndMarkerTime()); if ((finalTempoChangeNumber >= 0) && (finalTempoChangeNumber > m_model.getComposition().getTempoChangeNumberAt (m_segment->getStartTime()))) { haveTempoChange = true; } QSettings settings; settings.beginGroup( GeneralOptionsConfigGroup ); bool meterLevels = (settings.value("audiopreviewstyle", 1).toUInt() == 1); for (int i = 0; i < m_rect.getBaseWidth(); ++i) { // i is the x coordinate within the rectangle. We need to // calculate the position within the audio preview from which // to draw the peak for this coordinate. It's possible there // may be more than one, in which case we need to find the // peak of all of them. int position = 0; if (haveTempoChange) { // First find the time corresponding to this i. 
timeT musicalTime = m_model.grid().getRulerScale()->getTimeForX(m_rect.x() + i); RealTime realTime = m_model.getComposition().getElapsedRealTime(musicalTime); double time = double(realTime.sec) + double(realTime.nsec) / 1000000000.0; double offset = time - startTime; if (endTime > startTime) { position = offset * m_rect.getBaseWidth() / (endTime - startTime); position = int(channels * position); } } else { position = int(channels * i * sampleScaleFactor); } if (position < 0) continue; if (position >= int(values.size()) - int(channels)) { finalizeCurrentSlice(); break; } if (channels == 1) { h1 = values[position++]; h2 = h1; if (showMinima) { l1 = values[position++]; l2 = l1; } } else { h1 = values[position++]; if (showMinima) l1 = values[position++]; h2 = values[position++]; if (showMinima) l2 = values[position++]; } if (instrumentChannels == 1 && channels == 2) { h1 = h2 = (h1 + h2) / 2; l1 = l2 = (l1 + l2) / 2; } h1 *= gain[0]; h2 *= gain[1]; l1 *= gain[0]; l2 *= gain[1]; // int width = 1; int pixel; // h1 left, h2 right if (h1 >= 1.0) { h1 = 1.0; pixel = 2; } else { pixel = 1; } int h; if (meterLevels) { h = AudioLevel::multiplier_to_preview(h1, m_height); } else { h = h1 * m_height; } if (h <= 0) h = 1; if (h > m_halfRectHeight) h = m_halfRectHeight; int rectX = i % tileWidth(); for (int py = 0; py < h; ++py) { m_image.setPixel(rectX, centre - py, pixel); } if (h2 >= 1.0) { h2 = 1.0; pixel = 2; } else { pixel = 1; } if (meterLevels) { h = AudioLevel::multiplier_to_preview(h2, m_height); } else { h = h2 * m_height; } if (h < 0) h = 0; for (int py = 0; py < h; ++py) { m_image.setPixel(rectX, centre + py, pixel); } if (((i+1) % tileWidth()) == 0 || i == (m_rect.getBaseWidth() - 1)) { finalizeCurrentSlice(); initializeNewSlice(); } } /* Auto-fade not yet implemented. if (m_segment->isAutoFading()) { Composition &comp = m_model.getComposition(); int audioFadeInEnd = int( m_model.grid().getRulerScale()->getXForTime(comp. 
getElapsedTimeForRealTime(m_segment->getFadeInTime()) + m_segment->getStartTime()) - m_model.grid().getRulerScale()->getXForTime(m_segment->getStartTime())); m_p.setPen(QColor(Qt::blue)); m_p.drawRect(0, m_apData->getSegmentRect().height() - 1, audioFadeInEnd, 1); m_pb.drawRect(0, m_apData->getSegmentRect().height() - 1, audioFadeInEnd, 1); } m_p.end(); m_pb.end(); */ settings.endGroup(); }