// Merges the attribute sets of all contained elements into a single set.
//
// @param flags  Passed through to each element's getAttributes() call.
// @return       Empty AttributeArray when the container is empty; otherwise the
//               product (operator*=) of every element's attribute set.
//
// NOTE(review): wait()/notify() bracket the traversal — presumably a
// synchronization handshake on this container; confirm their contract.
AttributeArray MediaArray::getAttributes(int flags) const {
    // Nothing to merge for an empty container.
    if (size() == 0) {
        return AttributeArray();
    }

    wait();

    // Seed with the first element, then fold in the remainder.
    AttributeSet combined = (*this)[0].getAttributes(flags);
    for (size_t j = 1; j < size(); ++j) {
        combined *= (*this)[j].getAttributes(flags);
    }

    notify();
    return combined;
}
// Builds a spherical (currently hemispherical) tessellation of numSlices
// longitudinal slices about @a axis, plus the acceleration structures used
// to bucket sampled directions:
//
//  - m_meshVertex / m_meshIndex : quad mesh over the (hemi)sphere, with a
//    single shared vertex at each pole (degenerate quads at the poles)
//  - m_tree                     : TriTree of inward-facing triangles, two per quad
//  - m_invArea                  : per-vertex reciprocal of the summed area of the
//    surrounding quads, used to normalize sample probability
//  - m_gpuMeshVertex / m_gpuMeshIndex : GPU copies for rendering
//
// @param numSlices  Number of longitudinal slices; must be >= 4.
// @param axis       Pole axis of the tessellation (need not be unit length).
DirectionHistogram::DirectionHistogram(int numSlices, const Vector3& axis) : m_slices(numSlices) {
    alwaysAssertM(numSlices >= 4, "At least four slices required");

    // Turn on old hemisphere-only optimization
    const bool hemi = true;

    // Build an orthonormal basis (X, Y, Z) with Z along the requested axis.
    const Vector3& Z = axis.direction();
    // Pick whichever world axis is least parallel to Z as the seed for X.
    Vector3 X = (abs(Z.dot(Vector3::unitX())) <= 0.9f) ? Vector3::unitX() : Vector3::unitY();
    // Gram-Schmidt: remove the Z component and renormalize.
    X = (X - Z * (Z.dot(X))).direction();
    const Vector3 Y = Z.cross(X);

    // Only generate upper hemisphere.
    // BUG FIX: P and T were previously declared "static", so they were latched
    // from the m_slices of the FIRST DirectionHistogram ever constructed and
    // reused (wrongly) by every later instance built with a different numSlices.
    const int P = m_slices;
    const int T = hemi ? (m_slices / 2) : m_slices;
    const float thetaExtent = float(hemi ? G3D::halfPi() : G3D::pi());

    for (int t = 0; t < T; ++t) {
        const float theta = t * thetaExtent / (T - 1.0f);
        const float z = cos(theta);
        const float r = sin(theta);

        const bool firstRow  = (t == 0);
        const bool secondRow = (t == 1);
        const bool lastRow   = ! hemi && (t == T - 1);

        for (int p = 0; p < P; ++p) {
            const float phi = p * float(G3D::twoPi()) / P;
            const float x = cos(phi) * r;
            const float y = sin(phi) * r;

            // Only insert one vertex at each pole
            const bool unique = (! firstRow && ! lastRow) || (p == 0);
            if (unique) {
                m_meshVertex.append(X * x + Y * y + Z * z);
            }

            const int i = m_meshVertex.size() - 1;

            // Index of the start of this row
            const int rowStart  = ((i - 1) / P) * P + 1;
            const int colOffset = i - rowStart;

            if (firstRow) {
                // (First row generates no quads)
            } else if (secondRow) {
                // Degenerate north pole
                m_meshIndex.append(0, 0, i, rowStart + (colOffset + 1) % P);
            } else if (lastRow) {
                // Degenerate south pole
                m_meshIndex.append(i, i, i - p - 1, i - p - 2);
            } else {
                m_meshIndex.append(i - P, i,
                                   rowStart + (colOffset + 1) % P,
                                   rowStart + ((colOffset + 1) % P) - P);
            }
        }
    }

    m_bucket.resize(m_meshVertex.size());
    reset();

    // We initially accumulate areas and then invert them in a follow-up pass
    m_invArea.resize(m_meshIndex.size());

    // Zero the array
    System::memset(m_invArea.getCArray(), 0, sizeof(float) * m_invArea.size());

    CPUVertexArray vertexArray;

    // Create triTree
    {
        vertexArray.hasTangent   = false;
        vertexArray.hasTexCoord0 = false;
        for (int i = 0; i < m_meshVertex.size(); ++i) {
            CPUVertexArray::Vertex v;
            v.position = m_meshVertex[i];
            // Unit-sphere vertices double as their own normals.
            v.normal   = m_meshVertex[i];
            vertexArray.vertex.append(v);
        }

        Array<Tri> triArray;
        for (int q = 0; q < m_meshIndex.size(); q += 4) {
            const int i0 = m_meshIndex[q];
            const int i1 = m_meshIndex[q + 1];
            const int i2 = m_meshIndex[q + 2];
            const int i3 = m_meshIndex[q + 3];

            // Create two tris for each quad
            // Wind backwards; these tris have to face inward
            const Proxy<Material>::Ref vii(VertexIndexIndex::create(q));
            Tri A(i0, i3, i2, vertexArray, vii);
            Tri B(i0, i2, i1, vertexArray, vii);
            triArray.append(A);
            triArray.append(B);

            // Attribute the area of the surrounding quads to each vertex. If we don't do this, then
            // vertices near the equator will receive only half of the correct probability.
            float area = A.area() + B.area();
            m_invArea[i0] += area;
            m_invArea[i1] += area;
            m_invArea[i2] += area;
            m_invArea[i3] += area;
        }
        m_tree.setContents(triArray, vertexArray); //ASKMORGAN
        vertexArray.vertex.clear();

        for (int i = 0; i < m_invArea.size(); ++i) {
            // Multiply by a small number to keep these from getting too large
            m_invArea[i] = 0.001f / m_invArea[i];
        }
    }

    // Upload the mesh to the GPU.
    shared_ptr<VertexBuffer> dataArea =
        VertexBuffer::create(sizeof(Vector3) * m_meshVertex.size(), VertexBuffer::WRITE_EVERY_FEW_FRAMES);
    m_gpuMeshVertex = AttributeArray(m_meshVertex, dataArea);

    shared_ptr<VertexBuffer> indexArea =
        VertexBuffer::create(sizeof(int) * m_meshIndex.size(), VertexBuffer::WRITE_ONCE);
    m_gpuMeshIndex = IndexStream(m_meshIndex, indexArea);

    m_dirty = false;
}