void SurfaceSet::calcInvRot()
{
    float mat[16];
    glGetFloatv(GL_MODELVIEW_MATRIX, mat);

    // This one is tricky: the matrix below is the GL matrix (column-major, the opposite
    // row/column convention from Qt) with the 3x3 part transposed.
    // This inverts the rotation and does weird stuff to the scale...
    QMatrix4x4 invRotMat(mat[0], mat[1], mat[2],  mat[12],
                         mat[4], mat[5], mat[6],  mat[13],
                         mat[8], mat[9], mat[10], mat[14],
                         mat[3], mat[7], mat[11], mat[15]);
    //QMatrix4x4 invRotMat(mat[0],  mat[1],  mat[2],  mat[3],
    //                     mat[4],  mat[5],  mat[6],  mat[7],
    //                     mat[8],  mat[9],  mat[10], mat[11],
    //                     mat[12], mat[13], mat[14], mat[15]);

    const QVector3D xVec(1, 0, 0);
    const QVector3D yVec(0, 1, 0);
    const QVector3D zVec(0, 0, 1);

    // mapVector() ignores the translation part of the matrix...
    invRotX = invRotMat.mapVector(xVec);
    invRotY = invRotMat.mapVector(yVec);
    invRotZ = invRotMat.mapVector(zVec);

    // normalization removes the scale
    invRotX.normalize();
    invRotY.normalize();
    invRotZ.normalize();
}
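The index shuffling above relies on two facts: for a pure rotation the inverse equals the transpose, and mapVector() drops the translation column. A minimal sketch of the same idea using Qt's own matrix API instead of raw GL state (the helper name extractInvRotation is hypothetical, and it assumes the modelview matrix is already available as a QMatrix4x4):

#include <QMatrix4x4>
#include <QVector3D>

// Hypothetical helper: returns camera-aligned basis vectors by applying the
// transposed 3x3 part of the modelview matrix, which undoes a pure rotation;
// normalization removes any uniform scale.
static void extractInvRotation(const QMatrix4x4 &modelView,
                               QVector3D &invX, QVector3D &invY, QVector3D &invZ)
{
    const QMatrix4x4 t = modelView.transposed(); // transpose == inverse for pure rotations
    invX = t.mapVector(QVector3D(1, 0, 0)).normalized();
    invY = t.mapVector(QVector3D(0, 1, 0)).normalized();
    invZ = t.mapVector(QVector3D(0, 0, 1)).normalized();
}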
void tst_QRay3D::transform()
{
    QFETCH(QVector3D, point);
    QFETCH(QVector3D, direction);

    QMatrix4x4 m;
    m.translate(-1.0f, 2.5f, 5.0f);
    m.rotate(45.0f, 1.0f, 1.0f, 1.0f);
    m.scale(23.5f);

    Qt3DRender::RayCasting::QRay3D ray1(point, direction);
    Qt3DRender::RayCasting::QRay3D ray2(ray1);
    Qt3DRender::RayCasting::QRay3D ray3;

    ray1.transform(m);
    ray3 = ray2.transformed(m);

    QVERIFY(fuzzyCompare(ray1.origin(), ray3.origin()));
    QVERIFY(fuzzyCompare(ray1.direction(), ray3.direction()));

    QVERIFY(fuzzyCompare(ray1.origin(), m * point));
    QVERIFY(fuzzyCompare(ray1.direction(), m.mapVector(direction)));
}
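The fuzzyCompare() helper used above is defined elsewhere in the test file and is not shown here. A minimal sketch of the kind of component-wise comparison it performs (the real helper in the Qt3D test suite may differ in detail):

#include <QtGlobal>
#include <QVector3D>

// Component-wise fuzzy comparison for QVector3D; values near zero are handled
// separately because qFuzzyCompare() is unreliable around 0.0.
static bool fuzzyCompare(const QVector3D &a, const QVector3D &b)
{
    auto close = [](float x, float y) {
        return qFuzzyIsNull(x - y) || qFuzzyCompare(x, y);
    };
    return close(a.x(), b.x()) && close(a.y(), b.y()) && close(a.z(), b.z());
}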
void tst_QRay3D::transform()
{
    QFETCH(QVector3D, point);
    QFETCH(QVector3D, direction);

    QMatrix4x4 m;
    m.translate(-1.0f, 2.5f, 5.0f);
    m.rotate(45.0f, 1.0f, 1.0f, 1.0f);
    m.scale(23.5f);

    QRay3D ray1(point, direction);
    QRay3D ray2(ray1);
    QRay3D ray3;

    ray1.transform(m);
    ray3 = ray2.transformed(m);

    QCOMPARE(ray1.origin(), ray3.origin());
    QCOMPARE(ray1.direction(), ray3.direction());

    QCOMPARE(ray1.origin(), m * point);
    QCOMPARE(ray1.direction(), m.mapVector(direction));
}
/*!
    Returns the spotDirection() for this light after transforming it
    from world co-ordinates to eye co-ordinates using the top-left
    3x3 submatrix within \a transform.

    The returned result is suitable to be applied to the GL_SPOT_DIRECTION
    property of \c{glLight()}, assuming that the modelview transformation
    in the GL context is set to the identity.

    \sa eyePosition()
*/
QVector3D QGLLightParameters::eyeSpotDirection(const QMatrix4x4 &transform) const
{
    Q_D(const QGLLightParameters);
    return transform.mapVector(d->spotDirection);
}
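Because mapVector() applies only the top-left 3x3 submatrix, the spot direction is rotated (and scaled) but never translated, which is exactly what GL_SPOT_DIRECTION expects. A minimal usage sketch, assuming a legacy fixed-function GL context is current; the include path, GL_LIGHT0, and the function name applySpotDirection are illustrative:

#include <QMatrix4x4>
#include <QVector3D>
#include <qgllightparameters.h>

// Usage sketch: push a light's spot direction into fixed-function GL.
static void applySpotDirection(const QGLLightParameters &light, const QMatrix4x4 &modelView)
{
    const QVector3D spotDir = light.eyeSpotDirection(modelView); // rotated only, never translated

    glMatrixMode(GL_MODELVIEW);
    glLoadIdentity(); // the docs above require an identity modelview here
    const GLfloat dir[3] = { GLfloat(spotDir.x()), GLfloat(spotDir.y()), GLfloat(spotDir.z()) };
    glLightfv(GL_LIGHT0, GL_SPOT_DIRECTION, dir);
}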
QPoint CubeItem::cubeIntersection(QWidget *widget, const QPoint &point, int *actualFace) const
{
    // Bail out if no scene.
    if (!mScene) {
        *actualFace = -1;
        return QPoint();
    }

    // Get the combined matrix for the projection.
    int dpiX = widget->logicalDpiX();
    int dpiY = widget->logicalDpiY();
    QRectF bounds = boundingRect();
    qreal aspectRatio = (bounds.width() * dpiY) / (bounds.height() * dpiX);
    QMatrix4x4 mv = camera()->modelViewMatrix();
    QMatrix4x4 proj = camera()->projectionMatrix(aspectRatio);
    QMatrix4x4 combined = proj * mv;

    // Find the relative position of the point within (-1, -1) to (1, 1).
    QPointF relativePoint =
        QPointF((point.x() - bounds.center().x()) * 2 / bounds.width(),
                -(point.y() - bounds.center().y()) * 2 / bounds.height());

    // Determine which face of the cube contains the point.
    QVector3D pt1, pt2, pt3, pt4;
    bool singleFace = (pressedFace != -1);
    for (int face = 0; face < 6; ++face) {
        if (singleFace && face != pressedFace)
            continue;

        // Create a polygon from the projected version of the face
        // so that we can test for point membership.
        pt1 = QVector3D(vertexData[face * 4 * 3],
                        vertexData[face * 4 * 3 + 1],
                        vertexData[face * 4 * 3 + 2]);
        pt2 = QVector3D(vertexData[face * 4 * 3 + 3],
                        vertexData[face * 4 * 3 + 4],
                        vertexData[face * 4 * 3 + 5]);
        pt3 = QVector3D(vertexData[face * 4 * 3 + 6],
                        vertexData[face * 4 * 3 + 7],
                        vertexData[face * 4 * 3 + 8]);
        pt4 = QVector3D(vertexData[face * 4 * 3 + 9],
                        vertexData[face * 4 * 3 + 10],
                        vertexData[face * 4 * 3 + 11]);
        QVector<QPointF> points2d;
        points2d.append((combined * pt1).toPointF());
        points2d.append((combined * pt2).toPointF());
        points2d.append((combined * pt3).toPointF());
        points2d.append((combined * pt4).toPointF());
        QPolygonF polygon(points2d);
        if (!singleFace) {
            if (!polygon.containsPoint(relativePoint, Qt::OddEvenFill))
                continue;
        }

        // We want the face that is pointing towards the user.
        QVector3D v = mv.mapVector(QVector3D::crossProduct(pt2 - pt1, pt3 - pt1));
        if (!singleFace && v.z() <= 0.0f)
            continue;

        // Determine the intersection between the cube face and
        // the ray coming from the eye position.
        QVector3D eyept = proj.inverted().map(QVector3D(relativePoint.x(), relativePoint.y(), -1.0f));
        QLine3D ray(QVector3D(0, 0, 0), eyept);
        QPlane3D plane(mv * pt1, v);
        QResult<QVector3D> intersection = plane.intersection(ray);
        if (!intersection.isValid())
            continue;
        QVector3D worldpt = mv.inverted().map(intersection.value());

        // Map the world point to the range 0..1.
        worldpt = (worldpt / CubeSize) + QVector3D(0.5f, 0.5f, 0.5f);

        // Figure out the texture co-ordinates on the face that
        // correspond to the point.
        qreal xtex, ytex;
        switch (face) {
        case 0:
            xtex = 1.0f - worldpt.y();
            ytex = 1.0f - worldpt.z();
            break;
        case 1:
            xtex = 1.0f - worldpt.x();
            ytex = 1.0f - worldpt.z();
            break;
        case 2:
            xtex = worldpt.y();
            ytex = 1.0f - worldpt.z();
            break;
        case 3:
            xtex = worldpt.x();
            ytex = 1.0f - worldpt.z();
            break;
        case 4:
            xtex = worldpt.x();
            ytex = 1.0f - worldpt.y();
            break;
        case 5:
        default:
            xtex = worldpt.x();
            ytex = worldpt.y();
            break;
        }

        // Turn the texture co-ordinates into scene co-ordinates.
        bounds = mScene->sceneRect();
        xtex *= bounds.width();
        ytex *= bounds.height();
        int x = qRound(xtex);
        int y = qRound(ytex);
        if (x < 0)
            x = 0;
        else if (x >= bounds.width())
            x = qRound(bounds.width() - 1);
        if (y < 0)
            y = 0;
        else if (y >= bounds.height())
            y = qRound(bounds.height() - 1);
        *actualFace = face;
        return QPoint(x, y);
    }
    *actualFace = -1;
    return QPoint();
}
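The eye-space ray above is built by un-projecting the normalized device coordinate through the inverse projection matrix at the near plane (z = -1); since the eye sits at the origin in eye space, the un-projected point also serves as the ray direction. A minimal sketch of just that step (the plane-intersection types QLine3D and QResult belong to the old Qt3D API and are not reproduced here; eyeRayDirection is a hypothetical name):

#include <QMatrix4x4>
#include <QPointF>
#include <QVector3D>

// Un-project a point given in normalized device coordinates back into eye space
// on the near plane. With the eye at the origin, this doubles as the ray direction.
static QVector3D eyeRayDirection(const QMatrix4x4 &projection, const QPointF &ndc)
{
    return projection.inverted().map(QVector3D(float(ndc.x()), float(ndc.y()), -1.0f));
}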
/*!
    Returns the result of transforming this sphere's center() and
    radius() according to \a matrix.

    It is assumed that \a matrix contains a uniform scale factor in the
    x, y, and z directions. Otherwise the radius() in the result is undefined.

    \sa transform()
*/
QSphere3D QSphere3D::transformed(const QMatrix4x4 &matrix) const
{
    return QSphere3D(matrix * m_center,
                     matrix.mapVector(QVector3D(m_radius, 0, 0)).length());
}
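The radius is recovered by mapping a vector of length m_radius through the matrix, so a non-uniform scale would give a different answer depending on which axis the probe vector happens to point along; that is why uniform scaling is assumed. A minimal sketch of a more conservative variant for the non-uniform case (transformedRadiusConservative is a hypothetical helper, not part of QSphere3D):

#include <QMatrix4x4>
#include <QVector3D>
#include <algorithm>

// Hypothetical helper: transforms a bounding-sphere radius so that it still
// encloses the original sphere even when each axis is scaled differently,
// by taking the largest of the three axis scale factors.
static float transformedRadiusConservative(const QMatrix4x4 &matrix, float radius)
{
    const float sx = matrix.mapVector(QVector3D(1, 0, 0)).length();
    const float sy = matrix.mapVector(QVector3D(0, 1, 0)).length();
    const float sz = matrix.mapVector(QVector3D(0, 0, 1)).length();
    return radius * std::max({sx, sy, sz});
}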
void SurfaceSet::paintNodes(int ns)
{
    calcInvRot();
    SConnections* ccs = scons.at(cs);

    glPointSize(qMax(size, 0.1)); // GL does not accept 0 for the point size...
    glLineWidth(size);

    // for all nodes in the current surface...
    for (int i = 0; i < ccs->dn.length(); i++) {
        Node* p = (Node*)(&ccs->dn.at(i));
        Node* mlp = (Node*)(&scons.at(minSpace)->dn.at(i));

        QVector3D nnormal = p->normal.normalized();
        QMatrix4x4* view = viewMatrix();
        QVector3D mapped = view->mapVector(nnormal);
        QVector3D mappedp = view->map(p->p);

        bool visible = mapped.z() > 0; // normal points towards the camera

        // TODO: poor man's clipping, should take the aspect ratio into account...
        double clip = 1;
        visible &= (mappedp.x() > -clip) && (mappedp.x() < clip)
                && (mappedp.y() > -clip) && (mappedp.y() < clip);

        if (visible) {
            // How many connections have a value above the threshold?
            // TODO: Change p to whatever makes sense, make conditional on pies? move?
            int cOver = 0;
            for (int count = 0; count < p->ncs.length(); count++) {
                if ((p->ncs.at(count)->v > threshold)
                        && (mlp->ncs.at(count)->length() > minlength))
                    cOver++;
            }

            int nth = 0; // index of the connection drawn so far, for the pie chart...
            QVector3D zshift = glyphRadius * invRotZ;

            if (ns == 4) { // pie charts
                glShadeModel(GL_FLAT);
                glBegin(GL_TRIANGLE_FAN);
                glVertex3f(p->p.x() + zshift.x(), p->p.y() + zshift.y(), p->p.z() + zshift.z());
            }

            QVector3D pieClosePoint;
            float cr, cg, cb;

            // TODO: Wouldn't iterating through ALL nodes and indexing be easier,
            // taking the number of nodes now into account?
            for (int j = 0; j < p->ncs.length(); j++) {
                // scaled vector from the current point to the point on the other side:
                // edges are now connected to the nodes with fn == n.p
                Connection* diffc = ((Node*)(&scons.at(geo)->dn.at(i)))->ncs.at(j);
                QVector3D diff = (diffc->tn - diffc->fn) * glyphRadius / 100.0;

                Node* colorNode = (Node*)(&scons.at(colorsFrom)->dn.at(i));
                Connection* c = colorNode->ncs.at(j);
                glColor4f(c->r, c->g, c->b, glyphAlpha);

                bool draw = ((c->v > threshold) && (c->length() > minlength)); // TODO: use minSpace

                if (billboarding && (ns == 6)) {
                    diff = diffc->tn;
                    QVector2D xy(diff.x(), diff.y());
                    xy /= 100;
                    xy.normalize();
                    double l = diff.z() / 2.0 + 0.5;
                    xy *= l * glyphRadius / 100;
                    diff = xy.x() * invRotX + xy.y() * invRotY;
                }

                Connection* pieEdge = colorNode->sncs.at(j);
                if (ns == 4) { // pie charts
                    draw = ((pieEdge->v > threshold)
                            && (mlp->ncs.at(pieEdge->origInd)->length() > minlength)); // my brain hurts...
                    if (draw) {
                        if (nth == 1) {
                            cr = pieEdge->r;
                            cg = pieEdge->g;
                            cb = pieEdge->b;
                        }
                        glColor4f(pieEdge->r, pieEdge->g, pieEdge->b, glyphAlpha);
                        float t = (nth / (float)cOver) * 2 * M_PI;
                        nth++;
                        float rad = norm * glyphRadius / 3
                                  + (1 - norm) * glyphRadius * qSqrt(cOver) / 30.0;
                        diff = rad * qSin(t) * invRotX + rad * qCos(t) * invRotY;
                    }
                }

                QVector3D p_shifted = p->p + diff;
                if ((nth == 1) && draw && (ns == 4))
                    pieClosePoint = QVector3D(p_shifted.x() + zshift.x(),
                                              p_shifted.y() + zshift.y(),
                                              p_shifted.z() + zshift.z());

                if (!vectors) {
                    glBegin(GL_POINTS);
                } else if (ns != 4) {
                    glBegin(GL_LINES);
                    if (draw)
                        glVertex3d(p->p.x() + zshift.x(), p->p.y() + zshift.y(), p->p.z() + zshift.z());
                }
                if (draw)
                    glVertex3d(p_shifted.x() + zshift.x(), p_shifted.y() + zshift.y(), p_shifted.z() + zshift.z());
                if (ns != 4)
                    glEnd();
            }

            // TODO: deal with two/one point issue...
            if (ns == 4) {
                glColor4f(cr, cg, cb, glyphAlpha);
                glVertex3f(pieClosePoint.x(), pieClosePoint.y(), pieClosePoint.z());
                glEnd();
            }
        }
    }
}
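The billboarding branch and the pie-chart offsets both re-express a 2D offset in the camera-aligned basis computed by calcInvRot(), so the glyph always lies in a plane facing the viewer. A minimal standalone sketch of that idea (billboardOffset is a hypothetical helper; invRotX and invRotY are the normalized basis vectors from calcInvRot() above):

#include <QVector2D>
#include <QVector3D>

// Hypothetical helper: converts a 2D offset in screen-aligned glyph space into a
// world-space displacement, using the inverse-rotation basis so the offset always
// stays in a plane facing the camera.
static QVector3D billboardOffset(const QVector2D &offset,
                                 const QVector3D &invRotX, const QVector3D &invRotY)
{
    return offset.x() * invRotX + offset.y() * invRotY;
}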