Code example #1
File: face_util.cpp Project: JackTing/Sunshine
    FaceHit closestFace(QList<Triangle> triangles, Point3 rayOrig, Vector3 rayDir, bool onlySelectedMeshes)
    {
        FaceHit faceHit;
        float validMin = 99999999999.9f;
        float validMax = -1.0f;

        foreach (Triangle triangle, triangles) {
            QMatrix4x4 objectToWorld = triangle.mesh->objectToWorld();
            OpenMesh::Vec3f p0 = triangle.mesh->_mesh->point(triangle.mesh->_mesh->from_vertex_handle(triangle.a));
            OpenMesh::Vec3f p1 = triangle.mesh->_mesh->point(triangle.mesh->_mesh->from_vertex_handle(triangle.b));
            OpenMesh::Vec3f p2 = triangle.mesh->_mesh->point(triangle.mesh->_mesh->from_vertex_handle(triangle.c));
            Point3 v0 = objectToWorld.map(Vector3(p0[0], p0[1], p0[2]));
            Point3 v1 = objectToWorld.map(Vector3(p1[0], p1[1], p1[2]));
            Point3 v2 = objectToWorld.map(Vector3(p2[0], p2[1], p2[2]));

            Vector3 edge1 = v1 - v0;
            Vector3 edge2 = v2 - v0;

            Vector3 pvec = Vector3::crossProduct(rayDir, edge2);
            float det = Vector3::dotProduct(edge1, pvec);

            // proceed only if the ray is not (nearly) parallel to the triangle's plane
            if (!(det > -0.00001f && det < 0.00001f)) {
                float invDet = 1.0f / det;

                Vector3 tvec = rayOrig - v0;

                float u = Vector3::dotProduct(tvec, pvec) * invDet;
                if (!(u < 0.0f || u > 1.0f)) {
                    Vector3 qvec = Vector3::crossProduct(tvec, edge1);

                    float v = Vector3::dotProduct(rayDir, qvec) * invDet;
                    if (!(v < 0.0f || u+v > 1)) { // intersection!
                        float t = Vector3::dotProduct(edge2, qvec) * invDet;

                        if (t < validMin && t > 0) {
                            faceHit.nearFace = triangle.mesh->_mesh->face_handle(triangle.a);
                            faceHit.nearMesh = triangle.mesh;
                            faceHit.range.setX(t);
                            validMin = t;
                        }
                        if (t > validMax) {
                            faceHit.farFace = triangle.mesh->_mesh->face_handle(triangle.a);
                            faceHit.farMesh = triangle.mesh;
                            faceHit.range.setY(t);
                            validMax = t;
                        }
                    }
                }
            } else {
                //println("ray aligned with triangle")
            }
        }

        return faceHit;
    }
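
The example above is the Möller–Trumbore ray/triangle intersection test wrapped in project-specific types (Point3, Vector3, FaceHit). For reference, here is a minimal self-contained sketch of the same test using plain QVector3D; the function name and the -1.0f miss convention are illustrative, not part of the project.

// Möller–Trumbore ray/triangle intersection, reduced to its core.
// Returns the distance t along the ray on a hit, or -1.0f on a miss.
#include <QVector3D>

float rayTriangle(const QVector3D &orig, const QVector3D &dir,
                  const QVector3D &v0, const QVector3D &v1, const QVector3D &v2)
{
    const QVector3D edge1 = v1 - v0;
    const QVector3D edge2 = v2 - v0;
    const QVector3D pvec  = QVector3D::crossProduct(dir, edge2);
    const float det = QVector3D::dotProduct(edge1, pvec);
    if (det > -1e-5f && det < 1e-5f)
        return -1.0f;                                   // ray parallel to the triangle plane
    const float invDet = 1.0f / det;
    const QVector3D tvec = orig - v0;
    const float u = QVector3D::dotProduct(tvec, pvec) * invDet;
    if (u < 0.0f || u > 1.0f)
        return -1.0f;                                   // outside the triangle
    const QVector3D qvec = QVector3D::crossProduct(tvec, edge1);
    const float v = QVector3D::dotProduct(dir, qvec) * invDet;
    if (v < 0.0f || u + v > 1.0f)
        return -1.0f;                                   // outside the triangle
    return QVector3D::dotProduct(edge2, qvec) * invDet; // distance along the ray
}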
Code example #2
void ViewportViewPerspective::initializeViewport(const QVector3D& surfmin, const QVector3D& surfmax, int width, int height)
{
    // add margin to max/min
    QVector3D diff = 0.01 * _margin * (surfmax - surfmin);
    QVector3D min = surfmin - diff;
    QVector3D max = surfmax + diff;

    // calculate the midpoint of the bounding box, which is used as the center of the
    // model for rotating the model
    _midpoint = 0.5 * (min + max);

    QVector3D panpoint = _midpoint;
    panpoint.setX(panpoint.x() + _panX);
    panpoint.setY(panpoint.y() + _panY);
    panpoint.setZ(panpoint.z() + _panZ);

    // calculate the distance from the camera to the center of the model,
    // derived from the camera's field of view
    float dist = sqrt((max.y() - min.y()) * (max.y() - min.y())
                      + (max.z() - min.z()) * (max.z() - min.z()));
    if (dist == 0)
        dist = 1E-2f;
    if (atan(_field_of_view) != 0) {
        _distance = 1.5 * dist / atan(_field_of_view);
        if (_distance > 1E5)
            _distance = 1E5;
    }
    else
        _distance = 1E5;

    // build the vertex transformation matrices from the perspective projection
    // and the camera angle and elevation

    float aspect_ratio = 1.0;
    if (height != 0)
        aspect_ratio = width / static_cast<float>(height);
    _proj = QMatrix4x4();

    // create projection
    _proj.perspective(RadToDeg(_field_of_view) / _zoom, aspect_ratio, 0.1f, 40.0f);
    
    // find the camera location
    QMatrix4x4 model;
    model.translate(_panX, _panY, _panZ);
    model.rotate(-_elevation, 1, 0, 0);
    model.rotate(_angle, 0, 0, 1);
    _camera_location = model.map(QVector3D(max.x() + _distance, 0, 0));
    
    // view matrix
    QMatrix4x4 view;
    view.lookAt(_camera_location, panpoint, QVector3D(0,0,1));
    
    _view = view;

    finishSetup();
}
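
Stripped of the margin, pan, and distance heuristics, the example reduces to the usual QMatrix4x4 projection/view setup. A minimal sketch of that pattern follows; the names are illustrative only, not part of the project.

// Build a perspective projection and a z-up look-at view matrix,
// the same two transforms initializeViewport() assembles above.
#include <QMatrix4x4>
#include <QVector3D>

void buildCamera(QMatrix4x4 &proj, QMatrix4x4 &view,
                 float fovDegrees, int width, int height,
                 const QVector3D &eye, const QVector3D &center)
{
    const float aspect = (height != 0) ? width / float(height) : 1.0f;

    proj.setToIdentity();
    proj.perspective(fovDegrees, aspect, 0.1f, 40.0f); // same near/far planes as above

    view.setToIdentity();
    view.lookAt(eye, center, QVector3D(0, 0, 1));      // z is up, as in the example
}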
Code example #3
File: cubeview.cpp Project: slavablind91/code
void CubeView::paintGL(QGLPainter *painter)
{
    needsUpdate = false;

    QRect windowViewport = painter->viewport();

    painter->modelViewMatrix().push();
    painter->projectionMatrix().push();
    painter->pushSurface(fbo);
    painter->setViewport(painter->surfaceSize());

    painter->setCamera(innerCamera);
    painter->modelViewMatrix().rotate(tangle, 0.0f, 1.0f, 0.0f);

    painter->setFaceColor(QGL::AllFaces, QColor(170, 202, 0));
    painter->setStandardEffect(QGL::LitMaterial);

    painter->setDepthTestingEnabled(true);

    painter->clear();
    teapot->draw(painter);

    painter->popSurface();
    painter->projectionMatrix().pop();
    painter->modelViewMatrix().pop();
    painter->setViewport(windowViewport);

    painter->setDepthTestingEnabled(false);

    painter->modelViewMatrix().rotate(oangle, 0.0f, 1.0f, 0.0f);

    QMatrix4x4 m = painter->modelViewMatrix();
    QVector3D cube1pos(-1.5f, 0.0f, 0.0f);
    QVector3D cube2pos(1.5f, 0.0f, 0.0f);

    if (m.map(cube1pos).z() < m.map(cube2pos).z()) {
        drawCube1(painter, cube1pos);
        drawCube2(painter, cube2pos);
    } else {
        drawCube2(painter, cube2pos);
        drawCube1(painter, cube1pos);
    }
}
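
The two cubes are translucent, so they are drawn back-to-front: the cube whose center maps to the smaller (more negative, hence farther) eye-space z is drawn first. The same comparison generalizes to a list of objects; a small hedged sketch, with an illustrative helper that is not part of CubeView:

// Sort object positions back-to-front by eye-space depth before drawing
// translucent geometry.
#include <QMatrix4x4>
#include <QVector3D>
#include <QVector>
#include <algorithm>

void sortBackToFront(const QMatrix4x4 &modelView, QVector<QVector3D> &positions)
{
    std::sort(positions.begin(), positions.end(),
              [&](const QVector3D &a, const QVector3D &b) {
                  // Smaller eye-space z means farther from the camera.
                  return modelView.map(a).z() < modelView.map(b).z();
              });
}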
Code example #4
File: Mesh.cpp Project: skanight/ClothSimulation
float Mesh::intersects(const QVector3D &pos, const QVector3D &direction) const
{
    float res = -1;
    QMatrix4x4 inv = world().inverted();
    QVector3D invPos = inv.map(pos);
    inv.setColumn(3, QVector4D(0, 0, 0, 1));
    QVector3D invDir = inv.map(direction);
    for (int i = 0; i < _faces.size(); ++i) {
        int p0Idx = _faces[i] * 3,
            p1Idx = _faces[++i] * 3,
            p2Idx = _faces[++i] * 3;
        QVector3D p0(_vertices[p0Idx], _vertices[p0Idx + 1], _vertices[p0Idx + 2]),
                  p1(_vertices[p1Idx], _vertices[p1Idx + 1], _vertices[p1Idx + 2]),
                  p2(_vertices[p2Idx], _vertices[p2Idx + 1], _vertices[p2Idx + 2]);

        float current = _triangleIntersection(invPos, invDir, p0, p1, p2);
        if (current > 0 && (res == -1 || current < res))
            res = current;
    }
    return res;
}
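
The example maps the ray origin with the full inverse matrix but zeroes the translation column before mapping the direction, because a direction must not be translated. QMatrix4x4 offers mapVector() for exactly that case; a small equivalent sketch (names are illustrative):

// Transform a ray into object space: points get the full affine map,
// directions only the 3x3 rotation/scale part.
#include <QMatrix4x4>
#include <QVector3D>

void rayToObjectSpace(const QMatrix4x4 &world,
                      const QVector3D &pos, const QVector3D &dir,
                      QVector3D &invPos, QVector3D &invDir)
{
    const QMatrix4x4 inv = world.inverted();
    invPos = inv.map(pos);        // point: translation applies
    invDir = inv.mapVector(dir);  // direction: translation dropped
}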
Code example #5
/**
  \brief When a station has been selected, this updates the shot lines

  This shows the shot lines from the selected station.  If no station is
  currently selected, this will hide the lines
  */
void cwScrapStationView::updateShotLines() {
    if(scrap() == nullptr) { return; }
    if(scrap()->parentNote() == nullptr) { return; }
    if(scrap()->parentNote()->parentTrip() == nullptr) { return; }
    if(transformUpdater() == nullptr) { return; }

    cwNoteStation noteStation = scrap()->station(selectedItemIndex());
    //Get the current trip
    cwNote* note = scrap()->parentNote();
    cwTrip* trip = note->parentTrip();
    cwCave* cave = trip->parentCave();
    cwStationPositionLookup stationPositionLookup = cave->stationPositionLookup();

    //Clear all the lines
    ShotLines.clear();

    if(noteStation.isValid() && stationPositionLookup.hasPosition(noteStation.name())) {
        QString stationName = noteStation.name();
        QSet<cwStation> neighboringStations = trip->neighboringStations(stationName);

        //The position of the selected station
        QVector3D selectedStationPos = stationPositionLookup.position(noteStation.name());

        //Create the matrix to covert global position into note position
        QMatrix4x4 noteTransformMatrix = scrap()->noteTransformation()->matrix(); //Matrix from page coordinates to cave coordinates
        noteTransformMatrix = noteTransformMatrix.inverted(); //From cave coordinates to page coordinates

        QMatrix4x4 notePageAspect = note->scaleMatrix().inverted(); //The note's aspect ratio

        QMatrix4x4 offsetMatrix;
        offsetMatrix.translate(-selectedStationPos);

        QMatrix4x4 dotPerMeter;
        dotPerMeter.scale(note->dotPerMeter(), note->dotPerMeter(), 1.0);

        QMatrix4x4 noteStationOffset;
        noteStationOffset.translate(QVector3D(noteStation.positionOnNote()));

        QMatrix4x4 toNormalizedNote = noteStationOffset *
                dotPerMeter *
                notePageAspect *
                noteTransformMatrix *
                offsetMatrix;

        //Go through all the neighboring stations and add the position to the line
        foreach(cwStation station, neighboringStations) {

            QVector3D currentPos = stationPositionLookup.position(station.name());
            QVector3D normalizeNotePos = toNormalizedNote.map(currentPos);

            ShotLines.append(QLineF(noteStation.positionOnNote(), normalizeNotePos.toPointF()));
        }
    }
}
Code example #6
File: Camera.cpp Project: skanight/Animation
QVector3D Camera::mapPoint(const QPoint screenPos) const
{
    float x = screenPos.x(),
        y = screenPos.y(),
        width = _viewportSize.width(),
        height = _viewportSize.height(),
        xrel = (x * 2 - width) / width,
        yrel = -(y * 2 - height) / height;

    // Reverse the projection and return the point in world co-ordinates.
    QMatrix4x4 invm = _world.inverted() * _projection.inverted();
    return invm.map(QVector3D(xrel, yrel, -1.0f));
}
Code example #7
File: qglcamera.cpp Project: Distrotech/qt3d
/*!
    Maps \a point from viewport co-ordinates to eye co-ordinates.
    The size of the viewport is given by \a viewportSize, and its
    aspect ratio by \a aspectRatio.

    The returned vector will have its x and y components set to the
    position of the point on the near plane, and the z component
    set to -nearPlane().

    This function is used for converting a mouse event's position
    into eye co-ordinates within the current camera view.
*/
QVector3D QGLCamera::mapPoint
    (const QPoint& point, float aspectRatio, const QSize& viewportSize) const
{
    Q_D(const QGLCamera);

    // Rotate the co-ordinate system to account for the screen rotation.
    int x = point.x();
    int y = point.y();
    int width = viewportSize.width();
    int height = viewportSize.height();
    if (!d->adjustForAspectRatio)
        aspectRatio = 1.0f;
    if (d->screenRotation == 90) {
        if (aspectRatio != 0.0f)
            aspectRatio = 1.0f / aspectRatio;
        qSwap(x, y);
        qSwap(width, height);
        y = height - 1 - y;
    } else if (d->screenRotation == 180) {
        x = width - 1 - x;
        y = height - 1 - y;
    } else if (d->screenRotation == 270) {
        if (aspectRatio != 0.0f)
            aspectRatio = 1.0f / aspectRatio;
        qSwap(x, y);
        qSwap(width, height);
    }

    // Determine the relative distance from the middle of the screen.
    // After this xrel and yrel are typically between -1.0 and +1.0
    // (unless the point was outside the viewport).  The yrel is
    // flipped upside down to account for the incoming co-ordinate
    // being left-handed, but the world being right-handed.
    float xrel, yrel;
    if (width)
        xrel = ((float(x * 2)) - float(width)) / float(width);
    else
        xrel = 0.0f;
    if (height)
        yrel = -((float(y * 2)) - float(height)) / float(height);
    else
        yrel = 0.0f;

    // Reverse the projection and return the point in world co-ordinates.
    QMatrix4x4 m = projectionMatrix(aspectRatio);
    QMatrix4x4 invm = m.inverted();
    return invm.map(QVector3D(xrel, yrel, -1.0f));
}
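
A common follow-up is turning the eye-space near-plane point returned by mapPoint() into a world-space pick ray. The sketch below assumes QGLCamera's modelViewMatrix() and eye() accessors from the same Qt3D module; the helper itself is not part of the class.

// Build a world-space pick ray from a mouse position (hedged sketch).
#include "qglcamera.h"   // Qt3D module header
#include <QMatrix4x4>
#include <QVector3D>

void pickRay(const QGLCamera *camera, const QPoint &pos,
             float aspectRatio, const QSize &viewportSize,
             QVector3D &rayOrigin, QVector3D &rayDirection)
{
    // Eye-space point on the near plane under the cursor.
    const QVector3D eyePoint = camera->mapPoint(pos, aspectRatio, viewportSize);

    // Undo the modelview transform to get back to world space.
    const QMatrix4x4 invView = camera->modelViewMatrix().inverted();
    rayOrigin = camera->eye();                               // camera position in world space
    rayDirection = (invView.map(eyePoint) - rayOrigin).normalized();
}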
Code example #8
    QRectF clipRect() const
    {
        // Start with an invalid rect.
        QRectF resultRect(0, 0, -1, -1);

        for (const QSGClipNode* clip = clipList(); clip; clip = clip->clipList()) {
            QMatrix4x4 clipMatrix;
            if (pageNode()->devicePixelRatio() != 1.0) {
                clipMatrix.scale(pageNode()->devicePixelRatio());
                if (clip->matrix())
                    clipMatrix *= (*clip->matrix());
            } else if (clip->matrix())
                clipMatrix = *clip->matrix();

            QRectF currentClip;

            if (clip->isRectangular())
                currentClip = clipMatrix.mapRect(clip->clipRect());
            else {
                const QSGGeometry* geometry = clip->geometry();
                // Assume here that clipNode has only coordinate data.
                const QSGGeometry::Point2D* geometryPoints = geometry->vertexDataAsPoint2D();

                // The clip region needs at least three vertices (a triangle) to be valid.
                if (geometry->vertexCount() < 3)
                    continue;

                QPolygonF polygon;

                for (int i = 0; i < geometry->vertexCount(); i++)
                    polygon.append(clipMatrix.map(QPointF(geometryPoints[i].x, geometryPoints[i].y)));
                currentClip = polygon.boundingRect();
            }

            if (currentClip.isEmpty())
                continue;

            if (resultRect.isValid())
                resultRect &= currentClip;
            else
                resultRect = currentClip;
        }

        return resultRect;
    }
Code example #9
ProVector AsemanSensors::rebase(const ProVector &v)
{
    ProVector res;

    const qreal x = v.x;
    const qreal y = v.y;
    const qreal z = v.z;

    QMatrix4x4 m;
    m.rotate(p->zeroX*180/M_PI,1,0,0);
    m.rotate(p->zeroY*180/M_PI,0,1,0);

    const QVector3D & v3d = m.map(QVector3D(x,y,z));

    res.x = v3d.x();
    res.y = v3d.y();
    res.z = v3d.z();

    return res;
}
Code example #10
// Slot called when an image is dropped
// Adds a layer
void AnimationForm::slot_dropedImage( QRectF rect, QPoint pos, int imageIndex )
{
	CObjectModel *pModel = m_pEditData->getObjectModel() ;
	int frameNum  = ui->label_frame->value() ;
	QModelIndex index = ui->treeView->currentIndex() ;

	if ( !index.isValid() ) {
		qWarning() << "slot_dropedImage current index invalid 0" ;
		return ;
	}

	ObjectItem *pObjItem = pModel->getObject(index) ;
	if ( !pObjItem ) {
		qWarning() << "slot_dropedImage current obj 0" ;
		return ;
	}

	if ( !m_pSetting->getLayerHierarchy() ) {
		index = pObjItem->getIndex() ;
	}

	pos -= QPoint((m_pSetting->getAnmWindowW()/2), (m_pSetting->getAnmWindowH()/2)) ;	// convert to GLWidget-local position

	ObjectItem *pItem = pModel->getItemFromIndex(index) ;
	bool valid ;
	QMatrix4x4 mat = pItem->getDisplayMatrix(frameNum, &valid) ;
	if ( valid ) {
		QMatrix4x4 inv = mat.inverted(&valid) ;
		if ( valid ) {
			pos = inv.map(pos) ;
		}
	}

	index = m_pEditData->cmd_addItem(QString("Layer %1").arg(pObjItem->childCount()), index) ;
	ui->treeView->setCurrentIndex(index) ;
//	m_pEditData->setSelIndex(index) ;

	// Add the frame data to the tree view
    FrameData frameData ;
	frameData.pos_x = pos.x() ;
	frameData.pos_y = pos.y() ;
	frameData.pos_z = 0 ;
	frameData.rot_x =
	frameData.rot_y =
	frameData.rot_z = 0 ;
	frameData.center_x = (rect.width()) / 2 ;
	frameData.center_y = (rect.height()) / 2 ;
	frameData.frame = frameNum ;
	frameData.fScaleX = frameData.fScaleY = 1.0f ;
	frameData.setRect(rect);
	frameData.nImage = imageIndex ;
	frameData.bUVAnime = false ;
	frameData.rgba[0] =
	frameData.rgba[1] =
	frameData.rgba[2] =
	frameData.rgba[3] = 255 ;

	QList<QWidget *> updateWidget ;
	updateWidget << m_pGlWidget ;
	updateWidget << m_pDataMarker ;

	m_pEditData->cmd_addFrameData(index, frameData, updateWidget) ;
}
Code example #11
File: shader_phong.cpp Project: untereiner/CGoGN_2
void ShaderPhongGen::set_local_light_position(const QVector3D& l, const QMatrix4x4& view_matrix)
{
	QVector4D loc4 = view_matrix.map(QVector4D(l, 1.0));
	prg_.setUniformValue(unif_light_position_, QVector3D(loc4) / loc4.w());
}
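
Because the light position is a point, it is mapped in homogeneous form with w = 1 and divided by w afterwards; for an affine view matrix w stays 1, so the divide is a no-op, but the pattern also covers matrices with a projective part. A minimal illustration (the helper name is ours, not the project's):

// Map a 3D point through a QMatrix4x4 in homogeneous coordinates.
#include <QMatrix4x4>
#include <QVector3D>
#include <QVector4D>

QVector3D mapHomogeneous(const QMatrix4x4 &m, const QVector3D &p)
{
    const QVector4D h = m.map(QVector4D(p, 1.0f)); // full 4x4 transform
    return QVector3D(h) / h.w();                   // perspective divide
}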
Code example #12
void SurfaceSet::paintNodes(int ns){

    calcInvRot();
    SConnections* ccs = scons.at(cs);
    glPointSize(qMax(size,0.1));  //does not like 0 for pointsize...
    glLineWidth(size);

    //for all nodes in the current surface...
    for (int i = 0; i < ccs->dn.length(); i++){
        Node* p = (Node*)(&ccs->dn.at(i));
        Node* mlp = (Node*)(&scons.at(minSpace)->dn.at(i));
        QVector3D nnormal = p->normal.normalized();
        QMatrix4x4* view = viewMatrix();
        QVector3D mapped = view->mapVector(nnormal);
        QVector3D mappedp = view->map(p->p);

        bool visible = mapped.z() > 0; //normal points to camera
        //TODO: poor man's clipping; should take the aspect ratio into account...
        double clip = 1;
        visible &= (mappedp.x()>-clip)&&(mappedp.x()<clip)&&(mappedp.y()>-clip)&&(mappedp.y()<clip);

        if (visible) {
            //How many connections have a value above the threshold?
            //TODO: Change p to whatever makes sense, make conditional on pies? move?
            int cOver = 0;
            for (int count = 0; count < p->ncs.length(); count++){
                if ((p->ncs.at(count)->v > threshold) && (mlp->ncs.at(count)->length()>minlength)) cOver++;
            }
            int nth = 0; //index of the connection being drawn, used for the pie chart...
            QVector3D zshift = glyphRadius*invRotZ;
            if (ns==4) {
                //pie charts
                glShadeModel(GL_FLAT);
                glBegin(GL_TRIANGLE_FAN);
                glVertex3f(p->p.x()+zshift.x(),p->p.y()+zshift.y(),p->p.z()+zshift.z());
            }
            QVector3D pieClosePoint;
            float cr,cg,cb;
            //TODO: Wouldn't iterating through ALL nodes and indexing be easier, taking the number of nodes now into account?
            for (int j=0; j<p->ncs.length();j++){
                //scaled vector from current point to point on the other side: edges are now connected to the nodes with fn == n.p
                Connection* diffc = ((Node*)(&scons.at(geo)->dn.at(i)))->ncs.at(j);
                QVector3D diff = (diffc->tn-diffc->fn) * glyphRadius/100.0;
                Node* colorNode = (Node*)(&scons.at(colorsFrom)->dn.at(i));
                Connection* c = colorNode->ncs.at(j);
                glColor4f(c->r,c->g,c->b,glyphAlpha);
                bool draw = ((c->v > threshold) && (c->length()>minlength));//TODO: use minSpace
                if (billboarding && (ns==6)) {
                    diff = diffc->tn;
                    QVector2D xy(diff.x(),diff.y());
                    xy /= 100;
                    xy.normalize();
                    double l = diff.z()/2.0+0.5;
                    xy *= l*glyphRadius/100;
                    diff = xy.x()*invRotX + xy.y()*invRotY;
                }
                Connection* pieEdge = colorNode->sncs.at(j);
                if (ns==4) {
                    //pie charts
                    draw = ((pieEdge->v > threshold) && (mlp->ncs.at(pieEdge->origInd)->length()>minlength)); //my brain hurts...
                    if (draw) {
                        if (nth==1) {
                            cr = pieEdge->r;
                            cg = pieEdge->g;
                            cb = pieEdge->b;
                        }
                        glColor4f(pieEdge->r,pieEdge->g,pieEdge->b,glyphAlpha);
                        float t = (nth/(float)cOver)*2*M_PI;
                        nth++;
                        float rad = norm*glyphRadius/3 + (1-norm)*glyphRadius*qSqrt(cOver)/30.0;
                        diff = rad*qSin(t)*invRotX + rad*qCos(t)*invRotY;
                    }
                }
                QVector3D p_shifted = p->p + diff;
                if ((nth==1) && draw && (ns==4)) pieClosePoint = QVector3D(p_shifted.x()+zshift.x(),p_shifted.y()+zshift.y(),p_shifted.z()+zshift.z());
                if (!vectors){
                    glBegin(GL_POINTS);
                } else if (ns!=4){
                    glBegin(GL_LINES);
                    if (draw) glVertex3d(p->p.x()+zshift.x(),p->p.y()+zshift.y(),p->p.z()+zshift.z());
                }
                if (draw) glVertex3d(p_shifted.x()+zshift.x(),p_shifted.y()+zshift.y(),p_shifted.z()+zshift.z());
                if (ns!=4) glEnd();
            }

            //TODO: deal with two/one point issue...
            if (ns==4) {
                glColor4f(cr,cg,cb,glyphAlpha);
                glVertex3f(pieClosePoint.x(),pieClosePoint.y(),pieClosePoint.z());
                glEnd();
            }
        }
    }
}