Example #1
void WrapOrthonormalize (TomVM& vm) {

    // Scratch matrix (presumably a file-scope buffer in the full source;
    // declared locally here so the snippet is self-contained)
    vmReal m1[16];

    // Fetch matrix
    if (!ReadMatrix (vm, vm.GetIntParam (1), m1))
        return;

    // Orthonormalize
    Orthonormalize (m1);

    // Create new matrix and assign to register
    vm.Reg ().IntVal () = FillTempRealArray2D (vm.Data (), vm.DataTypes (), 4, 4, m1);
}
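
Example #1 wraps a plain Orthonormalize over a 4x4 matrix. For reference, here is a minimal self-contained sketch of what a Gram-Schmidt orthonormalization of a 4x4 matrix might look like; the function name and the column-vector layout are illustrative assumptions, not the library's actual implementation.

#include <cmath>

// Sketch: classical Gram-Schmidt on a 4x4 matrix stored as 16 doubles,
// treating each group of 4 consecutive entries as one column vector
static void OrthonormalizeSketch(double m[16]) {
    for (int col = 0; col < 4; col++) {
        double *v = &m[col * 4];
        // Remove the projections onto the previously normalized columns
        for (int prev = 0; prev < col; prev++) {
            double *u = &m[prev * 4];
            double dot = 0.0;
            for (int row = 0; row < 4; row++)
                dot += v[row] * u[row];
            for (int row = 0; row < 4; row++)
                v[row] -= dot * u[row];
        }
        // Scale the column to unit length
        double len = std::sqrt(v[0]*v[0] + v[1]*v[1] + v[2]*v[2] + v[3]*v[3]);
        for (int row = 0; row < 4; row++)
            v[row] /= len;
    }
}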
Example #2
//Takes the black hole mass and angular momentum, the position of the camera, the look matrix,
//  an integer code specifying the coordinate type, and an integer code specifying the type
//  of camera.
//The coordinate types:
//  0 - past EF (Eddington-Finkelstein) coordinates.
//  1 - future EF coordinates.
//  2 - Cartesian, r > 0.
//  3 - Cartesian, r < 0.
//The camera types:
//  0 - Spherical camera, like the human eye.
//  1 - Flat camera, like an ordinary camera.
void Camera3D::GenerateCamera(double *position, double *look, int coord, int cameratype)
{
    double newvec[12];
    double *cameraup, *cameraleft, *camera;
    double length, x, y, z, theta, phi;
    int i, j;

    // One 13-double record per pixel: ray direction (4), "up" vector (4),
    // "left" vector (4), and a flag set for the flat camera
    pix = pixh * pixv;
    light = new double[pix*13];

    double metric[16];

    // Pick a coordinate system that is valid at this position; if it differs
    // from the requested one, convert the position and look matrix to it
    coordinate = CoordinateTest(coord, position);
    if(coord == 3) blackhole->reverseMass();    // r < 0 is handled by flipping the mass sign
    if(coordinate != coord)
        CoordinateSwitch(blackhole, position, look, 4, coordinate, coord);

    // Evaluate the metric at the camera position in the chosen coordinate system
    switch (coordinate) {
        case PAST_EF :
            blackhole->MetricPastEddingtonFinkelstein(position, metric);
            break;
        case FUTURE_EF:
            blackhole->MetricFutureEddingtonFinkelstein(position, metric);
            break;
        case CARTESIAN_POSITIVE_R:
            blackhole->MetricCartesian(position, metric);
            break;
        case CARTESIAN_NEGATIVE_R:
            blackhole->reverseMass();
            blackhole->MetricCartesian(position, metric);
            break;
    }

    // Copy the look matrix, record the camera position, and orthonormalize the
    // copy against the local metric for the frame transforms below
    for(i = 0; i < 16; i++) directions[i] = look[i];
    pos[0] = position[0]; pos[1] = position[1]; pos[2] = position[2]; pos[3] = position[3];
    Orthonormalize(directions, metric);

    Transpose(directions);

    for(i = 0; i < pixh; i++) {
        for(j = 0; j < pixv; j++) {
            camera = &light[(pixv*i+j)*13];
            cameraup = &camera[4];
            cameraleft = &camera[8];

            // Map the pixel (i, j) to a view direction (for the spherical
            // camera, 0.785398 = pi/4 converts the fov units to radians)
            switch (cameratype) {
                case CAMERA_SPHERICAL:
                    theta = 0.785398*(-fovv + 2*fovv * (zoomv1 + (zoomv2 - zoomv1) * (j + .5) /((double) pixv)));
                    phi = 0.785398*(-fovh + 2*fovh * (zoomh1 + (zoomh2 - zoomh1) * (i + .5) /((double) pixh)));
                    z = cos(theta)*cos(phi);
                    x = -cos(theta)*sin(phi);
                    y = sin(theta);
                    break;
                case CAMERA_FLAT:
                    camera[12] = 1.0;   // flag this ray as coming from the flat camera
                    x = -fovh + 2*fovh * (zoomh1 + (zoomh2 - zoomh1) * (i + .5) /((double) pixh));
                    y = -fovv + 2*fovv * (zoomv1 + (zoomv2 - zoomv1) * (j + .5) /((double) pixv));
                    z = 1.0;
                    break;
            }

            // Ray direction: normalize the spatial part, then set the time
            // component so the vector is null in the orthonormal frame
            length = sqrt(x*x + y*y + z*z);
            newvec[3] = x/length; newvec[1] = z/length; newvec[2] = y/length;
            newvec[0] = -sqrt(newvec[1]*newvec[1] + newvec[2]*newvec[2] + newvec[3]*newvec[3]);
            MatrixDotVector(camera, directions, newvec);

            // Per-pixel "up" vector, built the same way
            length = sqrt(y*y + 1);
            newvec[3] = 0;
            newvec[1] = -y/length;
            newvec[2] = 1/length;
            newvec[0] = -sqrt(newvec[1]*newvec[1] + newvec[2]*newvec[2] + newvec[3]*newvec[3]);
            MatrixDotVector(cameraup, directions, newvec);

            // Per-pixel "left" vector
            length = sqrt((1 + x*x + y*y)/(y*y + 1));
            newvec[3] = -1/length;
            newvec[1] = x/((1+y*y)*length);
            newvec[2] = y*newvec[1];
            newvec[0] = -sqrt(newvec[1]*newvec[1] + newvec[2]*newvec[2] + newvec[3]*newvec[3]);
            MatrixDotVector(cameraleft, directions, newvec);

        }
    }
}
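
Example #2 calls Orthonormalize(directions, metric), i.e. Gram-Schmidt where inner products are taken against the local spacetime metric rather than the Euclidean one, so the resulting tetrad is orthonormal in the curved geometry. Below is a minimal sketch of that variant; the helper names, the row-vector layout, and the handling of the Lorentzian signature are illustrative assumptions, not the project's actual code.

#include <cmath>

// Inner product of two 4-vectors with respect to a 4x4 metric g (row-major):
// <u,v> = u^a g_ab v^b
static double MetricDot(const double g[16], const double *u, const double *v) {
    double s = 0.0;
    for (int a = 0; a < 4; a++)
        for (int b = 0; b < 4; b++)
            s += u[a] * g[4*a + b] * v[b];
    return s;
}

// Sketch: Gram-Schmidt against a (possibly Lorentzian) metric, with the four
// vectors stored as the rows of m. Each vector has its projections onto the
// earlier ones removed, then is rescaled so that |<v,v>| = 1; the sign of
// <v,v> (-1 for timelike, +1 for spacelike) is preserved by the rescaling.
static void OrthonormalizeMetricSketch(double m[16], const double g[16]) {
    for (int i = 0; i < 4; i++) {
        double *v = &m[4*i];
        for (int j = 0; j < i; j++) {
            double *u = &m[4*j];
            // Dividing by <u,u> (which is +1 or -1 after normalization)
            // keeps the projection correct for both signature signs
            double c = MetricDot(g, v, u) / MetricDot(g, u, u);
            for (int a = 0; a < 4; a++)
                v[a] -= c * u[a];
        }
        double norm = std::sqrt(std::fabs(MetricDot(g, v, v)));
        for (int a = 0; a < 4; a++)
            v[a] /= norm;
    }
}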