avtDataObject_p
avtEvalTransformExpression::TransformData(avtDataObject_p input)
{
    vtkMatrix4x4 *mat = vtkMatrix4x4::New();
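    // Fill the upper-left 3x3 block of the homogeneous transform row by row
    // from the nine user-supplied parameters; the fourth row and column keep
    // the identity values that a new vtkMatrix4x4 starts with.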
    mat->SetElement(0, 0, inputParameters[0]);
    mat->SetElement(0, 1, inputParameters[1]);
    mat->SetElement(0, 2, inputParameters[2]);
    mat->SetElement(1, 0, inputParameters[3]);
    mat->SetElement(1, 1, inputParameters[4]);
    mat->SetElement(1, 2, inputParameters[5]);
    mat->SetElement(2, 0, inputParameters[6]);
    mat->SetElement(2, 1, inputParameters[7]);
    mat->SetElement(2, 2, inputParameters[8]);

    avtDataset_p ds;
    CopyTo(ds, input);
    avtSourceFromAVTDataset termsrc(ds);

    avtCustomTransform transform;
    transform.SetMatrix(mat);
    transform.SetInput(termsrc.GetOutput());

    avtDataObject_p output = transform.GetOutput();
    output->Update(GetGeneralContract());

    mat->Delete();

    return output;
}
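
// ****************************************************************************
//  Most of the examples in this listing build the same "artificial pipeline":
//  copy the filter's input into an avtDataset, wrap it in an
//  avtSourceFromAVTDataset so that the Update() call terminates there instead
//  of propagating up the real pipeline, chain the desired filters onto that
//  source, and force execution with a contract.  The sketch below is a
//  minimal illustration of that idiom, using only calls that appear in the
//  surrounding examples; RunArtificialPipeline and the FilterType template
//  parameter are placeholders, not actual VisIt names.
// ****************************************************************************
template <class FilterType>
avtDataObject_p
RunArtificialPipeline(avtDataObject_p input, FilterType *filter,
                      avtContract_p contract)
{
    // Terminate the pipeline at a copy of the input so the Update() below
    // does not propagate past this point.
    avtDataset_p ds;
    CopyTo(ds, input);
    avtSourceFromAVTDataset termsrc(ds);

    // Chain the filter onto the terminating source and force execution.
    filter->SetInput(termsrc.GetOutput());
    avtDataObject_p output = filter->GetOutput();
    output->Update(contract);

    return output;
}
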
// ****************************************************************************
//  Method: avtConnComponentsWeightedVariableQuery::ApplyFilters
//
//  Purpose:
//      Applies the filters to the input.
//
//  Programmer: Cyrus Harrison
//  Creation:   February 2, 2007
//
//  Modifications:
//    Cyrus Harrison, Mon Jun  6 17:04:12 PDT 2011
//    Support lines.
//
// ****************************************************************************
avtDataObject_p
avtConnComponentsWeightedVariableQuery::ApplyFilters(avtDataObject_p inData)
{
    //
    // Create an artificial pipeline.
    //
    avtDataset_p ds;
    CopyTo(ds, inData);
    avtSourceFromAVTDataset termsrc(ds);
    avtDataObject_p dob = termsrc.GetOutput();

    // Add the length, area, revolved volume, or volume filter based on the
    // topological dimension (and, in 2D, the mesh coordinate type) of the input.
    int topo = GetInput()->GetInfo().GetAttributes().GetTopologicalDimension();
    if (topo == 1)
    {
        debug5 << "ConnComponentsWeightedVariable query using length" << endl;
        lengthFilter->SetInput(dob);
        dob = lengthFilter->GetOutput();
    }
    else if (topo == 2)
    {
        if (GetInput()->GetInfo().GetAttributes().GetMeshCoordType()== AVT_XY)
        {
            debug5 << "ConnComponentsWeightedVariable query using "
                   << "Area" << endl;

            areaFilter->SetInput(dob);
            dob = areaFilter->GetOutput();
        }
        else
        {
            debug5 << "ConnComponentsWeightedVariable query using "
                   << "RevolvedVolume" << endl;

            revolvedVolumeFilter->SetInput(dob); 
            dob = revolvedVolumeFilter->GetOutput();
        }
    }
    else
    {
        debug5 << "ConnComponentsWeightedVariable query using "
               << "Volume" << endl;

        volumeFilter->SetInput(dob);
        dob = volumeFilter->GetOutput();
    }

    cclFilter->SetInput(dob);
    dob = cclFilter->GetOutput();

    avtContract_p contract = 
        inData->GetOriginatingSource()->GetGeneralContract();

    cclFilter->GetOutput()->Update(contract);

    return cclFilter->GetOutput();
}
// Example 3
void
avtLineScanQuery::Execute(avtDataTree_p tree)
{
    avtDataset_p input = GetTypedInput();

    int numPasses = numLines / numLinesPerIteration;
    if (numLines % numLinesPerIteration != 0)
        numPasses++;
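    // e.g. numLines = 250 with numLinesPerIteration = 100 gives numPasses = 3:
    // two full passes of 100 lines and a final pass of 50 (see numForLast
    // below).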

    avtContract_p contract =
        input->GetOriginatingSource()->GetGeneralContract();
    if (GetInput()->GetInfo().GetAttributes().ValidActiveVariable())
        varname = GetInput()->GetInfo().GetAttributes().GetVariableName();
    else 
        varname = contract->GetDataRequest()->GetVariable();

    for (int i = 0 ; i < numPasses ; i++)
    {
        int numForLast = (numLines % numLinesPerIteration);
        numForLast = (numForLast == 0 ? numLinesPerIteration : numForLast);
        int linesForThisPass = (i == numPasses-1  ? numForLast 
                                                  : numLinesPerIteration);

        //
        // Create an artificial pipeline.
        //
        avtDataset_p ds;
        CopyTo(ds, input);
        avtSourceFromAVTDataset termsrc(ds);
        avtDataObject_p dob = termsrc.GetOutput();
    
        avtLineScanFilter *filt = CreateLineScanFilter();
        filt->SetNumberOfLines(linesForThisPass);
        filt->SetRandomSeed(i);
        filt->SetInput(dob);
        //
        // Cause our artificial pipeline to execute.
        //
        filt->GetOutput()->Update(contract);
        lines = filt->GetLines();

        avtDataset_p ds2 = filt->GetTypedOutput();
        avtDataTree_p tree = ds2->GetDataTree();
        ExecuteTree(tree);
        lines = NULL;
        delete filt;

        //
        // Reset the timeout for the next iteration.
        //
        avtCallback::ResetTimeout(60*5);
    }
}
avtDataObject_p
avtActualDataMinMaxQuery::ApplyFilters(avtDataObject_p inData)
{
    Preparation(inData);

    bool zonesPreserved  = GetInput()->GetInfo().GetValidity().GetZonesPreserved();
    zonesPreserved = (bool)UnifyMinimumValue((int)zonesPreserved);
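
    // If zones were preserved on every processor and the query is not
    // time-varying, the data already in the pipeline can be condensed behind
    // a terminating source; otherwise a fresh data request (with zone
    // numbers turned on) is built so the Update() below pulls what it needs
    // from the originating source.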

    if (!timeVarying && zonesPreserved)
    {
        avtContract_p contract =
            inData->GetOriginatingSource()->GetGeneralContract();

        avtDataset_p ds;
        CopyTo(ds, inData);
        avtSourceFromAVTDataset termsrc(ds);
        avtDataObject_p obj = termsrc.GetOutput();
        condense->SetInput(obj);
        avtDataObject_p retObj = condense->GetOutput();
        retObj->Update(contract);
        return retObj;
    }
    else
    {
        avtDataRequest_p oldSpec = inData->GetOriginatingSource()->
            GetGeneralContract()->GetDataRequest();

        avtDataRequest_p newDS = new
            avtDataRequest(oldSpec, querySILR);
        newDS->SetTimestep(queryAtts.GetTimeStep());
        newDS->SetVariable(queryAtts.GetVariables()[0].c_str());

        if (!zonesPreserved)
            newDS->TurnZoneNumbersOn();

        avtContract_p contract =
            new avtContract(newDS, queryAtts.GetPipeIndex());

        avtDataObject_p temp;
        CopyTo(temp, inData);
        condense->SetInput(temp);
        avtDataObject_p retObj = condense->GetOutput();
        retObj->Update(contract);
        return retObj;
    }
}
avtDataObject_p
avtEvalPointExpression::TransformData(avtDataObject_p input)
{
    //
    // Gameplan:  For each point (X,Y,Z), the vector to a point (X0, Y0, Z0)
    // is (X0-X, Y0-Y, Z0-Z).  So we want to move 2*(X0-X, Y0-Y, Z0-Z).
    // Then the final point is (2X0-X, 2Y0-Y, 2Z0-Z).  So set up a transform
    // that does this.
    //
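    // In homogeneous coordinates that reflection through (X0, Y0, Z0) is
    //
    //     [ -1   0   0  2*X0 ]   [ x ]   [ 2*X0 - x ]
    //     [  0  -1   0  2*Y0 ] * [ y ] = [ 2*Y0 - y ]
    //     [  0   0  -1  2*Z0 ]   [ z ]   [ 2*Z0 - z ]
    //     [  0   0   0    1  ]   [ 1 ]   [     1    ]
    //
    // which is exactly the matrix assembled below.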
    double X = inputParameters[0];
    double Y = inputParameters[1];
    double Z = inputParameters[2];

    vtkMatrix4x4 *mat = vtkMatrix4x4::New();
    mat->SetElement(0, 0, -1);
    mat->SetElement(1, 1, -1);
    mat->SetElement(2, 2, -1);
    mat->SetElement(0, 3, +2.*X);
    mat->SetElement(1, 3, +2.*Y);
    mat->SetElement(2, 3, +2.*Z);

    avtDataset_p ds;
    CopyTo(ds, input);
    avtSourceFromAVTDataset termsrc(ds);

    avtCustomTransform transform;
    transform.SetMatrix(mat);
    transform.SetInput(termsrc.GetOutput());

    avtDataObject_p output = transform.GetOutput();
    output->Update(GetGeneralContract());

    mat->Delete();

    return output;
}
// ****************************************************************************
//  Method: avtConnComponentsLengthQuery::ApplyFilters
//
//  Purpose:
//      Constructs an artificial pipeline with the connected components and
//      length filters necessary to obtain per component length.
//
//  Programmer: Cyrus Harrison
//  Creation:   Wed Jun 15 13:09:43 PDT 2011
//
// ****************************************************************************
avtDataObject_p
avtConnComponentsLengthQuery::ApplyFilters(avtDataObject_p inData)
{
    // Create an artificial pipeline.
    avtDataset_p ds;
    CopyTo(ds, inData);
    avtSourceFromAVTDataset termsrc(ds);
    avtDataObject_p dob = termsrc.GetOutput();

    // add the length filter to the pipeline
    lengthFilter->SetInput(dob);
    dob = lengthFilter->GetOutput();

    // add the ccl filter to the pipeline
    cclFilter->SetInput(dob);
    dob = cclFilter->GetOutput();

    avtContract_p contract = 
        inData->GetOriginatingSource()->GetGeneralContract();

    cclFilter->GetOutput()->Update(contract);

    return cclFilter->GetOutput();
}
void
avtGhostZoneAndFacelistFilter::Execute(void)
{
    int  timingIndex = visitTimer->StartTimer();

    avtDataObject_p dObj = GetInput();
    avtDataValidity   &v = dObj->GetInfo().GetValidity();

    // Make sure this is the latest info.  This changes under very bizarre
    // circumstances.  ['3352].
    avtDataAttributes &a = dObj->GetInfo().GetAttributes();
    useGhostFilter = (a.GetContainsGhostZones()!=AVT_NO_GHOSTS ? true : false);

    avtDataset_p ds; 
    CopyTo(ds, dObj);
    avtSourceFromAVTDataset termsrc(ds);
    avtDataObject_p data = termsrc.GetOutput(); 

    avtContract_p contractForDB = GetGeneralContract();
    avtDataRequest_p wrongVar = contractForDB->GetDataRequest();
    avtDataRequest_p correctVar = new avtDataRequest(wrongVar, pipelineVariable);
    // By copying the "correct var", our mechanism for telling the SIL to
    // not be used is ignored.  So turn it back on.
    correctVar->GetSIL().useRestriction = false;
    correctVar->GetSIL().dataChunk = -1;

    avtContract_p goodSpec = new avtContract(contractForDB, correctVar);
    
    if (useFaceFilter && !useGhostFilter)
    {
        debug5 << "Using facelist filter only." << endl;
        faceFilter->SetInput(data);
        faceFilter->Update(goodSpec);
        GetOutput()->Copy(*(faceFilter->GetOutput()));
    }
    else if (useGhostFilter && !useFaceFilter)
    {
        debug5 << "Using ghostzone filter only." << endl;
        ghostFilter->SetInput(data);
        ghostFilter->Update(goodSpec);
        GetOutput()->Copy(*(ghostFilter->GetOutput()));
    }
    else if (!useGhostFilter && !useFaceFilter)
    {
        debug5 << "Not applying ghost zone or facelist filter." << endl;
        GetOutput()->Copy(*dObj);
    }
    else
    {
        // if we are using all the data, apply the facelist filter first.
        bool faceFirst = v.GetUsingAllDomains() ||
                         (a.GetContainsGhostZones() == AVT_CREATED_GHOSTS);

        if (faceFirst)
        {
            debug5 << "Using facelist filter before ghostzone filter." << endl;

            if (GetInput()->GetInfo().GetAttributes().
                                           GetContainsExteriorBoundaryGhosts())
            {
                debug5 << "But there are exterior boundaries, so doing a ghost"
                       << " before that!" << endl;
                exteriorBoundaryGhostFilter->SetInput(data);
                data = exteriorBoundaryGhostFilter->GetOutput();
            }

            faceFilter->SetInput(data);
            ghostFilter->SetInput(faceFilter->GetOutput());
            ghostFilter->Update(goodSpec);
            GetOutput()->Copy(*(ghostFilter->GetOutput()));
        }
        else
        {
            debug5 << "Using ghostzone filter before facelist filter." << endl;
            ghostFilter->SetInput(data);
            faceFilter->SetInput(ghostFilter->GetOutput());
            faceFilter->Update(goodSpec);
            GetOutput()->Copy(*(faceFilter->GetOutput()));
        }
    }

    visitTimer->StopTimer(timingIndex, "GhostZone And Facelist Filter");
    visitTimer->DumpTimings();
}
// Example 8
void
avtResampleFilter::ResampleInput(void)
{
    int  i, j, k;

    avtDataset_p output = GetTypedOutput();
    double bounds[6] = { 0, 0, 0, 0, 0, 0 };
    bool is3D = GetBounds(bounds);

    debug4 << "Resampling over space: " << bounds[0] << ", " << bounds[1]
           << ": " << bounds[2] << ", " << bounds[3] << ": " << bounds[4]
           << ", " << bounds[5] << endl;
    
    //
    // Our resampling leaves some invalid values in the data range.  The
    // easiest way to bypass this is to get the data range from the input and
    // pass it along (since resampling does not change it in theory).
    //
    double range[2];
    if (GetInput()->GetInfo().GetAttributes().ValidActiveVariable())
    {
        GetDataExtents(range);
        output->GetInfo().GetAttributes().GetDesiredDataExtents()->Set(range);
    }

    avtViewInfo view;
    double scale[3];
    CreateViewFromBounds(view, bounds, scale);

    //
    // What we want the width, height, and depth to be depends on the
    // attributes.
    //
    int width, height, depth;
    GetDimensions(width, height, depth, bounds, is3D);

    //
    // If there are no variables, then just create the mesh and exit.
    //
    bool thereAreNoVariables = 
          (GetInput()->GetInfo().GetAttributes().GetNumberOfVariables() <= 0);
    if (thereAreNoVariables)
    {
        if (PAR_Rank() == 0)
        {
            vtkRectilinearGrid *rg = CreateGrid(bounds, width, height, depth,
                                      0, width, 0, height, cellCenteredOutput, is3D);
            avtDataTree_p tree = new avtDataTree(rg, 0);
            rg->Delete();
            SetOutputDataTree(tree);
        }
        else
        {
            //
            // Putting in a NULL data tree can lead to seg faults, etc.
            //
            avtDataTree_p dummy = new avtDataTree();
            SetOutputDataTree(dummy);
        }

        return;
    }

    //
    // World space is a right-handed coordinate system.  Image space (as used
    // in the sample point extractor) is a left-handed coordinate system.
    // This is because large X is at the right and large Y is at the top.
    // The z-buffer has the closest points at z=0, so Z is going away from the
    // screen ===> left handed coordinate system.  If we reflect across X,
    // then this will account for the difference between the coordinate 
    // systems.
    //
    scale[0] *= -1.;

    //
    // We don't want an Update to go all the way up the pipeline, so make
    // a terminating source corresponding to our input.
    //
    avtDataset_p ds;
    avtDataObject_p dObj = GetInput();
    CopyTo(ds, dObj);
    avtSourceFromAVTDataset termsrc(ds);

    //
    // The sample point extractor expects everything to be in image space.
    //
    avtWorldSpaceToImageSpaceTransform trans(view, scale);
    trans.SetInput(termsrc.GetOutput());

    bool doKernel = 
        (GetInput()->GetInfo().GetAttributes().GetTopologicalDimension() == 0);
    avtSamplePointExtractor extractor(width, height, depth);
    extractor.SendCellsMode(false);
    extractor.Set3DMode(is3D);
    extractor.SetInput(trans.GetOutput());
    if (doKernel)
        extractor.SetKernelBasedSampling(true);
    avtSamplePoints_p samples = extractor.GetTypedOutput();

    //
    // If the selection this filter exists to create has already been handled,
    // or if there are no pieces for this processor to process, then we can skip
    // execution. But, take care to emulate the same collective
    // calls other processors may make before returning.
    //
    if (GetInput()->GetInfo().GetAttributes().GetSelectionApplied(selID))
    {
        debug1 << "Bypassing Resample operator because database plugin "
                  "claims to have applied the selection already" << endl;

        SetOutputDataTree(GetInputDataTree());

        // we can save a lot of time if we know everyone can bypass
        if (UnifyMaximumValue(0) == 0)
            return;

        // here is some dummied up code to match collective calls below
        int effectiveVars = samples->GetNumberOfRealVariables();
        double *ptrtmp = new double[width*height*depth];
        for (int jj = 0; jj < width*height*depth; jj++)
            ptrtmp[jj] = -FLT_MAX;
        for (i = 0 ; i < effectiveVars ; i++)
            Collect(ptrtmp, width*height*depth);
        delete [] ptrtmp;
        return;
    }
    else
    {
        UnifyMaximumValue(1);
    }

    //
    //
    // PROBLEM SIZED WORK OCCURS BEYOND THIS POINT
    // If you add (or remove) collective calls below this point, make sure to
    // put matching sequence into bypass code above
    //
    //

    avtSamplePointCommunicator communicator;
    avtImagePartition partition(width, height, PAR_Size(), PAR_Rank());
    communicator.SetImagePartition(&partition);
    bool doDistributedResample = false;
#ifdef PARALLEL
    doDistributedResample = atts.GetDistributedResample();
#endif

    if (doDistributedResample)
    {
        partition.SetShouldProduceOverlaps(true);
        avtDataObject_p dob;
        CopyTo(dob, samples);
        communicator.SetInput(dob);
        samples = communicator.GetTypedOutput();
    }

    // Always set up an arbitrator, even if user selected random.
    bool arbLessThan = !atts.GetUseArbitrator() || atts.GetArbitratorLessThan();
    std::string arbName = atts.GetArbitratorVarName();
    if (arbName == "default")
        arbName = primaryVariable;
    extractor.SetUpArbitrator(arbName, arbLessThan);

    //
    // Since this is Execute, forcing an update is okay...
    //
    samples->Update(GetGeneralContract());

    if (samples->GetInfo().GetValidity().HasErrorOccurred())
    {
        GetOutput()->GetInfo().GetValidity().ErrorOccurred();
        GetOutput()->GetInfo().GetValidity().SetErrorMessage(
                          samples->GetInfo().GetValidity().GetErrorMessage());
    }

    //
    // Create a rectilinear dataset that is stretched according to the 
    // original bounds.
    //
    int width_start  = 0;
    int width_end    = width;
    int height_start = 0;
    int height_end   = height;
    if (doDistributedResample)
    {
        partition.GetThisPartition(width_start, width_end, height_start, 
                                   height_end);
        width_end += 1;
        height_end += 1;
    }

    //
    // If we have more processors than domains, we have to handle that
    // gracefully.  Communicate how many variables there are so that those
    // that don't have data can play well.
    //
    int realVars  = samples->GetNumberOfRealVariables();
    int numArrays = realVars;
    if (doKernel)
        numArrays++;
    vtkDataArray **vars = new vtkDataArray*[numArrays];
    for (i = 0 ; i < numArrays ; i++)
    {
        vars[i] = vtkDoubleArray::New();
        if (doKernel && (i == numArrays-1))
            vars[i]->SetNumberOfComponents(1);
        else
        {
            vars[i]->SetNumberOfComponents(samples->GetVariableSize(i));
            vars[i]->SetName(samples->GetVariableName(i).c_str());
        }
    }

    if (doKernel)
        samples->GetVolume()->SetUseKernel(true);

    avtImagePartition *ip = NULL;
    if (doDistributedResample)
        ip = &partition;

    // We want all uncovered regions to get the default value.  That is
    // what the first argument of GetVariables is for.  But if the
    // default value is large, then it will screw up the collect call below,
    // which uses MPI_MAX for an all reduce.  So give uncovered regions very
    // small values now (-FLT_MAX) and then replace them later.
    double defaultPlaceholder = -FLT_MAX;
    samples->GetVolume()->GetVariables(defaultPlaceholder, vars, 
                                       numArrays, ip);

    if (!doDistributedResample)
    {
        //
        // Collect will perform the parallel collection.  Does nothing in
        // serial.  This will only be valid on processor 0.
        //
        for (i = 0 ; i < numArrays ; i++)
        {
            double *ptr = (double *) vars[i]->GetVoidPointer(0);
            Collect(ptr, vars[i]->GetNumberOfComponents()*width*height*depth);
        }
    }
    
    // Now replace the -FLT_MAX's with the default value.  (See comment above.)
    for (i = 0 ; i < numArrays ; i++)
    {
        int numTups = vars[i]->GetNumberOfComponents()
                    * vars[i]->GetNumberOfTuples();
        if (numTups > 0)
        {
            double *ptr = (double *) vars[i]->GetVoidPointer(0);
            for (j = 0 ; j < numTups ; j++)
                ptr[j] = (ptr[j] == defaultPlaceholder 
                                 ? atts.GetDefaultVal() 
                                 : ptr[j]);
        }
    }
   
    bool iHaveData = false;
    if (doDistributedResample)
        iHaveData = true;
    if (PAR_Rank() == 0)
        iHaveData = true;
    if (height_end > height)
        iHaveData = false;
    if (iHaveData)
    {
        vtkRectilinearGrid *rg = CreateGrid(bounds, width, height, depth,
                                        width_start, width_end, height_start,
                                        height_end, cellCenteredOutput, is3D);

        if (doKernel)
        {
            double min_weight = avtPointExtractor::GetMinimumWeightCutoff();
            vtkDataArray *weights = vars[numArrays-1];
            int numVals = weights->GetNumberOfTuples();
            for (i = 0 ; i < realVars ; i++)
            {
                for (j = 0 ; j < vars[i]->GetNumberOfComponents() ; j++)
                {
                    for (k = 0 ; k < numVals ; k++)
                    {
                        double weight = weights->GetTuple1(k);
                        if (weight <= min_weight)
                            vars[i]->SetComponent(k, j, atts.GetDefaultVal());
                        else
                            vars[i]->SetComponent(k, j, 
                                         vars[i]->GetComponent(k, j) / weight);
                    }
                }
            }
        }

        //
        // Attach these variables to our rectilinear grid.
        //
        for (i = 0 ; i < realVars ; i++)
        {
            const char *varname = vars[i]->GetName();
            if (strcmp(varname, primaryVariable) == 0)
            {
                if (vars[i]->GetNumberOfComponents() == 3)
                {
                    if (cellCenteredOutput)
                        rg->GetCellData()->SetVectors(vars[i]);
                    else
                        rg->GetPointData()->SetVectors(vars[i]);
                }
                else if (vars[i]->GetNumberOfComponents() == 1)
                {
                    if (cellCenteredOutput)
                    {
                        rg->GetCellData()->AddArray(vars[i]);
                        rg->GetCellData()->SetScalars(vars[i]);
                    }
                    else
                    {
                        rg->GetPointData()->AddArray(vars[i]);
                        rg->GetPointData()->SetScalars(vars[i]);
                    }
                }
                else
                {
                    if (cellCenteredOutput)
                        rg->GetCellData()->AddArray(vars[i]);
                    else
                        rg->GetPointData()->AddArray(vars[i]);
                }
            }
            else
            {
                if (cellCenteredOutput)
                    rg->GetCellData()->AddArray(vars[i]);
                else
                    rg->GetPointData()->AddArray(vars[i]);
            }
        }

        avtDataTree_p tree = new avtDataTree(rg, 0);
        rg->Delete();
        SetOutputDataTree(tree);
    }
    else
    {
        //
        // Putting in a NULL data tree can lead to seg faults, etc.
        //
        avtDataTree_p dummy = new avtDataTree();
        SetOutputDataTree(dummy);
    }

    for (i = 0 ; i < numArrays ; i++)
    {
        vars[i]->Delete();
    }
    delete [] vars;
}
// Example 9
void
avtSurfCompPrepFilter::Execute(void)
{
    avtDataObject_p dObj = GetInput();
    avtDataset_p ds;
    CopyTo(ds, dObj);
    avtSourceFromAVTDataset termsrc(ds);
    avtDataObject_p data = termsrc.GetOutput();

    //
    // To do our sampling, we will first convert to the appropriate coordinate
    // system -- cartesian, cylindrical, or spherical.
    //
    avtCoordSystemConvert toCoordSys;
    toCoordSys.SetInputCoordSys(avtCoordSystemConvert::CARTESIAN);
    switch (atts.GetCoordSystem())
    {
      case SurfCompPrepAttributes::Cartesian:
        toCoordSys.SetOutputCoordSys(avtCoordSystemConvert::CARTESIAN);
        break;
      case SurfCompPrepAttributes::Cylindrical:
        toCoordSys.SetOutputCoordSys(avtCoordSystemConvert::CYLINDRICAL);
        break;
      case SurfCompPrepAttributes::Spherical:
        toCoordSys.SetOutputCoordSys(avtCoordSystemConvert::SPHERICAL);
        break;
    }
    toCoordSys.SetInput(data);

    //
    // The extractor only extracts sample points within the view frustum
    // {(-1., 1.), (-1., 1.), (0., 1)}.  So transform our data into that space.
    //
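    // For an input interval [A, B] with center c = (A+B)/2 and width w = B-A,
    // the settings below amount to x -> (2/w)*(x - c), which maps [A, B] onto
    // [-1, 1]; the Z axis uses scale 1/w and offset 0.5 - c/w to land on
    // [0, 1] instead.  The inverse settings (scale w/2, translate c) are used
    // after sampling to map back out of the frustum.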
    avtSimilarityTransformFilter transToFrustum;
    SimilarityTransformAttributes st_atts;
    st_atts.SetDoScale(1);
    st_atts.SetDoTranslate(1);
    switch (atts.GetCoordSystem())
    {
      case SurfCompPrepAttributes::Cartesian:
      {
        double X1 = atts.GetXStart();
        double X2 = atts.GetXStop();
        double center = (X1 + X2) / 2;
        double range = X2 - X1;
        range = (range == 0. ? 1 : range); 
        double scale = 2. / range;
        st_atts.SetScaleX(scale);
        st_atts.SetTranslateX(-center*scale);
        double Y1 = atts.GetYStart();
        double Y2 = atts.GetYStop();
        center = (Y1 + Y2) / 2;
        range = Y2 - Y1;
        range = (range == 0. ? 1 : range); 
        scale = 2. / range;
        st_atts.SetTranslateY(-center*scale);
        st_atts.SetScaleY(scale);
        double Z1 = atts.GetZStart();
        double Z2 = atts.GetZStop();
        center = (Z1 + Z2) / 2;
        range = Z2 - Z1;
        range = (range == 0. ? 1 : range); 
        scale = 1. / range;
        st_atts.SetScaleZ(scale);
        st_atts.SetTranslateZ(0.5-center*scale);
        break;
      }  
      case SurfCompPrepAttributes::Cylindrical:
      {
        double X1 = atts.GetThetaStart() * DegreesToRadians();
        double X2 = atts.GetThetaStop() * DegreesToRadians();
        double center = (X1 + X2) / 2;
        double range = X2 - X1;
        range = (range == 0. ? 1 : range); 
        double scale = 2. / range;
        st_atts.SetScaleX(scale);
        st_atts.SetTranslateX(-center*scale);
        double Y1 = atts.GetZStart();
        double Y2 = atts.GetZStop();
        center = (Y1 + Y2) / 2;
        range = Y2 - Y1;
        range = (range == 0. ? 1 : range); 
        scale = 2. / range;
        st_atts.SetScaleY(scale);
        st_atts.SetTranslateY(-center*scale);
        double Z1 = atts.GetStartRadius();
        double Z2 = atts.GetEndRadius();
        center = (Z1 + Z2) / 2;
        range = Z2 - Z1;
        range = (range == 0. ? 1 : range); 
        scale = 1. / range;
        st_atts.SetScaleZ(scale);
        st_atts.SetTranslateZ(0.5-center*scale);
        break;
      }
      case SurfCompPrepAttributes::Spherical:
      {
        double X1 = atts.GetThetaStart() * DegreesToRadians();
        double X2 = atts.GetThetaStop() * DegreesToRadians();
        double center = (X1 + X2) / 2;
        double range = X2 - X1;
        range = (range == 0. ? 1 : range); 
        double scale = 2. / range;
        st_atts.SetScaleX(scale);
        st_atts.SetTranslateX(-center*scale);
        double Y1 = atts.GetPhiStart() * DegreesToRadians();
        double Y2 = atts.GetPhiStop() * DegreesToRadians();
        center = (Y1 + Y2) / 2;
        range = Y2 - Y1;
        range = (range == 0. ? 1 : range); 
        scale = 2. / range;
        st_atts.SetScaleY(scale);
        st_atts.SetTranslateY(-center*scale);
        double Z1 = atts.GetStartRadius();
        double Z2 = atts.GetEndRadius();
        center = (Z1 + Z2) / 2;
        range = Z2 - Z1;
        range = (range == 0. ? 1 : range); 
        scale = 1. / range;
        st_atts.SetScaleZ(scale);
        st_atts.SetTranslateZ(0.5-center*scale);
        break;
      }
    }
    transToFrustum.SetAtts(&st_atts);
    transToFrustum.SetInput(toCoordSys.GetOutput());

    //
    // The sample point extractor will do the actual extraction.  The number
    // of samples in each dimension depends on the attributes and what
    // coordinate system we are in.
    //
    int numX = 0;
    int numY = 0;
    int numZ = 0;
    switch (atts.GetCoordSystem())
    {
      case SurfCompPrepAttributes::Cartesian:
      {
         numX = atts.GetXSteps();
         numY = atts.GetYSteps();
         numZ = atts.GetZSteps();
         break;
      }  
      case SurfCompPrepAttributes::Cylindrical:
      {
         numX = atts.GetThetaSteps();
         numY = atts.GetZSteps();
         numZ = atts.GetRadiusSteps();
         break;
      }
      case SurfCompPrepAttributes::Spherical:
      {
         numX = atts.GetThetaSteps();
         numY = atts.GetPhiSteps();
         numZ = atts.GetRadiusSteps();
         break;
      }
    }
    avtSamplePointExtractor extractor(numX, numY, numZ);
    extractor.SetInput(transToFrustum.GetOutput());
    avtDataObject_p dob = extractor.GetOutput();

    avtImagePartition imagePartition(numX, numY);

#ifdef PARALLEL
    //
    // If we are in parallel, we will need to communicate the sample points
    // so that we can correctly infer the surface in the next step.
    //
    avtSamplePointCommunicator sampCommunicator;
    sampCommunicator.SetImagePartition(&imagePartition);
    sampCommunicator.SetInput(dob);
    dob = sampCommunicator.GetOutput();
#else
    //
    // The sample communicator will make sure this gets called, so we only
    // need to call it ourselves if we are not using that module.
    //
    int *dummy = new int[numY];
    for (int i = 0 ; i < numY ; i++)
        dummy[i] = 500;
    imagePartition.EstablishPartitionBoundaries(dummy);
    delete [] dummy;
#endif

    //
    // The sample point to surface filter will determine a surface from the
    // sample points.  The surface can be the front surface, the back
    // surface, or the middle surface.  The output surface will still be
    // in the image frustum.
    // 
    SurfaceType st = NOT_SPECIFIED;
    switch (atts.GetSurfaceType())
    {
      case SurfCompPrepAttributes::Closest:
        st = FRONT_SURFACE;
        break;
      case SurfCompPrepAttributes::Average:
        st = MIDDLE_SURFACE;
        break;
      case SurfCompPrepAttributes::Farthest:
        st = BACK_SURFACE;
        break;
    }
    avtSamplePointToSurfaceFilter sampsToSurface;
    sampsToSurface.SetImagePartition(&imagePartition);
    sampsToSurface.SetSurfaceType(st);
    sampsToSurface.SetInput(dob);

    //
    // Now transform the data out of the image frustum and back into the
    // coordinate system space.
    //
    avtSimilarityTransformFilter outOfFrustum;
    st_atts.SetDoScale(1);
    st_atts.SetDoTranslate(1);
    switch (atts.GetCoordSystem())
    {
      case SurfCompPrepAttributes::Cartesian:
      {
        double X1 = atts.GetXStart();
        double X2 = atts.GetXStop();
        double center = (X1 + X2) / 2;
        double range = X2 - X1;
        range = (range == 0. ? 1 : range); 
        double scale = range / 2.;
        st_atts.SetTranslateX(center);
        st_atts.SetScaleX(scale);
        double Y1 = atts.GetYStart();
        double Y2 = atts.GetYStop();
        center = (Y1 + Y2) / 2;
        range = Y2 - Y1;
        range = (range == 0. ? 1 : range); 
        scale = range / 2.;
        st_atts.SetScaleY(scale);
        st_atts.SetTranslateY(center);
        double Z1 = atts.GetZStart();
        double Z2 = atts.GetZStop();
        center = (Z1 + Z2) / 2;
        range = Z2 - Z1;
        range = (range == 0. ? 1 : range); 
        scale = range;
        st_atts.SetScaleZ(scale);
        st_atts.SetTranslateZ(center - 0.5*scale);
        break;
      }  
      case SurfCompPrepAttributes::Cylindrical:
      {
        double X1 = atts.GetThetaStart() * DegreesToRadians();
        double X2 = atts.GetThetaStop() * DegreesToRadians();
        double center = (X1 + X2) / 2;
        double range = X2 - X1;
        range = (range == 0. ? 1 : range); 
        double scale = range / 2.;
        st_atts.SetScaleX(scale);
        st_atts.SetTranslateX(center);
        double Y1 = atts.GetZStart();
        double Y2 = atts.GetZStop();
        center = (Y1 + Y2) / 2;
        range = Y2 - Y1;
        range = (range == 0. ? 1 : range); 
        scale = range / 2.;
        st_atts.SetScaleY(scale);
        st_atts.SetTranslateY(center);
        double Z1 = atts.GetStartRadius();
        double Z2 = atts.GetEndRadius();
        center = (Z1 + Z2) / 2;
        range = Z2 - Z1;
        range = (range == 0. ? 1 : range); 
        scale = range;
        st_atts.SetScaleZ(scale);
        st_atts.SetTranslateZ(center - 0.5*scale);
        break;
      }
      case SurfCompPrepAttributes::Spherical:
      {
        double X1 = atts.GetThetaStart() * DegreesToRadians();
        double X2 = atts.GetThetaStop() * DegreesToRadians();
        double center = (X1 + X2) / 2;
        st_atts.SetTranslateX(center);
        double range = X2 - X1;
        range = (range == 0. ? 1 : range); 
        double scale = range / 2.;
        st_atts.SetScaleX(scale);
        double Y1 = atts.GetPhiStart() * DegreesToRadians();
        double Y2 = atts.GetPhiStop() * DegreesToRadians();
        center = (Y1 + Y2) / 2;
        st_atts.SetTranslateY(center);
        range = Y2 - Y1;
        range = (range == 0. ? 1 : range); 
        scale = range / 2.;
        st_atts.SetScaleY(scale);
        double Z1 = atts.GetStartRadius();
        double Z2 = atts.GetEndRadius();
        center = (Z1 + Z2) / 2;
        range = Z2 - Z1;
        range = (range == 0. ? 1 : range); 
        scale = range;
        st_atts.SetScaleZ(scale);
        st_atts.SetTranslateZ(center - 0.5*scale);
        break;
      }
    }
    outOfFrustum.SetAtts(&st_atts);
    outOfFrustum.SetInput(sampsToSurface.GetOutput());

    //
    // We are now back in the coordinate system.  Let's get back to Cartesian
    // coordinates.
    //
    avtCoordSystemConvert backToCartesian;
    switch (atts.GetCoordSystem())
    {
      case SurfCompPrepAttributes::Cartesian:
        backToCartesian.SetInputCoordSys(avtCoordSystemConvert::CARTESIAN);
        break;
      case SurfCompPrepAttributes::Cylindrical:
        backToCartesian.SetInputCoordSys(avtCoordSystemConvert::CYLINDRICAL);
        break;
      case SurfCompPrepAttributes::Spherical:
        backToCartesian.SetInputCoordSys(avtCoordSystemConvert::SPHERICAL);
        break;
    }
    backToCartesian.SetOutputCoordSys(avtCoordSystemConvert::CARTESIAN);
    backToCartesian.SetInput(outOfFrustum.GetOutput());

    //
    // The last few hundred lines of code have set up a network.  Now force
    // that network to execute.
    //
    backToCartesian.Update(GetGeneralContract());

    //
    // Now copy the output of that execution to be the output of this filter.
    //
    GetOutput()->Copy(*(backToCartesian.GetOutput()));
}
avtImage_p
avtVolumeFilter::RenderImage(avtImage_p opaque_image,
                             const WindowAttributes &window)
{
    if (atts.GetRendererType() == VolumeAttributes::RayCastingSLIVR)
    {
        return RenderImageRaycastingSLIVR(opaque_image, window);
    }

    //
    // We need to create a dummy pipeline with the volume renderer that we
    // can force to execute within our "Execute".  Start with the source.
    //
    avtSourceFromAVTDataset termsrc(GetTypedInput());


    //
    // Set up the volume renderer.
    //
    avtRayTracer *software = new avtRayTracer;
    software->SetInput(termsrc.GetOutput());
    software->InsertOpaqueImage(opaque_image);
    software->SetRayCastingSLIVR(false);

    unsigned char vtf[4*256];
    atts.GetTransferFunction(vtf);
    avtOpacityMap om(256);
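    // For ray casting with trilinear sampling, the opacity attenuation a is
    // remapped as a*2 - 1 (taking [0, 1] onto [-1, 1]) and the renderer
    // sample count is passed along; otherwise a is used as-is.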
    if ((atts.GetRendererType() == VolumeAttributes::RayCasting) && (atts.GetSampling() == VolumeAttributes::Trilinear))
        om.SetTable(vtf, 256, atts.GetOpacityAttenuation()*2.0 - 1.0, atts.GetRendererSamples());
    else
        om.SetTable(vtf, 256, atts.GetOpacityAttenuation());
    double actualRange[2];
    bool artificialMin = atts.GetUseColorVarMin();
    bool artificialMax = atts.GetUseColorVarMax();
    if (!artificialMin || !artificialMax)
    {
        GetDataExtents(actualRange, primaryVariable);
        UnifyMinMax(actualRange, 2);
    }
    double range[2];
    range[0] = (artificialMin ? atts.GetColorVarMin() : actualRange[0]);
    range[1] = (artificialMax ? atts.GetColorVarMax() : actualRange[1]);
    if (atts.GetScaling() == VolumeAttributes::Log)
    {
        if (artificialMin)
            if (range[0] > 0)
                range[0] = log10(range[0]);
        if (artificialMax)
            if (range[1] > 0)
                range[1] = log10(range[1]);
    }
    else if (atts.GetScaling() == VolumeAttributes::Skew)
    {
        if (artificialMin)
        {
            double newMin = vtkSkewValue(range[0], range[0], range[1],
                                         atts.GetSkewFactor());
            range[0] = newMin;
        }
        if (artificialMax)
        {
            double newMax = vtkSkewValue(range[1], range[0], range[1],
                                         atts.GetSkewFactor());
            range[1] = newMax;
        }
    }
    om.SetMin(range[0]);
    om.SetMax(range[1]);

    if (atts.GetRendererType() == VolumeAttributes::RayCastingIntegration)
    {
        if (!artificialMin)
            range[0] = 0.;
        if (!artificialMax)
        {
/* Don't need this code, because the rays will be in depth ... 0->1.
            double bounds[6];
            GetSpatialExtents(bounds);
            UnifyMinMax(bounds, 6);
            double diag = sqrt((bounds[1]-bounds[0])*(bounds[1]-bounds[0]) +
                               (bounds[3]-bounds[2])*(bounds[3]-bounds[2]) +
                               (bounds[5]-bounds[4])*(bounds[5]-bounds[4]));
            range[1] = (actualRange[1]*diag) / 2.;
 */
            range[1] = (actualRange[1]) / 4.;
        }
    }

    //
    // Determine which variables to use and tell the ray function.
    //
    VarList vl;
    avtDataset_p input = GetTypedInput();
    avtDatasetExaminer::GetVariableList(input, vl);
    int primIndex = -1;
    int opacIndex = -1;
    int gradIndex = -1;
    int count = 0;
    char gradName[128];
    const char *gradvar = atts.GetOpacityVariable().c_str();
    if (strcmp(gradvar, "default") == 0)
        gradvar = primaryVariable;
    // This name is explicitly sent to the avtGradientExpression in
    // the avtVolumePlot.
    SNPRINTF(gradName, 128, "_%s_gradient", gradvar);

    for (int i = 0 ; i < vl.nvars ; i++)
    {
        if ((strstr(vl.varnames[i].c_str(), "vtk") != NULL) &&
            (strstr(vl.varnames[i].c_str(), "avt") != NULL))
            continue;

        if (vl.varnames[i] == primaryVariable)
        {
            primIndex = count;
        }
        if (vl.varnames[i] == atts.GetOpacityVariable())
        {
            opacIndex = count;
        }
        if (vl.varnames[i] == gradName)
        {
            gradIndex = count;
        }
        count += vl.varsizes[i];
    }

    if (primIndex == -1)
    {
        if (vl.nvars <= 0)
        {
            debug1 << "Could not locate primary variable "
                   << primaryVariable << ", assuming that we are running "
                   << "in parallel and have more processors than domains."
                   << endl;
        }
        else
        {
            EXCEPTION1(InvalidVariableException, primaryVariable);
        }
    }
    if (opacIndex == -1)
    {
        if (atts.GetOpacityVariable() == "default")
        {
            opacIndex = primIndex;
        }
        else if (vl.nvars <= 0)
        {
            debug1 << "Could not locate opacity variable "
                   << atts.GetOpacityVariable().c_str() << ", assuming that we "
                   << "are running in parallel and have more processors "
                   << "than domains." << endl;
        }
        else
        {
            EXCEPTION1(InvalidVariableException,atts.GetOpacityVariable());
        }
    }
    if (  atts.GetRendererType() != VolumeAttributes::RayCastingIntegration &&
          atts.GetLightingFlag() &&
          gradIndex == -1)
    {
        if (vl.nvars <= 0)
        {
            debug1 << "Could not locate gradient variable, assuming that we "
                   << "are running in parallel and have more processors "
                   << "than domains." << endl;
        }
        else
        {
            EXCEPTION1(InvalidVariableException,gradName);
        }
    }

    int newPrimIndex = UnifyMaximumValue(primIndex);
    if (primIndex >= 0 && newPrimIndex != primIndex)
    {
        //
        // We shouldn't ever have different orderings for our variables.
        //
        EXCEPTION1(InvalidVariableException, primaryVariable);
    }
    primIndex = newPrimIndex;

    int newOpacIndex = UnifyMaximumValue(opacIndex);
    if (opacIndex >= 0 && newOpacIndex != opacIndex)
    {
        //
        // We shouldn't ever have different orderings for our variables.
        //
        EXCEPTION1(InvalidVariableException, atts.GetOpacityVariable());
    }
    opacIndex = newOpacIndex;

    int newGradIndex = UnifyMaximumValue(gradIndex);
    if (gradIndex >= 0 && newGradIndex != gradIndex)
    {
        //
        // We shouldn't ever have different orderings for our variables.
        //
        EXCEPTION1(InvalidVariableException, gradName);
    }
    gradIndex = newGradIndex;

    //
    // Set up lighting
    //
    avtFlatLighting fl;
    avtLightingModel *lm = &fl;
    double gradMax = 0.0, lightingPower = 1.0;
    if (atts.GetLowGradientLightingReduction() != VolumeAttributes::Off)
    {
        gradMax = atts.GetLowGradientLightingClampValue();
        if (atts.GetLowGradientLightingClampFlag() == false)
        {
            double gradRange[2] = {0,0};
            GetDataExtents(gradRange, gradName);
            gradMax = gradRange[1];
        }
        switch (atts.GetLowGradientLightingReduction())
        {
          case VolumeAttributes::Lowest:   lightingPower = 1./16.; break;
          case VolumeAttributes::Lower:    lightingPower = 1./8.;  break;
          case VolumeAttributes::Low:      lightingPower = 1./4.;  break;
          case VolumeAttributes::Medium:   lightingPower = 1./2.;  break;
          case VolumeAttributes::High:     lightingPower = 1.;     break;
          case VolumeAttributes::Higher:   lightingPower = 2.;     break;
          case VolumeAttributes::Highest:  lightingPower = 4.;     break;
          default: break;
        }
    }
    avtPhong phong(gradMax, lightingPower);
    if (atts.GetLightingFlag())
    {
        lm = &phong;
    }
    else
    {
        lm = &fl;
    }

    avtOpacityMap *om2 = NULL;
    if (primIndex == opacIndex)
    {
        // Note that we are forcing the color variable's range onto the
        // opacity variable.
        om2 = &om;
    }
    else
    {
        om2 = new avtOpacityMap(256);
        om2->SetTable(vtf, 256, atts.GetOpacityAttenuation());
        double range[2];

        bool artificialMin = atts.GetUseOpacityVarMin();
        bool artificialMax = atts.GetUseOpacityVarMax();
        if (!artificialMin || !artificialMax)
        {
            InputSetActiveVariable(atts.GetOpacityVariable().c_str());
            avtDatasetExaminer::GetDataExtents(input, range);
            UnifyMinMax(range, 2);
            InputSetActiveVariable(primaryVariable);
        }
        range[0] = (artificialMin ? atts.GetOpacityVarMin() : range[0]);
        range[1] = (artificialMax ? atts.GetOpacityVarMax() : range[1]);
        om2->SetMin(range[0]);
        om2->SetMax(range[1]);
        // om2 is freed in the cleanup section at the end of this method.
    }
    avtCompositeRF *compositeRF = new avtCompositeRF(lm, &om, om2);
    if (atts.GetRendererType() == VolumeAttributes::RayCasting && atts.GetSampling() == VolumeAttributes::Trilinear){
        compositeRF->SetTrilinearSampling(true);
        double *matProp = atts.GetMaterialProperties();
        double materialPropArray[4];
        materialPropArray[0] = matProp[0];
        materialPropArray[1] = matProp[1];
        materialPropArray[2] = matProp[2];
        materialPropArray[3] = matProp[3];
        compositeRF->SetMaterial(materialPropArray);
    }
    else
        compositeRF->SetTrilinearSampling(false);
    avtIntegrationRF *integrateRF = new avtIntegrationRF(lm);

    compositeRF->SetColorVariableIndex(primIndex);
    compositeRF->SetOpacityVariableIndex(opacIndex);
    if (atts.GetLightingFlag())
        compositeRF->SetGradientVariableIndex(gradIndex);
    integrateRF->SetPrimaryVariableIndex(primIndex);
    integrateRF->SetRange(range[0], range[1]);
    if (atts.GetSampling() == VolumeAttributes::KernelBased)
    {
        software->SetKernelBasedSampling(true);
        compositeRF->SetWeightVariableIndex(count);
    }

    if (atts.GetRendererType() == VolumeAttributes::RayCasting && atts.GetSampling() == VolumeAttributes::Trilinear)
        software->SetTrilinear(true);
    else
        software->SetTrilinear(false);
    
    if (atts.GetRendererType() == VolumeAttributes::RayCastingIntegration)
        software->SetRayFunction(integrateRF);
    else
        software->SetRayFunction(compositeRF);

    software->SetSamplesPerRay(atts.GetSamplesPerRay());

    const int *size = window.GetSize();
    software->SetScreen(size[0], size[1]);

    const View3DAttributes &view = window.GetView3D();
    avtViewInfo vi;
    CreateViewInfoFromViewAttributes(vi, view);

    avtDataObject_p inputData = GetInput();
    int width_,height_,depth_;
    if (GetLogicalBounds(inputData, width_,height_,depth_))      
    {
        // if we have logical bounds, compute the slices automatically
        double viewDirection[3];
        int numSlices;
        
        viewDirection[0] = (view.GetViewNormal()[0] > 0)? view.GetViewNormal()[0]: -view.GetViewNormal()[0];
        viewDirection[1] = (view.GetViewNormal()[1] > 0)? view.GetViewNormal()[1]: -view.GetViewNormal()[1];
        viewDirection[2] = (view.GetViewNormal()[2] > 0)? view.GetViewNormal()[2]: -view.GetViewNormal()[2];

        numSlices = (width_*viewDirection[0] + height_*viewDirection[1] + depth_*viewDirection[2]) * atts.GetRendererSamples();

        if (atts.GetRendererType() == VolumeAttributes::RayCasting && atts.GetSampling() == VolumeAttributes::Trilinear)
            software->SetSamplesPerRay(numSlices);
    }
    software->SetView(vi);
    if (atts.GetRendererType() == VolumeAttributes::RayCastingIntegration)
    {
        integrateRF->SetDistance(view.GetFarPlane()-view.GetNearPlane());
        integrateRF->SetWindowSize(size[0], size[1]);
    }

    double view_dir[3];
    view_dir[0] = vi.focus[0] - vi.camera[0];
    view_dir[1] = vi.focus[1] - vi.camera[1];
    view_dir[2] = vi.focus[2] - vi.camera[2];
    double mag = sqrt(view_dir[0]*view_dir[0] + view_dir[1]*view_dir[1]
                      + view_dir[2]*view_dir[2]);
    if (mag != 0.) // only 0 if focus and camera are the same
    {
        view_dir[0] /= mag;
        view_dir[1] /= mag;
        view_dir[2] /= mag;
    }
    lm->SetViewDirection(view_dir);
    lm->SetViewUp(vi.viewUp);
    lm->SetLightInfo(window.GetLights());
    const RenderingAttributes &render_atts = window.GetRenderAtts();
    if (render_atts.GetSpecularFlag())
    {
        lm->SetSpecularInfo(render_atts.GetSpecularFlag(),
                            render_atts.GetSpecularCoeff(),
                            render_atts.GetSpecularPower());
    }

    //
    // Set the volume renderer's background color and mode from the
    // window attributes.
    //
    software->SetBackgroundMode(window.GetBackgroundMode());
    software->SetBackgroundColor(window.GetBackground());
    software->SetGradientBackgroundColors(window.GetGradBG1(),
                                          window.GetGradBG2());

    //
    // We have to set up a sample point "arbitrator" to allow small cells
    // to be included in the final picture.
    //
    avtOpacityMapSamplePointArbitrator arb(om2, opacIndex);
    avtRay::SetArbitrator(&arb);

    //
    // Do the funny business to force an update.
    //
    avtDataObject_p dob = software->GetOutput();
    dob->Update(GetGeneralContract());

    if (atts.GetRendererType() == VolumeAttributes::RayCastingIntegration)
        integrateRF->OutputRawValues("integration.data");

    //
    // Free up some memory and clean up.
    //
    delete software;
    avtRay::SetArbitrator(NULL);
    delete compositeRF;
    delete integrateRF;
    if (om2 != &om)
        delete om2;

    //
    // Copy the output of the volume renderer to our output.
    //
    avtImage_p output;
    CopyTo(output, dob);
    return  output;
}
avtImage_p
avtVolumeFilter::RenderImageRaycastingSLIVR(avtImage_p opaque_image,
                             const WindowAttributes &window)
{
    //
    // We need to create a dummy pipeline with the volume renderer that we
    // can force to execute within our "Execute".  Start with the source.
    //
    avtSourceFromAVTDataset termsrc(GetTypedInput());

    //
    // Set up the volume renderer.
    //
    avtRayTracer *software = new avtRayTracer;
    software->SetInput(termsrc.GetOutput());
    software->InsertOpaqueImage(opaque_image);

    unsigned char vtf[4*256];
    atts.GetTransferFunction(vtf);
    avtOpacityMap om(256);

    om.SetTableFloat(vtf, 256, atts.GetOpacityAttenuation()*2.0 - 1.0, atts.GetRendererSamples());
    
    double actualRange[2];
    bool artificialMin = atts.GetUseColorVarMin();
    bool artificialMax = atts.GetUseColorVarMax();
    if (!artificialMin || !artificialMax)
    {
        GetDataExtents(actualRange, primaryVariable);
        UnifyMinMax(actualRange, 2);
    }

    double range[2];
    range[0] = (artificialMin ? atts.GetColorVarMin() : actualRange[0]);
    range[1] = (artificialMax ? atts.GetColorVarMax() : actualRange[1]);

    if (atts.GetScaling() == VolumeAttributes::Log)
    {
        if (artificialMin)
            if (range[0] > 0)
                range[0] = log10(range[0]);
        if (artificialMax)
            if (range[1] > 0)
                range[1] = log10(range[1]);
    }
    else if (atts.GetScaling() == VolumeAttributes::Skew)
    {
        if (artificialMin)
        {
            double newMin = vtkSkewValue(range[0], range[0], range[1],
                                         atts.GetSkewFactor());
            range[0] = newMin;
        }
        if (artificialMax)
        {
            double newMax = vtkSkewValue(range[1], range[0], range[1],
                                         atts.GetSkewFactor());
            range[1] = newMax;
        }
    }
    om.SetMin(range[0]);
    om.SetMax(range[1]);
    
   

    //
    // Determine which variables to use and tell the ray function.
    //
    VarList vl;
    avtDataset_p input = GetTypedInput();
    avtDatasetExaminer::GetVariableList(input, vl);

    int primIndex = -1;
    int opacIndex = -1;

    int count = 0;
    char gradName[128];
    const char *gradvar = atts.GetOpacityVariable().c_str();
    if (strcmp(gradvar, "default") == 0)
        gradvar = primaryVariable;
    // This name is explicitly sent to the avtGradientExpression in
    // the avtVolumePlot.
    SNPRINTF(gradName, 128, "_%s_gradient", gradvar);

    for (int i = 0 ; i < vl.nvars ; i++)
    {
        if ((strstr(vl.varnames[i].c_str(), "vtk") != NULL) &&
            (strstr(vl.varnames[i].c_str(), "avt") != NULL))
            continue;

        if (vl.varnames[i] == primaryVariable)
        {
            primIndex = count;
        }
        if (vl.varnames[i] == atts.GetOpacityVariable())
        {
            opacIndex = count;
        }
       // if (vl.varnames[i] == gradName)
       // {
       //     gradIndex = count;
       // }
        count += vl.varsizes[i];
    }

    if (primIndex == -1)
    {
        if (vl.nvars <= 0)
        {
            debug1 << "Could not locate primary variable "
                   << primaryVariable << ", assuming that we are running "
                   << "in parallel and have more processors than domains."
                   << endl;
        }
        else
        {
            EXCEPTION1(InvalidVariableException, primaryVariable);
        }
    }
    if (opacIndex == -1)
    {
        if (atts.GetOpacityVariable() == "default")
        {
            opacIndex = primIndex;
        }
        else if (vl.nvars <= 0)
        {
            debug1 << "Could not locate opacity variable "
                   << atts.GetOpacityVariable().c_str() << ", assuming that we "
                   << "are running in parallel and have more processors "
                   << "than domains." << endl;
        }
        else
        {
            EXCEPTION1(InvalidVariableException,atts.GetOpacityVariable());
        }
    }
   

    //
    // Set up lighting
    //
    avtFlatLighting fl;
    avtLightingModel *lm = &fl;

    if (atts.GetLightingFlag())
        software->SetLighting(true);
    else
        software->SetLighting(false);



    avtCompositeRF *compositeRF = new avtCompositeRF(lm, &om, &om);
    
    double *matProp = atts.GetMaterialProperties();
    double materialPropArray[4];
    materialPropArray[0] = matProp[0];
    materialPropArray[1] = matProp[1];
    materialPropArray[2] = matProp[2];
    materialPropArray[3] = matProp[3];

    software->SetMatProperties(materialPropArray);

    software->SetRayCastingSLIVR(true);
    software->SetTrilinear(false);

    software->SetTransferFn(&om);
    software->SetRayFunction(compositeRF);            // unsure about this one. RayFunction seems important
    software->SetSamplesPerRay(atts.GetSamplesPerRay());

    const int *size = window.GetSize();
    software->SetScreen(size[0], size[1]);

    const View3DAttributes &view = window.GetView3D();
    avtViewInfo vi;
    CreateViewInfoFromViewAttributes(vi, view);

    avtDataObject_p inputData = GetInput();
    int width_,height_,depth_;
    if (GetLogicalBounds(inputData, width_,height_,depth_))      
    {
        double viewDirection[3];
        int numSlices;
        
        viewDirection[0] = (view.GetViewNormal()[0] > 0)? view.GetViewNormal()[0]: -view.GetViewNormal()[0];
        viewDirection[1] = (view.GetViewNormal()[1] > 0)? view.GetViewNormal()[1]: -view.GetViewNormal()[1];
        viewDirection[2] = (view.GetViewNormal()[2] > 0)? view.GetViewNormal()[2]: -view.GetViewNormal()[2];
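        // Estimate the slice count from the logical extents projected onto
        // the absolute-valued view normal, scaled by the requested renderer
        // sample count.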

        numSlices = (width_*viewDirection[0] + height_*viewDirection[1] + depth_*viewDirection[2]) * atts.GetRendererSamples();

        software->SetSamplesPerRay(numSlices);
        debug5 << "RayCastingSLIVR - slices: "<< numSlices << " : " << width_ << " ,  " << height_  << " , " << depth_ << endl;
    }

    software->SetView(vi);

    double view_dir[3];
    view_dir[0] = vi.focus[0] - vi.camera[0];
    view_dir[1] = vi.focus[1] - vi.camera[1];
    view_dir[2] = vi.focus[2] - vi.camera[2];
    double mag = sqrt(view_dir[0]*view_dir[0] + view_dir[1]*view_dir[1]
                      + view_dir[2]*view_dir[2]);
    if (mag != 0.) // only 0 if focus and camera are the same
    {
        view_dir[0] /= mag;
        view_dir[1] /= mag;
        view_dir[2] /= mag;
    }
    software->SetViewDirection(view_dir);
    software->SetViewUp(vi.viewUp);
    
    double tempLightDir[3];
    tempLightDir[0] = ((window.GetLights()).GetLight(0)).GetDirection()[0];
    tempLightDir[1] = ((window.GetLights()).GetLight(0)).GetDirection()[1];
    tempLightDir[2] = ((window.GetLights()).GetLight(0)).GetDirection()[2];
    software->SetLightDirection(tempLightDir);


    vtkCamera *camera = vtkCamera::New();
    vi.SetCameraFromView(camera);
    vtkMatrix4x4 *cameraMatrix = camera->GetViewTransformMatrix();
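    // Flatten the camera's 4x4 view-transform matrix row by row into a plain
    // 16-element array for the ray tracer.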

    double modelViewMatrix[16];
    modelViewMatrix[0] = cameraMatrix->GetElement(0,0);
    modelViewMatrix[1] = cameraMatrix->GetElement(0,1);
    modelViewMatrix[2] = cameraMatrix->GetElement(0,2);
    modelViewMatrix[3] = cameraMatrix->GetElement(0,3);

    modelViewMatrix[4] = cameraMatrix->GetElement(1,0);
    modelViewMatrix[5] = cameraMatrix->GetElement(1,1);
    modelViewMatrix[6] = cameraMatrix->GetElement(1,2);
    modelViewMatrix[7] = cameraMatrix->GetElement(1,3);

    modelViewMatrix[8]  = cameraMatrix->GetElement(2,0);
    modelViewMatrix[9]  = cameraMatrix->GetElement(2,1);
    modelViewMatrix[10] = cameraMatrix->GetElement(2,2);
    modelViewMatrix[11] = cameraMatrix->GetElement(2,3);

    modelViewMatrix[12] = cameraMatrix->GetElement(3,0);
    modelViewMatrix[13] = cameraMatrix->GetElement(3,1);
    modelViewMatrix[14] = cameraMatrix->GetElement(3,2);
    modelViewMatrix[15] = cameraMatrix->GetElement(3,3);
    software->SetModelViewMatrix(modelViewMatrix);
    camera->Delete();

    //
    // Set the volume renderer's background color and mode from the
    // window attributes.
    //
    software->SetBackgroundMode(window.GetBackgroundMode());
    software->SetBackgroundColor(window.GetBackground());
    software->SetGradientBackgroundColors(window.GetGradBG1(),
                                          window.GetGradBG2());

    //
    // Do the funny business to force an update. ... and called avtDataObject
    //
    avtDataObject_p dob = software->GetOutput();
    dob->Update(GetGeneralContract());

    //
    // Free up some memory and clean up.
    //
    delete software;
    avtRay::SetArbitrator(NULL);

    delete compositeRF;

    //
    // Copy the output of the volume renderer to our output.
    //
    avtImage_p output;
    CopyTo(output, dob);
    return  output;
}
// Example 12
void
avtCracksClipperFilter::Execute(void)
{
#ifdef ENGINE
    //
    // Create an artificial pipeline. 
    //
    avtDataObject_p dObj = GetInput();
    avtDataset_p ds;
    CopyTo(ds, dObj);
    avtSourceFromAVTDataset termsrc(ds);
    avtDataObject_p data = termsrc.GetOutput();

    //
    // Do the work of removing cracks
    //
    avtRemoveCracksFilter removeCracks;
    removeCracks.SetAtts(&atts);
    removeCracks.SetInput(data);

    if (calculateDensity)
    {
        //
        // Calculate volume for the new cells 
        //
        avtVMetricVolume volume; 
        volume.SetOutputVariableName("ccvol");
        volume.SetInput(removeCracks.GetOutput());
        volume.UseVerdictHex(false);
 
        //
        // Calculate density
        //
        avtCracksDensityFilter density;
        density.SetInput(volume.GetOutput());
        density.SetVarName(varname);

        //
        // Force the network to execute
        //
        density.Update(GetGeneralContract());

        //
        // Copy the output of the execution to the output of this filter.
        //
        GetOutput()->Copy(*(density.GetOutput()));
    }
    else
    {
        //
        // Force the network to execute
        //
        removeCracks.Update(GetGeneralContract());

        //
        // Copy the output of the execution to the output of this filter.
        //
        GetOutput()->Copy(*(removeCracks.GetOutput()));
    }
#endif
}
// Example 13
void
avtTraceHistoryFilter::PerformIteration(int time, avtDataset_p &ds, bool doDisp)
{
    int   i;

    // Store off the original expression list.
    ParsingExprList *pel = ParsingExprList::Instance();
    ExpressionList orig_list = *(pel->GetList());

    avtContract_p spec = GetGeneralContract();
    avtDataRequest_p dataRequest = spec->GetDataRequest();
    int timestep = dataRequest->GetTimestep();

    int newTimeIndex = timestep-time;
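    // "time" counts how many dumps to step back from the current timestep,
    // so a negative index means the request would reach before the first dump.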
    if (newTimeIndex < 0)
    {
        EXCEPTION1(VisItException, "You asked to trace back before the first"
                      " dump.");
    }

    ExpressionList new_list = *(pel->GetList());
    std::vector<std::string> vars = atts.GetVars();
    for (i = 0 ; i < vars.size () ; i++)
    {
        AddExpression(new_list, vars[i], newTimeIndex);
    }
    if (doDisp)
        AddExpression(new_list, atts.GetDisplacement(), newTimeIndex);
    *(pel->GetList()) = new_list;

    avtSourceFromAVTDataset termsrc(ds);
    avtDataObject_p data = termsrc.GetOutput();

    avtExpressionEvaluatorFilter eef;
    eef.SetInput(data);
    
    avtDataRequest_p dataRequest2 = new avtDataRequest(dataRequest);
    for (i = 0 ; i < vars.size() ; i++)
    {
        char exp_name[1024];
        SNPRINTF(exp_name, 1024, "AVT_TRACE_HIST_%s", vars[i].c_str());
        dataRequest2->AddSecondaryVariable(exp_name);
    }

    if (doDisp)
    {
        char disp_name[1024];
        SNPRINTF(disp_name, 1024, "AVT_TRACE_HIST_%s", 
                                               atts.GetDisplacement().c_str());

        avtDisplaceFilter disp;
        disp.SetInput(eef.GetOutput());

        disp.SetVariable(disp_name);
        disp.SetFactor(1.);
        dataRequest2->AddSecondaryVariable(disp_name);

        avtContract_p spec2 = new avtContract(spec, dataRequest2);
        disp.Update(spec2);
        ds = disp.GetTypedOutput();
    }
    else
    {
        avtContract_p spec2 = new avtContract(spec, dataRequest2);
        eef.Update(spec2);
        ds = eef.GetTypedOutput();
    }

    // Restore the original expression list ... i.e. undo the temporary 
    // expressions we put in.
    *(pel->GetList()) = orig_list;
}