void
avtSamplePointCommunicator::EstablishImagePartitionBoundaries(void)
{
    avtVolume *involume      = GetTypedInput()->GetVolume();
    int        height        = involume->GetVolumeHeight();

    int       *samplesPerScanline = new int[height];
    for (int i = 0 ; i < height ; i++)
    {
        samplesPerScanline[i] = 0;
    }

    //
    // Determine how many samples we have in the sample points.
    //
    involume->EstimateNumberOfSamplesPerScanline(samplesPerScanline);

    //
    // Estimate how many samples we have in the cells.
    //
    avtCellList *incl  = GetTypedInput()->GetCellList();
    incl->EstimateNumberOfSamplesPerScanline(samplesPerScanline);

    //
    // Now let the image partition determine what the right boundaries are
    // so that each partition has approximately the same number of sample
    // points.
    //
    imagePartition->EstablishPartitionBoundaries(samplesPerScanline);
    
    delete [] samplesPerScanline;
}
void
avtActualExtentsFilter::UpdateExtents(void)
{
    int t1 = visitTimer->StartTimer();
    avtDataAttributes &atts = GetInput()->GetInfo().GetAttributes();
    avtDataAttributes &outAtts = GetOutput()->GetInfo().GetAttributes();
    avtDataset_p ds = GetTypedInput();

    int nVars = atts.GetNumberOfVariables();
    double de[2];
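    //
    // For each variable the contract asks extents for, compute its data
    // extents on this processor and merge them into the output attributes.
    //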
    for (int i = 0 ; i < nVars ; i++)
    {
        const char *vname = atts.GetVariableName(i).c_str();
        if (! lastContract->ShouldCalculateVariableExtents(vname))
            continue;
    
        bool foundDE = avtDatasetExaminer::GetDataExtents(ds, de, vname);
        if (foundDE)
        {
            outAtts.GetThisProcsActualDataExtents(vname)->Merge(de);
        }
    }

    if (lastContract->ShouldCalculateMeshExtents())
    {
        double se[6];
        bool foundSE = avtDatasetExaminer::GetSpatialExtents(ds, se);
        if (foundSE)
        {
            outAtts.GetThisProcsActualSpatialExtents()->Merge(se);
        }
    }
    visitTimer->StopTimer(t1, "Calculating the actual extents");
}
void
avtDatasetToDataObjectFilter::SearchDataForDataExtents(double *extents,
                                                       const char *varname)
{
    avtDataset_p input = GetTypedInput();
    avtDatasetExaminer::GetDataExtents(input, extents, varname);
}
void
avtTraceHistoryFilter::Execute(void)
{
    //
    // Write out the connectivity.  "-1" is the hint to do that.
    //
    OutputTime(GetTypedInput(), -1);

    //
    // Pull out just the points.
    //
    avtDataset_p ds = InitializeDataset();

    //
    // Put the fields from the current time step on the points and then
    // output those fields.
    //
    PerformIteration(0, ds, false);
    OutputTime(ds, 0);

    //
    // Now iterate through the time slices, displacing and evaluating.
    //
    for (int i = 0 ; i < atts.GetNumiter() ; i++)
    {
        ds->SetSource(GetInput()->GetSource());
        PerformIteration(i, ds, true);
        OutputTime(ds, i+1);
    }

    // The operator just passes the data through.
    GetDataTree() = GetInputDataTree();
}
void
avtOriginalDataSpatialExtentsQuery::PerformQuery(QueryAttributes *qA)
{
    queryAtts = *qA;
    Init(); 

    std::string floatFormat = queryAtts.GetFloatFormat();
    std::string format = "";
    UpdateProgress(0, 0);

    avtDataObject_p dob = ApplyFilters(GetInput());

    SetTypedInput(dob);

    avtDataset_p input = GetTypedInput();
    double extents[6] = {0., 0., 0., 0., 0., 0.};
    char msg[1024];

    avtDatasetExaminer::GetSpatialExtents(input, extents);
    UnifyMinMax(extents, 6);
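    // UnifyMinMax does a cross-processor reduction so that every rank ends
    // up with the same global extents.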
    int dim = input->GetInfo().GetAttributes().GetSpatialDimension();
    if (dim == 1)
    {
        format = "The original extents are (" + floatFormat + ", " 
                                              + floatFormat + ")";
        SNPRINTF(msg, 1024, format.c_str(), 
            extents[0], extents[1]);
    }
    else if (dim == 2)
    {
        format = "The original extents are (" + floatFormat + ", " 
                                              + floatFormat + ", " 
                                              + floatFormat + ", " 
                                              + floatFormat + ")";
        SNPRINTF(msg, 1024, format.c_str(), 
            extents[0], extents[1], extents[2], extents[3]);
    }
    else if (dim == 3)
    {
        format = "The original extents are (" + floatFormat + ", " 
                                              + floatFormat + ", " 
                                              + floatFormat + ", " 
                                              + floatFormat + ", " 
                                              + floatFormat + ", " 
                                              + floatFormat + ")";
        SNPRINTF(msg, 1024, format.c_str(),
            extents[0], extents[1], extents[2], extents[3],
            extents[4], extents[5]);
    }
    doubleVector d;
    for (int i = 0 ; i < 2*dim ; i++)
        d.push_back(extents[i]);
    qA->SetResultsMessage(msg);
    qA->SetResultsValue(d);

    UpdateProgress(1, 0);
}
void
avtLineScanQuery::Execute(avtDataTree_p tree)
{
    avtDataset_p input = GetTypedInput();

    int numPasses = numLines / numLinesPerIteration;
    if (numLines % numLinesPerIteration != 0)
        numPasses++;
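    // numPasses is rounded up so a smaller final pass covers any leftover
    // lines (see numForLast below).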

    avtContract_p contract =
        input->GetOriginatingSource()->GetGeneralContract();
    if (GetInput()->GetInfo().GetAttributes().ValidActiveVariable())
        varname = GetInput()->GetInfo().GetAttributes().GetVariableName();
    else 
        varname = contract->GetDataRequest()->GetVariable();

    for (int i = 0 ; i < numPasses ; i++)
    {
        int numForLast = (numLines % numLinesPerIteration);
        numForLast = (numForLast == 0 ? numLinesPerIteration : numForLast);
        int linesForThisPass = (i == numPasses-1  ? numForLast 
                                                  : numLinesPerIteration);

        //
        // Create an artificial pipeline.
        //
        avtDataset_p ds;
        CopyTo(ds, input);
        avtSourceFromAVTDataset termsrc(ds);
        avtDataObject_p dob = termsrc.GetOutput();
    
        avtLineScanFilter *filt = CreateLineScanFilter();
        filt->SetNumberOfLines(linesForThisPass);
        filt->SetRandomSeed(i);
        filt->SetInput(dob);
        //
        // Cause our artificial pipeline to execute.
        //
        filt->GetOutput()->Update(contract);
        lines = filt->GetLines();

        avtDataset_p ds2 = filt->GetTypedOutput();
        avtDataTree_p tree = ds2->GetDataTree();
        ExecuteTree(tree);
        lines = NULL;
        delete filt;

        //
        // Reset the timeout for the next iteration.
        //
        avtCallback::ResetTimeout(60*5);
    }
}
void
avtLegacyStreamlineFilter::PreExecute(void)
{
    avtDataTreeIterator::PreExecute();

    // If we have a box source and we are using the whole box, then plug
    // the current spatial extents into the box extents.
    if (sourceType == STREAMLINE_SOURCE_BOX && useWholeBox)
    {
        avtDataset_p input = GetTypedInput();
        avtDatasetExaminer::GetSpatialExtents(input, boxExtents);
    }
}
void
avtVectorGlyphMapper::SetScale(double s)
{
    scale = s;

    //
    // If auto scale is enabled, then set the scale based on the spatial
    // extents and possibly the data extents.
    //
    if (autoScale)
    {
        avtDataset_p input = GetTypedInput();
        if (*input != 0)
        {
            avtDataAttributes &atts=input->GetInfo().GetAttributes();
            avtExtents *extents = atts.GetOriginalSpatialExtents();
            int nDims = extents->GetDimension();
            double exts[6];
            extents->CopyTo(exts);
            double dist = 0.;
            int i;
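            //
            // dist becomes the length of the diagonal of the spatial
            // bounding box; the glyph scale below is taken as a fraction
            // of it.
            //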
            for (i = 0; i < nDims; i++)
            {
                dist += (exts[2*i+1] - exts[2*i]) * (exts[2*i+1] - exts[2*i]);
            }
            dist = sqrt(dist);

            extents = atts.GetOriginalDataExtents();
            extents->CopyTo(exts);

            if (scaleByMagnitude)
                scale = (scale * dist * 0.2) / exts[1];
            else
                scale = scale * dist * 0.2;
        }
    }

    if (glyphFilter != NULL)
    {
        for (int i = 0 ; i < nGlyphFilters ; i++)
        {
            if (glyphFilter[i] != NULL)
            {
                glyphFilter[i]->SetScaleFactor(scale);
            }
        }
    }
}
avtDataset_p
avtTraceHistoryFilter::InitializeDataset(void)
{
    int  i, j;

    avtDataTree_p inTree = GetInputDataTree();
    int nLeaves;
    vtkDataSet **ds = inTree->GetAllLeaves(nLeaves);
    vtkDataSet **ugrids = new vtkDataSet*[nLeaves];
    for (i = 0 ; i < nLeaves ; i++)
    {
        vtkPoints *pts = vtkVisItUtility::GetPoints(ds[i]);

        vtkUnstructuredGrid *ugrid = vtkUnstructuredGrid::New();
        ugrid->SetPoints(pts);
        int npts = pts->GetNumberOfPoints();
        ugrid->Allocate(2*npts);
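        //
        // Add one VTK_VERTEX cell per point so the new grid is a pure
        // point mesh containing only the input's points.
        //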
        vtkIdType onevertex[1];
        for (j = 0 ; j < npts ; j++)
        {
             onevertex[0] = j;
             ugrid->InsertNextCell(VTK_VERTEX, 1, onevertex);
        }
        ugrids[i] = ugrid;

        pts->Delete();
    }

    avtDataTree_p newTree = new avtDataTree(nLeaves, ugrids, -1);
    for (i = 0 ; i < nLeaves ; i++)
        ugrids[i]->Delete();
    delete [] ugrids;

    avtDataset_p rv = new avtDataset(GetTypedInput(), newTree);
    avtDataAttributes &atts = rv->GetInfo().GetAttributes();
    int nVars = atts.GetNumberOfVariables();
    while (nVars > 0)
    {
        atts.RemoveVariable(atts.GetVariableName(0));
        nVars = atts.GetNumberOfVariables();
    }

    // Free the memory from the GetAllLeaves function call.
    delete [] ds;

    return rv;
}
avtContract_p
avtSurfaceFilter::ModifyContract(avtContract_p spec)
{
    double dataExtents[2];
    double spatialExtents[6];
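    //
    // If the data extents are already available, compute the scale values
    // now; otherwise turn off streaming and defer the calculation to
    // PreExecute.
    //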
    if (TryDataExtents(dataExtents))
    {
        avtDataset_p input = GetTypedInput();
        avtDatasetExaminer::GetSpatialExtents(input, spatialExtents);
        UnifyMinMax(spatialExtents,6);
        CalculateScaleValues(dataExtents, spatialExtents);
        stillNeedExtents = false;
    }
    else
    {
        spec->NoStreaming();
    }
    if (spec->GetDataRequest()->MayRequireZones()) 
    {
        spec->GetDataRequest()->TurnZoneNumbersOn();
    }
    if (spec->GetDataRequest()->MayRequireNodes()) 
    {
        spec->GetDataRequest()->TurnNodeNumbersOn();
    }

    //
    // We will need the ghost zones so that we can interpolate along domain
    // boundaries and get no cracks in our isosurface.
    //
    const char *varname = spec->GetDataRequest()->GetVariable();
    avtDataAttributes &in_atts = GetInput()->GetInfo().GetAttributes();
    bool skipGhost = false;
    if (in_atts.ValidVariable(varname) &&
        in_atts.GetCentering(varname) == AVT_NODECENT)
        skipGhost = true;
    if (!skipGhost)
        spec->GetDataRequest()->SetDesiredGhostDataType(GHOST_ZONE_DATA);

    return spec;
}
void
avtSurfaceFilter::PreExecute(void)
{
    avtDataTreeIterator::PreExecute();

    if (stillNeedExtents)
    {
        const char *varname = pipelineVariable;
        if (atts.GetVariable() != "default")
            varname = atts.GetVariable().c_str();
        double dataExtents[2];
        double spatialExtents[6];
        GetDataExtents(dataExtents, varname);
        avtDataset_p input = GetTypedInput();
        avtDatasetExaminer::GetSpatialExtents(input, spatialExtents);
        UnifyMinMax(spatialExtents,6);
        CalculateScaleValues(dataExtents, spatialExtents);
    }

    zValMin = +FLT_MAX;
    zValMax = -FLT_MAX;

    haveIssuedWarning = false;
}
void
avtTerminatingDatasetSink::StreamingCleanUp(void)
{
    avtDataset_p dataset = GetTypedInput();
    dataset->Compact();
}
void
avtDatasetToDataObjectFilter::SearchDataForSpatialExtents(double *extents)
{
    avtDataset_p input = GetTypedInput();
    avtDatasetExaminer::GetSpatialExtents(input, extents);
}
avtImage_p
avtVolumeFilter::RenderImage(avtImage_p opaque_image,
                             const WindowAttributes &window)
{
    if (atts.GetRendererType() == VolumeAttributes::RayCastingSLIVR)
    {
        return RenderImageRaycastingSLIVR(opaque_image, window);
    }

    //
    // We need to create a dummy pipeline with the volume renderer that we
    // can force to execute within our "Execute".  Start with the source.
    //
    avtSourceFromAVTDataset termsrc(GetTypedInput());


    //
    // Set up the volume renderer.
    //
    avtRayTracer *software = new avtRayTracer;
    software->SetInput(termsrc.GetOutput());
    software->InsertOpaqueImage(opaque_image);
    software->SetRayCastingSLIVR(false);

    unsigned char vtf[4*256];
    atts.GetTransferFunction(vtf);
    avtOpacityMap om(256);
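    // For trilinear ray casting the opacity table is built with a remapped
    // attenuation and the renderer samples attribute.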
    if ((atts.GetRendererType() == VolumeAttributes::RayCasting) && (atts.GetSampling() == VolumeAttributes::Trilinear))
        om.SetTable(vtf, 256, atts.GetOpacityAttenuation()*2.0 - 1.0, atts.GetRendererSamples());
    else
        om.SetTable(vtf, 256, atts.GetOpacityAttenuation());
    double actualRange[2];
    bool artificialMin = atts.GetUseColorVarMin();
    bool artificialMax = atts.GetUseColorVarMax();
    if (!artificialMin || !artificialMax)
    {
        GetDataExtents(actualRange, primaryVariable);
        UnifyMinMax(actualRange, 2);
    }
    double range[2];
    range[0] = (artificialMin ? atts.GetColorVarMin() : actualRange[0]);
    range[1] = (artificialMax ? atts.GetColorVarMax() : actualRange[1]);
    if (atts.GetScaling() == VolumeAttributes::Log)
    {
        if (artificialMin)
            if (range[0] > 0)
                range[0] = log10(range[0]);
        if (artificialMax)
            if (range[1] > 0)
                range[1] = log10(range[1]);
    }
    else if (atts.GetScaling() == VolumeAttributes::Skew)
    {
        if (artificialMin)
        {
            double newMin = vtkSkewValue(range[0], range[0], range[1],
                                         atts.GetSkewFactor());
            range[0] = newMin;
        }
        if (artificialMax)
        {
            double newMax = vtkSkewValue(range[1], range[0], range[1],
                                         atts.GetSkewFactor());
            range[1] = newMax;
        }
    }
    om.SetMin(range[0]);
    om.SetMax(range[1]);

    if (atts.GetRendererType() == VolumeAttributes::RayCastingIntegration)
    {
        if (!artificialMin)
            range[0] = 0.;
        if (!artificialMax)
        {
/* Don't need this code, because the rays will be in depth ... 0->1.
            double bounds[6];
            GetSpatialExtents(bounds);
            UnifyMinMax(bounds, 6);
            double diag = sqrt((bounds[1]-bounds[0])*(bounds[1]-bounds[0]) +
                               (bounds[3]-bounds[2])*(bounds[3]-bounds[2]) +
                               (bounds[5]-bounds[4])*(bounds[5]-bounds[4]));
            range[1] = (actualRange[1]*diag) / 2.;
 */
            range[1] = (actualRange[1]) / 4.;
        }
    }

    //
    // Determine which variables to use and tell the ray function.
    //
    VarList vl;
    avtDataset_p input = GetTypedInput();
    avtDatasetExaminer::GetVariableList(input, vl);
    int primIndex = -1;
    int opacIndex = -1;
    int gradIndex = -1;
    int count = 0;
    char gradName[128];
    const char *gradvar = atts.GetOpacityVariable().c_str();
    if (strcmp(gradvar, "default") == 0)
        gradvar = primaryVariable;
    // This name is explicitly sent to the avtGradientExpression in
    // the avtVolumePlot.
    SNPRINTF(gradName, 128, "_%s_gradient", gradvar);
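
    //
    // Walk the variable list, skipping internal vtk/avt arrays, and record
    // the starting component offset of the color, opacity, and gradient
    // variables.  "count" accumulates the component sizes seen so far.
    //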

    for (int i = 0 ; i < vl.nvars ; i++)
    {
        if ((strstr(vl.varnames[i].c_str(), "vtk") != NULL) &&
            (strstr(vl.varnames[i].c_str(), "avt") != NULL))
            continue;

        if (vl.varnames[i] == primaryVariable)
        {
            primIndex = count;
        }
        if (vl.varnames[i] == atts.GetOpacityVariable())
        {
            opacIndex = count;
        }
        if (vl.varnames[i] == gradName)
        {
            gradIndex = count;
        }
        count += vl.varsizes[i];
    }

    if (primIndex == -1)
    {
        if (vl.nvars <= 0)
        {
            debug1 << "Could not locate primary variable "
                   << primaryVariable << ", assuming that we are running "
                   << "in parallel and have more processors than domains."
                   << endl;
        }
        else
        {
            EXCEPTION1(InvalidVariableException, primaryVariable);
        }
    }
    if (opacIndex == -1)
    {
        if (atts.GetOpacityVariable() == "default")
        {
            opacIndex = primIndex;
        }
        else if (vl.nvars <= 0)
        {
            debug1 << "Could not locate opacity variable "
                   << atts.GetOpacityVariable().c_str() << ", assuming that we "
                   << "are running in parallel and have more processors "
                   << "than domains." << endl;
        }
        else
        {
            EXCEPTION1(InvalidVariableException,atts.GetOpacityVariable());
        }
    }
    if (  atts.GetRendererType() != VolumeAttributes::RayCastingIntegration &&
          atts.GetLightingFlag() &&
          gradIndex == -1)
    {
        if (vl.nvars <= 0)
        {
            debug1 << "Could not locate gradient variable, assuming that we "
                   << "are running in parallel and have more processors "
                   << "than domains." << endl;
        }
        else
        {
            EXCEPTION1(InvalidVariableException,gradName);
        }
    }

    int newPrimIndex = UnifyMaximumValue(primIndex);
    if (primIndex >= 0 && newPrimIndex != primIndex)
    {
        //
        // We shouldn't ever have different orderings for our variables.
        //
        EXCEPTION1(InvalidVariableException, primaryVariable);
    }
    primIndex = newPrimIndex;

    int newOpacIndex = UnifyMaximumValue(opacIndex);
    if (opacIndex >= 0 && newOpacIndex != opacIndex)
    {
        //
        // We shouldn't ever have different orderings for our variables.
        //
        EXCEPTION1(InvalidVariableException, atts.GetOpacityVariable());
    }
    opacIndex = newOpacIndex;

    int newGradIndex = UnifyMaximumValue(gradIndex);
    if (gradIndex >= 0 && newGradIndex != gradIndex)
    {
        //
        // We shouldn't ever have different orderings for our variables.
        //
        EXCEPTION1(InvalidVariableException, gradName);
    }
    gradIndex = newGradIndex;

    //
    // Set up lighting
    //
    avtFlatLighting fl;
    avtLightingModel *lm = &fl;
    double gradMax = 0.0, lightingPower = 1.0;
    if (atts.GetLowGradientLightingReduction() != VolumeAttributes::Off)
    {
        gradMax = atts.GetLowGradientLightingClampValue();
        if (atts.GetLowGradientLightingClampFlag() == false)
        {
            double gradRange[2] = {0,0};
            GetDataExtents(gradRange, gradName);
            gradMax = gradRange[1];
        }
        switch (atts.GetLowGradientLightingReduction())
        {
          case VolumeAttributes::Lowest:   lightingPower = 1./16.; break;
          case VolumeAttributes::Lower:    lightingPower = 1./8.;  break;
          case VolumeAttributes::Low:      lightingPower = 1./4.;  break;
          case VolumeAttributes::Medium:   lightingPower = 1./2.;  break;
          case VolumeAttributes::High:     lightingPower = 1.;     break;
          case VolumeAttributes::Higher:   lightingPower = 2.;     break;
          case VolumeAttributes::Highest:  lightingPower = 4.;     break;
          default: break;
        }
    }
    avtPhong phong(gradMax, lightingPower);
    if (atts.GetLightingFlag())
    {
        lm = &phong;
    }
    else
    {
        lm = &fl;
    }

    avtOpacityMap *om2 = NULL;
    if (primIndex == opacIndex)
    {
        // Note that we are forcing the color variable's range onto the
        // opacity variable.
        om2 = &om;
    }
    else
    {
        om2 = new avtOpacityMap(256);
        om2->SetTable(vtf, 256, atts.GetOpacityAttenuation());
        double range[2];

        bool artificialMin = atts.GetUseOpacityVarMin();
        bool artificialMax = atts.GetUseOpacityVarMax();
        if (!artificialMin || !artificialMax)
        {
            InputSetActiveVariable(atts.GetOpacityVariable().c_str());
            avtDatasetExaminer::GetDataExtents(input, range);
            UnifyMinMax(range, 2);
            InputSetActiveVariable(primaryVariable);
        }
        range[0] = (artificialMin ? atts.GetOpacityVarMin() : range[0]);
        range[1] = (artificialMax ? atts.GetOpacityVarMax() : range[1]);
        om2->SetMin(range[0]);
        om2->SetMax(range[1]);
        // LEAK!!
    }
    avtCompositeRF *compositeRF = new avtCompositeRF(lm, &om, om2);
    if (atts.GetRendererType() == VolumeAttributes::RayCasting &&
        atts.GetSampling() == VolumeAttributes::Trilinear)
    {
        compositeRF->SetTrilinearSampling(true);
        double *matProp = atts.GetMaterialProperties();
        double materialPropArray[4];
        materialPropArray[0] = matProp[0];
        materialPropArray[1] = matProp[1];
        materialPropArray[2] = matProp[2];
        materialPropArray[3] = matProp[3];
        compositeRF->SetMaterial(materialPropArray);
    }
    else
        compositeRF->SetTrilinearSampling(false);
    avtIntegrationRF *integrateRF = new avtIntegrationRF(lm);

    compositeRF->SetColorVariableIndex(primIndex);
    compositeRF->SetOpacityVariableIndex(opacIndex);
    if (atts.GetLightingFlag())
        compositeRF->SetGradientVariableIndex(gradIndex);
    integrateRF->SetPrimaryVariableIndex(primIndex);
    integrateRF->SetRange(range[0], range[1]);
    if (atts.GetSampling() == VolumeAttributes::KernelBased)
    {
        software->SetKernelBasedSampling(true);
        compositeRF->SetWeightVariableIndex(count);
    }

    if (atts.GetRendererType() == VolumeAttributes::RayCasting && atts.GetSampling() == VolumeAttributes::Trilinear)
        software->SetTrilinear(true);
    else
        software->SetTrilinear(false);
    
    if (atts.GetRendererType() == VolumeAttributes::RayCastingIntegration)
        software->SetRayFunction(integrateRF);
    else
        software->SetRayFunction(compositeRF);

    software->SetSamplesPerRay(atts.GetSamplesPerRay());

    const int *size = window.GetSize();
    software->SetScreen(size[0], size[1]);

    const View3DAttributes &view = window.GetView3D();
    avtViewInfo vi;
    CreateViewInfoFromViewAttributes(vi, view);

    avtDataObject_p inputData = GetInput();
    int width_,height_,depth_;
    if (GetLogicalBounds(inputData, width_,height_,depth_))      
    {
        // if we have logical bounds, compute the slices automatically
        double viewDirection[3];
        int numSlices;
        
        viewDirection[0] = (view.GetViewNormal()[0] > 0)? view.GetViewNormal()[0]: -view.GetViewNormal()[0];
        viewDirection[1] = (view.GetViewNormal()[1] > 0)? view.GetViewNormal()[1]: -view.GetViewNormal()[1];
        viewDirection[2] = (view.GetViewNormal()[2] > 0)? view.GetViewNormal()[2]: -view.GetViewNormal()[2];
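
        // The slice count is the volume's logical extent projected onto the
        // absolute view direction, scaled by the renderer samples attribute.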

        numSlices = (width_*viewDirection[0] + height_*viewDirection[1] + depth_*viewDirection[2]) * atts.GetRendererSamples();

        if (atts.GetRendererType() == VolumeAttributes::RayCasting && atts.GetSampling() == VolumeAttributes::Trilinear)
            software->SetSamplesPerRay(numSlices);
    }
    software->SetView(vi);
    if (atts.GetRendererType() == VolumeAttributes::RayCastingIntegration)
    {
        integrateRF->SetDistance(view.GetFarPlane()-view.GetNearPlane());
        integrateRF->SetWindowSize(size[0], size[1]);
    }

    double view_dir[3];
    view_dir[0] = vi.focus[0] - vi.camera[0];
    view_dir[1] = vi.focus[1] - vi.camera[1];
    view_dir[2] = vi.focus[2] - vi.camera[2];
    double mag = sqrt(view_dir[0]*view_dir[0] + view_dir[1]*view_dir[1]
                      + view_dir[2]*view_dir[2]);
    if (mag != 0.) // only 0 if focus and camera are the same
    {
        view_dir[0] /= mag;
        view_dir[1] /= mag;
        view_dir[2] /= mag;
    }
    lm->SetViewDirection(view_dir);
    lm->SetViewUp(vi.viewUp);
    lm->SetLightInfo(window.GetLights());
    const RenderingAttributes &render_atts = window.GetRenderAtts();
    if (render_atts.GetSpecularFlag())
    {
        lm->SetSpecularInfo(render_atts.GetSpecularFlag(),
                            render_atts.GetSpecularCoeff(),
                            render_atts.GetSpecularPower());
    }

    //
    // Set the volume renderer's background color and mode from the
    // window attributes.
    //
    software->SetBackgroundMode(window.GetBackgroundMode());
    software->SetBackgroundColor(window.GetBackground());
    software->SetGradientBackgroundColors(window.GetGradBG1(),
                                          window.GetGradBG2());

    //
    // We have to set up a sample point "arbitrator" to allow small cells
    // to be included in the final picture.
    //
    avtOpacityMapSamplePointArbitrator arb(om2, opacIndex);
    avtRay::SetArbitrator(&arb);

    //
    // Do the funny business to force an update.
    //
    avtDataObject_p dob = software->GetOutput();
    dob->Update(GetGeneralContract());

    if (atts.GetRendererType() == VolumeAttributes::RayCastingIntegration)
        integrateRF->OutputRawValues("integration.data");

    //
    // Free up some memory and clean up.
    //
    delete software;
    avtRay::SetArbitrator(NULL);
    delete compositeRF;
    delete integrateRF;

    //
    // Copy the output of the volume renderer to our output.
    //
    avtImage_p output;
    CopyTo(output, dob);
    return  output;
}
avtImage_p
avtVolumeFilter::RenderImageRaycastingSLIVR(avtImage_p opaque_image,
                             const WindowAttributes &window)
{
    //
    // We need to create a dummy pipeline with the volume renderer that we
    // can force to execute within our "Execute".  Start with the source.
    //
    avtSourceFromAVTDataset termsrc(GetTypedInput());

    //
    // Set up the volume renderer.
    //
    avtRayTracer *software = new avtRayTracer;
    software->SetInput(termsrc.GetOutput());
    software->InsertOpaqueImage(opaque_image);

    unsigned char vtf[4*256];
    atts.GetTransferFunction(vtf);
    avtOpacityMap om(256);

    om.SetTableFloat(vtf, 256, atts.GetOpacityAttenuation()*2.0 - 1.0, atts.GetRendererSamples());
    
    double actualRange[2];
    bool artificialMin = atts.GetUseColorVarMin();
    bool artificialMax = atts.GetUseColorVarMax();
    if (!artificialMin || !artificialMax)
    {
        GetDataExtents(actualRange, primaryVariable);
        UnifyMinMax(actualRange, 2);
    }

    double range[2];
    range[0] = (artificialMin ? atts.GetColorVarMin() : actualRange[0]);
    range[1] = (artificialMax ? atts.GetColorVarMax() : actualRange[1]);

    if (atts.GetScaling() == VolumeAttributes::Log)
    {
        if (artificialMin)
            if (range[0] > 0)
                range[0] = log10(range[0]);
        if (artificialMax)
            if (range[1] > 0)
                range[1] = log10(range[1]);
    }
    else if (atts.GetScaling() == VolumeAttributes::Skew)
    {
        if (artificialMin)
        {
            double newMin = vtkSkewValue(range[0], range[0], range[1],
                                         atts.GetSkewFactor());
            range[0] = newMin;
        }
        if (artificialMax)
        {
            double newMax = vtkSkewValue(range[1], range[0], range[1],
                                         atts.GetSkewFactor());
            range[1] = newMax;
        }
    }
    om.SetMin(range[0]);
    om.SetMax(range[1]);

    //
    // Determine which variables to use and tell the ray function.
    //
    VarList vl;
    avtDataset_p input = GetTypedInput();
    avtDatasetExaminer::GetVariableList(input, vl);

    int primIndex = -1;
    int opacIndex = -1;

    int count = 0;
    char gradName[128];
    const char *gradvar = atts.GetOpacityVariable().c_str();
    if (strcmp(gradvar, "default") == 0)
        gradvar = primaryVariable;
    // This name is explicitly sent to the avtGradientExpression in
    // the avtVolumePlot.
    SNPRINTF(gradName, 128, "_%s_gradient", gradvar);
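
    //
    // Walk the variable list, skipping internal vtk/avt arrays, and record
    // the starting component offsets of the color and opacity variables.
    //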

    for (int i = 0 ; i < vl.nvars ; i++)
    {
        if ((strstr(vl.varnames[i].c_str(), "vtk") != NULL) &&
            (strstr(vl.varnames[i].c_str(), "avt") != NULL))
            continue;

        if (vl.varnames[i] == primaryVariable)
        {
            primIndex = count;
        }
        if (vl.varnames[i] == atts.GetOpacityVariable())
        {
            opacIndex = count;
        }
       // if (vl.varnames[i] == gradName)
       // {
       //     gradIndex = count;
       // }
        count += vl.varsizes[i];
    }

    if (primIndex == -1)
    {
        if (vl.nvars <= 0)
        {
            debug1 << "Could not locate primary variable "
                   << primaryVariable << ", assuming that we are running "
                   << "in parallel and have more processors than domains."
                   << endl;
        }
        else
        {
            EXCEPTION1(InvalidVariableException, primaryVariable);
        }
    }
    if (opacIndex == -1)
    {
        if (atts.GetOpacityVariable() == "default")
        {
            opacIndex = primIndex;
        }
        else if (vl.nvars <= 0)
        {
            debug1 << "Could not locate opacity variable "
                   << atts.GetOpacityVariable().c_str() << ", assuming that we "
                   << "are running in parallel and have more processors "
                   << "than domains." << endl;
        }
        else
        {
            EXCEPTION1(InvalidVariableException,atts.GetOpacityVariable());
        }
    }
   

    //
    // Set up lighting
    //
    avtFlatLighting fl;
    avtLightingModel *lm = &fl;

    if (atts.GetLightingFlag())
        software->SetLighting(true);
    else
        software->SetLighting(false);



    avtCompositeRF *compositeRF = new avtCompositeRF(lm, &om, &om);
    
    double *matProp = atts.GetMaterialProperties();
    double materialPropArray[4];
    materialPropArray[0] = matProp[0];
    materialPropArray[1] = matProp[1];
    materialPropArray[2] = matProp[2];
    materialPropArray[3] = matProp[3];

    software->SetMatProperties(materialPropArray);

    software->SetRayCastingSLIVR(true);
    software->SetTrilinear(false);

    software->SetTransferFn(&om);
    software->SetRayFunction(compositeRF);            // unsure about this one. RayFunction seems important
    software->SetSamplesPerRay(atts.GetSamplesPerRay());

    const int *size = window.GetSize();
    software->SetScreen(size[0], size[1]);

    const View3DAttributes &view = window.GetView3D();
    avtViewInfo vi;
    CreateViewInfoFromViewAttributes(vi, view);

    avtDataObject_p inputData = GetInput();
    int width_,height_,depth_;
    if (GetLogicalBounds(inputData, width_,height_,depth_))      
    {
        double viewDirection[3];
        int numSlices;
        
        viewDirection[0] = (view.GetViewNormal()[0] > 0)? view.GetViewNormal()[0]: -view.GetViewNormal()[0];
        viewDirection[1] = (view.GetViewNormal()[1] > 0)? view.GetViewNormal()[1]: -view.GetViewNormal()[1];
        viewDirection[2] = (view.GetViewNormal()[2] > 0)? view.GetViewNormal()[2]: -view.GetViewNormal()[2];

        numSlices = (width_*viewDirection[0] + height_*viewDirection[1] + depth_*viewDirection[2]) * atts.GetRendererSamples();

        software->SetSamplesPerRay(numSlices);
        debug5 << "RayCastingSLIVR - slices: "<< numSlices << " : " << width_ << " ,  " << height_  << " , " << depth_ << endl;
    }

    software->SetView(vi);

    double view_dir[3];
    view_dir[0] = vi.focus[0] - vi.camera[0];
    view_dir[1] = vi.focus[1] - vi.camera[1];
    view_dir[2] = vi.focus[2] - vi.camera[2];
    double mag = sqrt(view_dir[0]*view_dir[0] + view_dir[1]*view_dir[1]
                      + view_dir[2]*view_dir[2]);
    if (mag != 0.) // only 0 if focus and camera are the same
    {
        view_dir[0] /= mag;
        view_dir[1] /= mag;
        view_dir[2] /= mag;
    }
    software->SetViewDirection(view_dir);
    software->SetViewUp(vi.viewUp);
    
    double tempLightDir[3];
    tempLightDir[0] = ((window.GetLights()).GetLight(0)).GetDirection()[0];
    tempLightDir[1] = ((window.GetLights()).GetLight(0)).GetDirection()[1];
    tempLightDir[2] = ((window.GetLights()).GetLight(0)).GetDirection()[2];
    software->SetLightDirection(tempLightDir);


    vtkCamera *camera = vtkCamera::New();
    vi.SetCameraFromView(camera);
    vtkMatrix4x4 *cameraMatrix = camera->GetViewTransformMatrix();
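
    //
    // Flatten the 4x4 view transform into a row-major array for the
    // ray tracer.
    //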

    double modelViewMatrix[16];
    modelViewMatrix[0] = cameraMatrix->GetElement(0,0);
    modelViewMatrix[1] = cameraMatrix->GetElement(0,1);
    modelViewMatrix[2] = cameraMatrix->GetElement(0,2);
    modelViewMatrix[3] = cameraMatrix->GetElement(0,3);

    modelViewMatrix[4] = cameraMatrix->GetElement(1,0);
    modelViewMatrix[5] = cameraMatrix->GetElement(1,1);
    modelViewMatrix[6] = cameraMatrix->GetElement(1,2);
    modelViewMatrix[7] = cameraMatrix->GetElement(1,3);

    modelViewMatrix[8]  = cameraMatrix->GetElement(2,0);
    modelViewMatrix[9]  = cameraMatrix->GetElement(2,1);
    modelViewMatrix[10] = cameraMatrix->GetElement(2,2);
    modelViewMatrix[11] = cameraMatrix->GetElement(2,3);

    modelViewMatrix[12] = cameraMatrix->GetElement(3,0);
    modelViewMatrix[13] = cameraMatrix->GetElement(3,1);
    modelViewMatrix[14] = cameraMatrix->GetElement(3,2);
    modelViewMatrix[15] = cameraMatrix->GetElement(3,3);
    software->SetModelViewMatrix(modelViewMatrix);

    //
    // Set the volume renderer's background color and mode from the
    // window attributes.
    //
    software->SetBackgroundMode(window.GetBackgroundMode());
    software->SetBackgroundColor(window.GetBackground());
    software->SetGradientBackgroundColors(window.GetGradBG1(),
                                          window.GetGradBG2());

    //
    // Do the funny business to force an update of the avtDataObject.
    //
    avtDataObject_p dob = software->GetOutput();
    dob->Update(GetGeneralContract());

    //
    // Free up some memory and clean up.
    //
    delete software;
    avtRay::SetArbitrator(NULL);

    delete compositeRF;

    //
    // Copy the output of the volume renderer to our output.
    //
    avtImage_p output;
    CopyTo(output, dob);
    return  output;
}
void
avtVolumeFilter::Execute(void)
{
    int i;

    // Copy our input to the output
    avtDataObject_p input = GetInput();
    GetOutput()->Copy(*input);

    // The rest of this method is to set up histogram.
    int numValsInHist = 256;
    avtDataset_p ds = GetTypedInput();
    double minmax[2] = { 0, 1 };
    bool artificialMin = atts.GetUseColorVarMin();
    bool artificialMax = atts.GetUseColorVarMax();
    if (!artificialMin || !artificialMax)
        avtDatasetExaminer::GetDataExtents(ds, minmax, primaryVariable);
    minmax[0] = (artificialMin ? atts.GetColorVarMin() : minmax[0]);
    minmax[1] = (artificialMax ? atts.GetColorVarMax() : minmax[1]);
    if (atts.GetScaling() == VolumeAttributes::Log)
    {
        if (artificialMin)
            if (minmax[0] > 0)
                minmax[0] = log10(minmax[0]);
        if (artificialMax)
            if (minmax[1] > 0)
                minmax[1] = log10(minmax[1]);
    }
    else if (atts.GetScaling() == VolumeAttributes::Skew)
    {
        if (artificialMin)
        {
            double newMin = vtkSkewValue(minmax[0], minmax[0], minmax[1],
                                         atts.GetSkewFactor());
            minmax[0] = newMin;
        }
        if (artificialMax)
        {
            double newMax = vtkSkewValue(minmax[1], minmax[0], minmax[1],
                                         atts.GetSkewFactor());
            minmax[1] = newMax;
        }
    }

    std::string s = std::string(primaryVariable);
    std::vector<VISIT_LONG_LONG> numvals(numValsInHist, 0);
    int t1 = visitTimer->StartTimer();
    avtDatasetExaminer::CalculateHistogram(ds, s, minmax[0], minmax[1], numvals);

    VISIT_LONG_LONG maxVal = 0;
    for (i = 0 ; i < numValsInHist ; i++)
        if (numvals[i] > maxVal)
            maxVal = numvals[i];
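
    // Normalize the bin counts by the largest bin so the histogram entries
    // lie in [0, 1].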

    std::vector<float> h1(numValsInHist, 0.);
    if (maxVal != 0)
    {
        for (i = 0 ; i < numValsInHist ; i++)
            h1[i] = ((double) numvals[i]) / ((double) maxVal);
    }

    MapNode vhist;
    vhist["histogram_size"] = numValsInHist;
    vhist["histogram_1d"] = h1;
    // vhist["histogram_2d"] = compressedbuf; <<-- not doing this
    visitTimer->StopTimer(t1, "Calculating histogram");

    GetOutput()->GetInfo().GetAttributes().AddPlotInformation("VolumeHistogram", vhist);
}
void
avtRayCompositer::Execute(void)
{
    int  i, j;
    avtVolume *volume = GetTypedInput()->GetVolume();
    if (volume == NULL)
    {
        // This comes up in the following scenario:
        // An internal error occurs in the sampling phase.  An exception is
        // thrown.  That exception causes avtSamplePoints::SetVolume to
        // never be called.  When it is not called, its data member "volume" is
        // not initialized.  So we get a NULL here.
        //
        // So: in summary, we only get into this situation if there was an
        // error before this module was called.
        EXCEPTION0(ImproperUseException);
    }

    //
    // Determine the size of the screen.
    //
    int  height = volume->GetRestrictedVolumeHeight();
    int  width  = volume->GetRestrictedVolumeWidth();

    //
    // This is a test to determine if there is nothing in the partition we
    // are supposed to composite -- since we don't have access to the 
    // partition, this is a bit of a hack and makes assumptions about how
    // the partitioning is done.
    // 
    if (volume->GetRestrictedMinHeight() >= volume->GetVolumeHeight() ||
        height <= 0 || width <= 0)
    {
        SetOutputImage(NULL);
        return;
    }

    volume->SetProgressCallback(RCPixelProgressCallback, this);

    //
    // Create an image that we can place each pixel into.
    //
    vtkImageData *image = avtImageRepresentation::NewImage(width, height);

    //
    // Populate an initial image, either with the background or with an
    // opaque image that is to be inserted into the middle of the rendering.
    //
    unsigned char *data = (unsigned char *)image->GetScalarPointer(0, 0, 0);
    int nPixels = width*height;
    double *zbuffer = new double[nPixels];
    for (i = 0 ; i < nPixels ; i++)
    {
        zbuffer[i] = 1.;
    }

    //
    // Draw the initial background into the image.
    //
    int fullHeight = volume->GetVolumeHeight();
    int fullWidth  = volume->GetVolumeWidth();
    vtkImageData *fullImage = avtImageRepresentation::NewImage(fullWidth,
                                                               fullHeight);
    unsigned char *fulldata = (unsigned char *) 
                                          fullImage->GetScalarPointer(0, 0, 0);
    FillBackground(fulldata, fullWidth, fullHeight);

    //
    // Now that we have the background in the full image, copy it into what
    // we need for this image.
    //
    int minWidth  = volume->GetRestrictedMinWidth();
    int minHeight = volume->GetRestrictedMinHeight();
    for (i = 0 ; i < nPixels ; i++)
    {
        int restrictedWidth  = i % width;
        int restrictedHeight = i / width;
        int realWidth  = restrictedWidth + minWidth;
        int realHeight = restrictedHeight + minHeight;
        int indexIntoFullData = realHeight*fullWidth + realWidth;
        for (j = 0 ; j < 3 ; j++)
        {
            data[3*i+j] = fulldata[3*indexIntoFullData+j];
        }
    }

    //
    // We were given an opaque image to insert into the picture.  Worse,
    // the image probably has different dimensions if we are running in
    // parallel.  This is captured by the notion of a restricted volume.
    //
    if (*opaqueImage != NULL)
    {
        int minW = volume->GetRestrictedMinWidth();
        int minH = volume->GetRestrictedMinHeight();
        vtkImageData  *opaqueImageVTK = opaqueImage->GetImage().GetImageVTK();
        float         *opaqueImageZB  = opaqueImage->GetImage().GetZBuffer();
        unsigned char *opaqueImageData =
                    (unsigned char *)opaqueImageVTK->GetScalarPointer(0, 0, 0);
        int n_comp = opaqueImageVTK->GetNumberOfScalarComponents();

        for (int i = 0 ; i < width ; i++)
        {
            for (int j = 0 ; j < height ; j++)
            {
                int index = j*width + i;
                int opaqueImageIndex = (j+minH)*fullWidth + (i+minW);
                zbuffer[index] = opaqueImageZB[opaqueImageIndex];
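                // A z-value of 1 is the far clipping plane; anything closer
                // means opaque geometry covers this pixel, so seed the image
                // with its color.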
                if (zbuffer[index] != 1.)
                {
                    data[3*index    ] = opaqueImageData[n_comp*opaqueImageIndex];
                    data[3*index + 1] = opaqueImageData[n_comp*opaqueImageIndex+1];
                    data[3*index + 2] = opaqueImageData[n_comp*opaqueImageIndex+2];
                }
            }
        }
    }
    
    //
    // Have the volume cast our ray function on all of its valid rays and put
    // the output in this screen.  There is a lot of work here.
    //
    volume->GetPixels(rayfoo, data, zbuffer);

    //
    // Tell our output what its new image is.
    //
    SetOutputImage(image);

    //
    // Clean up memory.
    //
    image->Delete();
    fullImage->Delete();
    delete [] zbuffer;
}
void
avtSamplePointCommunicator::Execute(void)
{
#ifdef PARALLEL
    int timingsIndex = visitTimer->StartTimer();
    int nProgressStages = 14;
    int currentStage    = 1;

    //
    // We are typically still waiting for the sample point extractors, so put
    // in a barrier so this filter can be absolved of blame.
    //
    int t1 = visitTimer->StartTimer();
    Barrier();
    visitTimer->StopTimer(t1, "Waiting for other processors to catch up");
    UpdateProgress(currentStage++, nProgressStages);

    if (imagePartition == NULL)
    {
        EXCEPTION0(ImproperUseException);
    }

    int t2 = visitTimer->StartTimer();
    EstablishImagePartitionBoundaries();
    visitTimer->StopTimer(t2, "Establishing partition boundaries");
    UpdateProgress(currentStage++, nProgressStages);

    avtVolume *involume      = GetTypedInput()->GetVolume();
    int        volumeWidth   = involume->GetVolumeWidth();
    int        volumeHeight  = involume->GetVolumeHeight();
    int        volumeDepth   = involume->GetVolumeDepth();

    //
    // Have the rays serialize their sample points.
    //
    int t3 = visitTimer->StartTimer();
    int   *out_points_count = new int[numProcs];
    char **out_points_msgs  = new char*[numProcs];
    char *tmpcat1 = involume->ConstructMessages(imagePartition,
                                            out_points_msgs, out_points_count);
    UpdateProgress(currentStage++, nProgressStages);
    visitTimer->StopTimer(t3, "Building sample messages");

    //
    // Have the cells serialize themselves.
    //
    int t4 = visitTimer->StartTimer();
    avtCellList *incl  = GetTypedInput()->GetCellList();
    int   *out_cells_count = new int[numProcs];
    char **out_cells_msgs  = new char*[numProcs];
    char *tmpcat2 = incl->ConstructMessages(imagePartition, out_cells_msgs,
                                            out_cells_count);
    UpdateProgress(currentStage++, nProgressStages);
    visitTimer->StopTimer(t4, "Building cell messages");

    //
    // Determine which cells/points are going where and distribute them in a
    // way that minimizes communication.
    //
    int t5 = visitTimer->StartTimer();
    DetermineImagePartitionAssignments(out_points_count, out_cells_count);
    UpdateProgress(currentStage++, nProgressStages);
    visitTimer->StopTimer(t5, "Assigning image portions to processors");

    //
    // The messages are set up for image partition assignments of 0->0, 1->1,
    // etc.  Rework them so that they can be sent into our CommunicateMessages
    // routine.
    //
    int t6 = visitTimer->StartTimer();
    char *pointsOnThisProc;
    int   numPointsOnThisProc;
    char *concat1 = MutateMessagesByAssignment(out_points_msgs,
                      out_points_count, pointsOnThisProc, numPointsOnThisProc);
    delete [] tmpcat1; // No longer needed.  out_points_msgs contains the
                       // same info with the proper ordering.
    UpdateProgress(currentStage++, nProgressStages);
    visitTimer->StopTimer(t6, "Preparing sample messages to send");

    int t7 = visitTimer->StartTimer();
    char *cellsOnThisProc;
    int   numCellsOnThisProc;
    char *concat2 = MutateMessagesByAssignment(out_cells_msgs,
                         out_cells_count, cellsOnThisProc, numCellsOnThisProc);
    delete [] tmpcat2; // No longer needed.  out_cells_msgs contains the
                       // same info with the proper ordering.
    UpdateProgress(currentStage++, nProgressStages);
    visitTimer->StopTimer(t7, "Preparing cell messages to send");

    //
    // Send the sample points.
    //
    int t8 = visitTimer->StartTimer();
    int   *in_points_count  = new int[numProcs];
    char **in_points_msgs   = new char*[numProcs];
    char *concat3 = CommunicateMessages(out_points_msgs, out_points_count,
                                        in_points_msgs, in_points_count);
    delete [] concat1;
    delete [] out_points_count;
    delete [] out_points_msgs;
    UpdateProgress(currentStage++, nProgressStages);
    visitTimer->StopTimer(t8, "Sending sample points");

    //
    // Send the cells.
    //
    int t9 = visitTimer->StartTimer();
    int   *in_cells_count  = new int[numProcs];
    char **in_cells_msgs   = new char*[numProcs];
    char *concat4 = CommunicateMessages(out_cells_msgs, out_cells_count,
                                        in_cells_msgs, in_cells_count);
    delete [] concat2;
    delete [] out_cells_count;
    delete [] out_cells_msgs;
    UpdateProgress(currentStage++, nProgressStages);
    visitTimer->StopTimer(t9, "Sending cells");

    //
    // Create the output volume and let it know that it is only for a
    // restricted part of the volume.
    //
    int   outMinWidth, outMaxWidth, outMinHeight, outMaxHeight;
    imagePartition->GetThisPartition(outMinWidth, outMaxWidth, outMinHeight,
                                     outMaxHeight);
    int nv = GetTypedInput()->GetNumberOfVariables();
    nv = UnifyMaximumValue(nv);
    if (GetTypedInput()->GetUseWeightingScheme())
        GetTypedOutput()->SetUseWeightingScheme(true);
    if (GetTypedOutput()->GetVolume() == NULL)
        GetTypedOutput()->SetVolume(volumeWidth, volumeHeight, volumeDepth);
    else
        GetTypedOutput()->GetVolume()->ResetSamples();

    avtVolume *outvolume = GetTypedOutput()->GetVolume();
    outvolume->Restrict(outMinWidth, outMaxWidth, outMinHeight, outMaxHeight);

    //
    // Put the sample points into our output volume.
    //
    int t10 = visitTimer->StartTimer();
    outvolume->ExtractSamples(in_points_msgs, in_points_count, numProcs);
    delete [] concat3;
    delete [] in_points_count;
    delete [] in_points_msgs;
    UpdateProgress(currentStage++, nProgressStages);

    outvolume->ExtractSamples(&pointsOnThisProc, &numPointsOnThisProc, 1);
    delete [] pointsOnThisProc;
    UpdateProgress(currentStage++, nProgressStages);
    visitTimer->StopTimer(t10, "Getting sample points out of messages");
    
    //
    // Extract the sample points from the new cells.
    //
    int t11 = visitTimer->StartTimer();
    avtCellList *outcl = GetTypedOutput()->GetCellList();
    outcl->SetJittering(jittering);
    outcl->Restrict(outMinWidth, outMaxWidth, outMinHeight, outMaxHeight);
    outcl->ExtractCells(in_cells_msgs, in_cells_count, numProcs, outvolume);
    UpdateProgress(currentStage++, nProgressStages);
    delete [] concat4;
    delete [] in_cells_count;
    delete [] in_cells_msgs;

    outcl->ExtractCells(&cellsOnThisProc, &numCellsOnThisProc, 1, outvolume);
    delete [] cellsOnThisProc;
    UpdateProgress(currentStage++, nProgressStages);
    visitTimer->StopTimer(t11, "Getting sample points out of cells");

    visitTimer->StopTimer(timingsIndex, "Sample point communication");
#else
    GetTypedOutput()->Copy(*(GetTypedInput()));
#endif
}