float *
avtR2Fvariance::FinalizePass(int pass)
{
    float *rv = NULL;
    if (pass == 0)
    {
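        // Pass 0: reduce the per-bin counts and running sums across all
        // processors, then convert each sum into a per-bin average
        // (undefinedVal for empty bins).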
        double *rt2  = new double[nBins];
        int    *cnt2 = new int[nBins];
        SumIntArrayAcrossAllProcessors(count, cnt2, nBins);
        SumDoubleArrayAcrossAllProcessors(running_total_ave, rt2, nBins);
        for (int i = 0 ; i < nBins ; i++)
        {
            if (cnt2[i] > 0)
                running_total_ave[i] = rt2[i] / cnt2[i];
            else
                running_total_ave[i] = undefinedVal;
        }
        delete [] rt2;
        delete [] cnt2;
    }
    else
    {
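        // Pass 1: reduce running_total_variance and the counts across all
        // processors and return the per-bin variance (undefinedVal for
        // empty bins).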
        rv           = new float[nBins];
        double *rt2  = new double[nBins];
        int    *cnt2 = new int[nBins];
        SumIntArrayAcrossAllProcessors(count, cnt2, nBins);
        SumDoubleArrayAcrossAllProcessors(running_total_variance, rt2, nBins);
        for (int i = 0 ; i < nBins ; i++)
        {
            if (cnt2[i] > 0)
                rv[i] = rt2[i] / cnt2[i];
            else
                rv[i] = undefinedVal;
        }
        delete [] rt2;
        delete [] cnt2;
    }

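    // Advance the member pass counter (the 'pass' parameter shadows it).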
    this->pass++;
    return rv;
}
float *
avtR2Frms::FinalizePass(int pass)
{
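    // Reduce the per-bin counts and running totals across all processors,
    // then take the square root of the mean to form the per-bin RMS
    // (undefinedVal for empty bins).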
    float  *rv   = new float[nBins];
    double *rt2  = new double[nBins];
    int    *cnt2 = new int[nBins];
    SumIntArrayAcrossAllProcessors(count, cnt2, nBins);
    SumDoubleArrayAcrossAllProcessors(running_total, rt2, nBins);
    for (int i = 0 ; i < nBins ; i++)
    {
        if (cnt2[i] > 0)
            rv[i] = sqrt(rt2[i] / cnt2[i]);
        else
            rv[i] = undefinedVal;
    }
    delete [] rt2;
    delete [] cnt2;

    return rv;
}
void
avtAggregateChordLengthDistributionQuery::PostExecute(void)
{
    int   i;

    int times = 0;
    char name[1024];
    sprintf(name, "cld_a%d.ult", times++);

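    // Rank 0 probes for the first unused file name of the form cld_a<N>.ult.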
    if (PAR_Rank() == 0)
    {
        bool lookingForUnused = true;
        while (lookingForUnused)
        {
            ifstream ifile(name);
            if (ifile.fail())
                lookingForUnused = false;
            else
                sprintf(name, "cld_a%d.ult", times++);
        }
    }

    char msg[1024];
    sprintf(msg, "The aggregate chord length distribution has been outputted "
            "as an Ultra file (%s), which can then be imported into VisIt.", 
            name);
    SetResultMessage(msg);
    SetResultValue(0.);

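    // Combine the per-bin chord counts from all processors into a global
    // histogram.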
    int *nc2 = new int[numBins];
    SumIntArrayAcrossAllProcessors(numChords, nc2, numBins);
    delete [] numChords;
    numChords = nc2;

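    // Only rank 0 normalizes the histogram into a distribution and writes
    // the Ultra curve file.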
    if (PAR_Rank() == 0)
    {
        double binWidth = (maxLength-minLength) / numBins;
        double totalArea = 0.;
        for (i = 0 ; i < numBins ; i++)
            totalArea += binWidth*numChords[i];
        if (totalArea == 0.)
        {
            sprintf(msg, "The chord length distribution could not be "
                    "calculated because none of the lines intersected the data"
                    " set.  If you have used a fairly large number of lines, "
                    "then this may be indicative of an error state.");
            SetResultMessage(msg);
            return;
        }

        ofstream ofile(name);
        if (ofile.fail())
        {
            sprintf(msg, "Unable to write out file containing distribution.");
            SetResultMessage(msg);
            return;
        }
        ofile << "# Chord length distribution - aggregate" << endl;

        for (int i = 0 ; i < numBins ; i++)
        {
            //double x = minLength + (i+0.5)*binWidth;
            double x1 = minLength + (i)*binWidth;
            double x2 = minLength + (i+1)*binWidth;
            double y = numChords[i] / totalArea; // Normalize so the area
                                                 // under the curve is 1.
            ofile << x1 << " " << y << endl;
            ofile << x2 << " " << y << endl;
        }
    }
}
void
avtConnComponentsCentroidQuery::PostExecute(void)
{
    // get # of cells per component (from all processors)
    int    *sum_res_int = new int[nComps];
    SumIntArrayAcrossAllProcessors(&nCellsPerComp[0], sum_res_int, nComps);
    memcpy(&nCellsPerComp[0],sum_res_int,nComps * sizeof(int));

    delete [] sum_res_int;

    // get centroid values (from all processors)
    double *sum_res_dbl = new double[nComps];

    SumDoubleArrayAcrossAllProcessors(&xCentroidPerComp[0],
                                      sum_res_dbl,
                                      nComps);

    memcpy(&xCentroidPerComp[0],sum_res_dbl,nComps * sizeof(double));

    SumDoubleArrayAcrossAllProcessors(&yCentroidPerComp[0],
                                      sum_res_dbl,
                                      nComps);

    memcpy(&yCentroidPerComp[0],sum_res_dbl,nComps * sizeof(double));

    SumDoubleArrayAcrossAllProcessors(&zCentroidPerComp[0],
                                      sum_res_dbl,
                                      nComps);

    memcpy(&zCentroidPerComp[0],sum_res_dbl,nComps * sizeof(double));

    delete [] sum_res_dbl;

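    // Every rank now holds the global per-component cell counts and
    // coordinate sums; rank 0 finishes the centroids below.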
    // create output message

    if(PAR_Rank() == 0)
    {
        std::string msg = "";
        char buff[2048];

        if (nComps == 1)
            SNPRINTF(buff, 2048, "Found %d connected component\n", nComps);
        else
            SNPRINTF(buff, 2048, "Found %d connected components\n", nComps);

        msg += buff;

        // pack values into a single vector for query output
        std::vector<double> result_vec(nComps * 3);

        for(int i=0;i<nComps;i++)
        {
            // get number of cells for current component
            double n_comp_cells =  (double)nCellsPerComp[i];
            // calculate centroid values for the current component
            xCentroidPerComp[i] /= n_comp_cells;
            yCentroidPerComp[i] /= n_comp_cells;
            zCentroidPerComp[i] /= n_comp_cells;

            // pack into result vector
            result_vec[i*3 + 0] = xCentroidPerComp[i];
            result_vec[i*3 + 1] = yCentroidPerComp[i];
            result_vec[i*3 + 2] = zCentroidPerComp[i];
        }

    
        std::string format  =  "Component %d [%d cells] Centroid = (" 
                            + queryAtts.GetFloatFormat()  +","
                            + queryAtts.GetFloatFormat()  +","
                            + queryAtts.GetFloatFormat()  +")\n";
    
        // prepare the output message
        for(int i=0;i<nComps;i++)
        {
            SNPRINTF(buff, 2048,
                     format.c_str(),
                     i,
                     nCellsPerComp[i],
                     xCentroidPerComp[i],
                     yCentroidPerComp[i],
                     zCentroidPerComp[i]);

            msg += buff;
        }

        // set result message
        SetResultMessage(msg);

        // set result values
        SetResultValues(result_vec);
    }
}
void
avtImagePartition::EstablishPartitionBoundaries(int *samples)
{
    int i, j;

    int first_scanline = (shouldDoTiling ? tile_height_min : 0);
    int last_scanline = (shouldDoTiling ? tile_height_max : height);

    //
    // Find out how many samples there are in each scanline across all procs.
    //
    const int n_scanlines = last_scanline - first_scanline;

    if (numProcessors > n_scanlines)
    {
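        // More processors than scanlines: give each of the first n_scanlines
        // processors a single scanline and leave the remaining processors
        // with an empty (start > stop) range.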
        for (i = 0 ; i < n_scanlines ; i++)
        {
            partitionStartsOnScanline[i] = i;
            partitionStopsOnScanline[i]  = i;
            stpAssignments[i] = i;
        }
        for (i = n_scanlines ; i < numProcessors ; i++)
        {
            partitionStartsOnScanline[i] = n_scanlines+1;
            partitionStopsOnScanline[i]  = n_scanlines;
        }

        establishedPartitionBoundaries = true;
        return;
    }
    else if (numProcessors >= 32)
    {
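        // Many processors: skip the sample-based balancing and split the
        // scanlines evenly, handing one extra scanline to each of the first
        // numExtraUntil processors.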
        int numScanlinesPerProc = n_scanlines/numProcessors;
        int numExtraUntil = (n_scanlines % numProcessors);
        for (i = 0 ; i < numProcessors ; i++)
        {
            partitionStartsOnScanline[i] = numScanlinesPerProc*i;
            partitionStartsOnScanline[i] +=
                (i > numExtraUntil ? numExtraUntil : i);
            partitionStopsOnScanline[i] =
                partitionStartsOnScanline[i] + numScanlinesPerProc-1;
            partitionStopsOnScanline[i] +=
                (i < numExtraUntil ? 1 : 0);
            for (j = partitionStartsOnScanline[i] ;
                    j <= partitionStopsOnScanline[i] ; j++)
                stpAssignments[j] = i;
        }

        establishedPartitionBoundaries = true;
        return;
    }

    std::vector<int> allSamples(n_scanlines);
    stpAssignments.resize(n_scanlines, 0);
    SumIntArrayAcrossAllProcessors(samples, &allSamples[0], n_scanlines);

    //
    // Find out how many total samples there are and what the target is.
    //
    int totalSamples = 0;
    for (i = first_scanline ; i < last_scanline ; i++)
    {
        // We need to iterate over scanlines, but our arrays are (of course)
        // 0-based.  Construct a value which gives the array index that
        // corresponds to the current scanline.
        size_t idx = i - first_scanline;

        //
        // There have been some problems with overflows when we have lots of
        // sample points and we are in send cells mode (since send cells
        // overestimates pretty dramatically how many samples it has).
        //
        // Normalize the number of samples.
        //
        if (allSamples[idx] > 0 && allSamples[idx] < 1000)
        {
            allSamples[idx] = 1;
        }
        else
        {
            allSamples[idx] /= 1000;
        }
        totalSamples += allSamples[idx];
    }
    int target  = totalSamples / numProcessors;
    target      = (target <= 0 ? 1 : target); // Correction for when we have
    // nothing to render.
    int tooHigh = (int) (target*1.5);
    int tooLow  = (int) (target*1.);

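    // Greedily assign consecutive scanlines to the current partition; once
    // the partition holds enough samples and adding the next scanline would
    // exceed the upper bound, close it off and start a new one.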
    int currentPartition = 0;
    int amountForCurrentPartition = 0;
    partitionStartsOnScanline[currentPartition] = first_scanline;
    for (i = first_scanline ; i < last_scanline ; i++)
    {
        // Need 0-based array index; see comment above.
        size_t idx = i - first_scanline;

        if (amountForCurrentPartition + allSamples[idx] > tooHigh)
        {
            if (amountForCurrentPartition > tooLow &&
                    currentPartition+1 < numProcessors)
            {
                //
                // If we added the next scanline, we would be too high.  Also,
                // the number of scanlines added to the current partition is
                // sufficient to not be ridiculously low.  Declare this
                // partition closed off and start the next one.
                //
                partitionStopsOnScanline[currentPartition] = i-1;
                currentPartition++;
                amountForCurrentPartition = 0;
                partitionStartsOnScanline[currentPartition] = i;
            }
        }

        stpAssignments[idx] = currentPartition;
        amountForCurrentPartition += allSamples[idx];
    }
    partitionStopsOnScanline[currentPartition] = last_scanline-1;
    currentPartition++;

    //
    // We may have not assigned the last few processors some partitions, so
    // give them the equivalent of nothing.
    //
    while (currentPartition < numProcessors)
    {
        partitionStartsOnScanline[currentPartition] = last_scanline+1;
        partitionStopsOnScanline[currentPartition]  = last_scanline;
        currentPartition++;
    }

    establishedPartitionBoundaries = true;
}
void
avtIndividualChordLengthDistributionQuery::PostExecute(void)
{
    int   i;

    int times = 0;
    char name[1024];
    sprintf(name, "cld_i%d.ult", times++);

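    // As in the aggregate query, rank 0 probes for the first unused file
    // name, here of the form cld_i<N>.ult.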
    if (PAR_Rank() == 0)
    {
        bool lookingForUnused = true;
        while (lookingForUnused)
        {
            ifstream ifile(name);
            if (ifile.fail())
                lookingForUnused = false;
            else
                sprintf(name, "cld_i%d.ult", times++);
        }
    }

    char msg[1024];
    sprintf(msg, "The chord length distribution has been outputted as an "
                 "Ultra file (%s), which can then be imported into VisIt.", 
                 name);
    SetResultMessage(msg);
    SetResultValue(0.);

    int *nc2 = new int[numBins];
    SumIntArrayAcrossAllProcessors(numChords, nc2, numBins);
    delete [] numChords;
    numChords = nc2;

    if (PAR_Rank() == 0)
    {
        double binWidth = (maxLength-minLength) / numBins;
        double totalArea = 0.;
        for (i = 0 ; i < numBins ; i++)
            totalArea += binWidth*numChords[i];
        if (totalArea == 0.)
        {
            sprintf(msg, "The chord length distribution could not be "
                   "calculated because none of the lines intersected "
                   "the data set. If you have used a fairly large number "
                   "of lines, then this may be indicative of an error state.");
            SetResultMessage(msg);
            return;
        }

        ofstream ofile(name);
        if (ofile.fail())
        {
            sprintf(msg, "Unable to write out file containing distribution.");
            SetResultMessage(msg);
        }
        if (!ofile.fail())
            ofile << "# Chord length distribution - individual" << endl;

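        // Build the step-function curve (x1,y),(x2,y) per bin for the query
        // result, and write it to the Ultra file if it opened successfully.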
        MapNode result_node;
        doubleVector curve;

        for (int i = 0 ; i < numBins ; i++)
        {
            double x1 = minLength + (i)*binWidth;
            double x2 = minLength + (i+1)*binWidth;
            double y = numChords[i] / totalArea; // Normalize so the area
                                                 // under the curve is 1.
            curve.push_back(x1);
            curve.push_back(y);
            curve.push_back(x2);
            curve.push_back(y);
            if (!ofile.fail())
            {
                ofile << x1 << " " << y << endl;
                ofile << x2 << " " << y << endl;
            }
        }
        result_node["chord_length_distribution_individual"] = curve;
        SetXmlResult(result_node.ToXML());
        SetResultValues(curve);
    }
}