void Histogram::printWithMultiplier(ostream& out, double multiplier) const
{
  if (m_binsize == -1) {
    out << "[binsize: log2 ";
  } else {
    out << "[binsize: " << m_binsize << " ";
  }
  out << "max: " << m_max << " ";
  out << "count: " << m_count << " ";
  //  out << "total: " <<  m_sumSamples << " ";
  if (m_count == 0) {
    out << "average: NaN |";
    out << "standard deviation: NaN |";
  } else {
    out << "average: " << setw(5) << ((double) m_sumSamples)/m_count << " | ";
    out << "standard deviation: " << getStandardDeviation() << " |";
  }
  for (int i = 0; i < m_bins && i <= m_largest_bin; i++) {
    if (multiplier == 1.0) {
      out << " " << m_data[i];
    } else {
      out << " " << double(m_data[i]) * multiplier;
    }
  }
  out << " ]";
}
Example #2
//-=========================================================
//     Arguments: in_pVectors          - source vectors to be
//                                       quantized
//                in_numInputVectors   - number of input Vectors
//                out_pOutputVectors   - array for returned vectors
//                io_rNumOutputVectors - On input, contains
//                                  the quantization target,
//                                  i.e., the number of desired
//                                  "best" vectors.  On output,
//                                  contains the number that
//                                  were actually created.
//
//       Returns: VQ_SUCCESS or VQ_FAILURE
//
//   Description: Uses PCA to quantize the inputs to the
//                 "best" set of representative vectors.
//
//         Notes: Note that the CALLING function is responsible
//                 for allocating the memory assigned to the
//                 elements of the output vector array.  This
//                 function neither allocates nor frees any
//                 memory for that purpose.
//-=========================================================
int
quantizeVectors(const quantVector* in_pVectors,
                const Int32        in_numInputVectors,
                quantVector*       out_pOutputVectors,
                Int32&             io_rNumOutputVectors)
{
   // Sanity checks
   //
   AssertFatal(in_pVectors != NULL, "NULL vector input");
   AssertFatal(in_numInputVectors > 0, "Bad numVectors");
   AssertFatal(out_pOutputVectors != NULL, "output Array NULL");
   AssertFatal(io_rNumOutputVectors > 0, "Error, bad quant request");
   
   if (in_numInputVectors <= io_rNumOutputVectors) {
      // Well, in this case we have an easy time of it!
      //  Just copy the input vectors to the output
      //  array and be done with it!  (return rather.)
      //
      for (int i = 0; i < in_numInputVectors; i++) {
         out_pOutputVectors[i].numDim = in_pVectors[i].numDim;
         memcpy(out_pOutputVectors[i].pElem, in_pVectors[i].pElem,
                sizeof(double) * in_pVectors[i].numDim);
      }
      io_rNumOutputVectors = in_numInputVectors;
      
      return VQ_SUCCESS;
   }

   // Ah well, on to the real work.  First, create the
   //  quantization table, with the appropriate number
   //  of entries, and insert the entire input set into
   //  the first entry.
   //
   quantTableEntry *pQuantEntries  = new quantTableEntry[io_rNumOutputVectors];
   int              currEndOfTable = 0;
   
   //  Create the sortVector array that we will use to sort the
   // input quantVectors
   //
   sortVector *pSortVectors = new sortVector[in_numInputVectors];
   for (int v = 0; v < in_numInputVectors; v++) {
      pSortVectors[v].pQVector = &in_pVectors[v];
   }

   // Create the base quantEntry
   //
   createQuantTableEntry(&pQuantEntries[0], pSortVectors,
                         in_numInputVectors);
   
   // Ok, now the meat of the process.  While there are fewer
   //  than the desired number of output entries, find the
   //  entry that maximizes (STD_DEV * numVectors), and
   //  split it into two sub-partitions. 
   // Note that we are guaranteed to always have a partition with
   //  > 1 entries at all times during this loop, so there is never
   //  a chance of trying to split a singular partition.  However,
   //  the danger exists that we could wind up with partitions that
   //  contain multiple copies of the same vector, so we check for
   //  zero STD_DEV, and abort at that point...
   //
   while (++currEndOfTable < io_rNumOutputVectors) {

      int maxEntry     = -1;
      double maxStdDev = -1.0;
   
      // Find "worst" table entry
      //
      for (int i = 0; i < currEndOfTable; i++) {
         double temp;
         if (pQuantEntries[i].standardDev < 0) {
            temp = getStandardDeviation(&pQuantEntries[i]);
            temp *= pQuantEntries[i].totalWeight;
            pQuantEntries[i].standardDev = temp;
         } else {
            temp = pQuantEntries[i].standardDev;
         }
         
         if (temp > maxStdDev) {
            maxStdDev = temp;
            maxEntry = i;
         }
      }
      
      // Check to make sure that we haven't homogenized our entries
      //
      const double minStdDev = 1e-10;
      if (maxStdDev <= minStdDev) {
         // Ok, break out of the while loop, we're done.
         //
         break;
      }
      
      sortQuantEntry(&pQuantEntries[maxEntry]);
      splitQuantEntry(&pQuantEntries[maxEntry],
                      &pQuantEntries[currEndOfTable]);
   }
   
   // Ok.  At this point, we have a number of partitioned vectors,
   //  in nice neat buckets, (currEndOfTable) in number.  Write
   //  the number of output vectors in io_rNumOutputVectors,
   //  extract the centers of mass from the buckets, and put these
   //  into the output array.
   //
   io_rNumOutputVectors = currEndOfTable;
   
   for (int i = 0; i < io_rNumOutputVectors; i++) {
      calculateCenterOfMass(&pQuantEntries[i], &out_pOutputVectors[i]);
   }
   
   // Yay!  Done and Successful!  Clean-up section...
   //
   delete [] pSortVectors;
   delete [] pQuantEntries;

   return VQ_SUCCESS;
}
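
A minimal calling sketch for quantizeVectors, grounded only in the header comment above: the caller allocates (and later frees) the element storage for every output slot, and io_rNumOutputVectors comes back holding the number of vectors actually produced. The dimension, slot count, and helper name below are illustrative assumptions, not part of the original source.

// Hypothetical caller; kDim and kSlots are assumed values for illustration.
void quantizeExample(const quantVector* pInputs, Int32 numInputs)
{
   const Int32 kDim   = 3;          // assumed vector dimension
   const Int32 kSlots = 16;         // output slots allocated by the caller
   Int32 numOutputs   = kSlots;     // quantization target on input

   quantVector* pOutputs = new quantVector[kSlots];
   for (Int32 i = 0; i < kSlots; i++) {
      pOutputs[i].numDim = kDim;
      pOutputs[i].pElem  = new double[kDim];   // caller-owned, per the Notes
   }

   if (quantizeVectors(pInputs, numInputs, pOutputs, numOutputs) == VQ_SUCCESS) {
      // numOutputs now holds the count actually produced; it can be smaller
      //  than kSlots if the partitions homogenized (near-zero standard
      //  deviation) before the requested target was reached.
   }

   for (Int32 i = 0; i < kSlots; i++) {
      delete [] pOutputs[i].pElem;
   }
   delete [] pOutputs;
}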
Example #3
void normalizeData(double* data, int asize) {
   double mean = getMean(data, asize);
   double sd = getStandardDeviation(mean, data, asize);
   adjustData(data, asize, mean, sd);
}
Example #4
int main(int argc, char** argv) {
	// process the command-line options
	checkOptions(options, argc, argv);
	HumdrumFile infile;

	if (options.getArgCount() < 1) {
		infile.read(cin);
	} else {
		infile.read(options.getArg(1));
	}

	if (field <= 0) { // field counter starts at 1
		field = findBestField(infile);
	}
	if (field > infile.getMaxTracks()) {
		field = infile.getMaxTracks();
	}

	vector<Datum>  data;
	extractNumbers(infile, field, data);

	if (inputQ) {
		printRawNumbers(data, fullQ);
	}

	double mean;
	double sd;

	if (meanQ) {
		mean = myMean;
	} else {
		mean = getMean(data);
	}

	if (sdQ) {
		sd = mySd;
	} else {
		if (sampleQ) {
			sd = getSampleSD(mean, data);
		} else {
			sd = getStandardDeviation(mean, data);
		}
	}

	if (statQ) {
		cout << "Mean:\t" << mean << "\n";
		cout << "SD:\t" << sd << "\n";
	}
	if (inputQ) {
		exit(0);
	}
	if (statQ && !rawQ) {
		exit(0);
	}

	adjustData(data, mean, sd, reverseQ);

	if (rawQ) {
		printRawNumbers(data, fullQ);
		exit(0);
	} else if (replaceQ) {
		printDataReplace(data, field, infile, mean, sd);
	} else if (prependQ) {
		printDataPrepend(data, field, infile, mean, sd);
	} else if (appendQ) {
		printDataAppend(data, field, infile, mean, sd);
	} else {
		printDataSingle(data, field, infile, mean, sd);
	}

	return 0;
}