QgsRasterBlock * QgsRasterDataProvider::block( int theBandNo, QgsRectangle const & theExtent, int theWidth, int theHeight )
{
  QgsDebugMsg( QString( "theBandNo = %1 theWidth = %2 theHeight = %3" ).arg( theBandNo ).arg( theWidth ).arg( theHeight ) );
  QgsDebugMsg( QString( "theExtent = %1" ).arg( theExtent.toString() ) );

  QgsRasterBlock *block = new QgsRasterBlock( dataType( theBandNo ), theWidth, theHeight, noDataValue( theBandNo ) );
  if ( block->isEmpty() )
  {
    QgsDebugMsg( "Couldn't create raster block" );
    return block;
  }

  // Read necessary extent only
  QgsRectangle tmpExtent = extent().intersect( &theExtent );

  if ( tmpExtent.isEmpty() )
  {
    QgsDebugMsg( "Extent outside provider extent" );
    block->setIsNoData();
    return block;
  }

  double xRes = theExtent.width() / theWidth;
  double yRes = theExtent.height() / theHeight;
  double tmpXRes, tmpYRes;
  double providerXRes = 0;
  double providerYRes = 0;
  if ( capabilities() & ExactResolution )
  {
    providerXRes = extent().width() / xSize();
    providerYRes = extent().height() / ySize();
    tmpXRes = qMax( providerXRes, xRes );
    tmpYRes = qMax( providerYRes, yRes );
    if ( doubleNear( tmpXRes, xRes ) ) tmpXRes = xRes;
    if ( doubleNear( tmpYRes, yRes ) ) tmpYRes = yRes;
  }
  else
  {
    tmpXRes = xRes;
    tmpYRes = yRes;
  }

  if ( tmpExtent != theExtent || tmpXRes > xRes || tmpYRes > yRes )
  {
    // Read smaller extent or lower resolution

    // Calculate row/col limits (before tmpExtent is aligned)
    int fromRow = qRound(( theExtent.yMaximum() - tmpExtent.yMaximum() ) / yRes );
    int toRow = qRound(( theExtent.yMaximum() - tmpExtent.yMinimum() ) / yRes ) - 1;
    int fromCol = qRound(( tmpExtent.xMinimum() - theExtent.xMinimum() ) / xRes );
    int toCol = qRound(( tmpExtent.xMaximum() - theExtent.xMinimum() ) / xRes ) - 1;

    QgsDebugMsg( QString( "fromRow = %1 toRow = %2 fromCol = %3 toCol = %4" ).arg( fromRow ).arg( toRow ).arg( fromCol ).arg( toCol ) );

    if ( fromRow < 0 || fromRow >= theHeight || toRow < 0 || toRow >= theHeight ||
         fromCol < 0 || fromCol >= theWidth || toCol < 0 || toCol >= theWidth )
    {
      // Should not happen
      QgsDebugMsg( "Row or column limits out of range" );
      return block;
    }

    // If lower source resolution is used, the extent must be aligned to original
    // resolution to avoid possible shift due to resampling
    if ( tmpXRes > xRes )
    {
      int col = floor(( tmpExtent.xMinimum() - extent().xMinimum() ) / providerXRes );
      tmpExtent.setXMinimum( extent().xMinimum() + col * providerXRes );
      col = ceil(( tmpExtent.xMaximum() - extent().xMinimum() ) / providerXRes );
      tmpExtent.setXMaximum( extent().xMinimum() + col * providerXRes );
    }
    if ( tmpYRes > yRes )
    {
      int row = floor(( extent().yMaximum() - tmpExtent.yMaximum() ) / providerYRes );
      tmpExtent.setYMaximum( extent().yMaximum() - row * providerYRes );
      row = ceil(( extent().yMaximum() - tmpExtent.yMinimum() ) / providerYRes );
      tmpExtent.setYMinimum( extent().yMaximum() - row * providerYRes );
    }
    int tmpWidth = qRound( tmpExtent.width() / tmpXRes );
    int tmpHeight = qRound( tmpExtent.height() / tmpYRes );
    tmpXRes = tmpExtent.width() / tmpWidth;
    tmpYRes = tmpExtent.height() / tmpHeight;

    QgsDebugMsg( QString( "Reading smaller block tmpWidth = %1 theHeight = %2" ).arg( tmpWidth ).arg( tmpHeight ) );
    QgsDebugMsg( QString( "tmpExtent = %1" ).arg( tmpExtent.toString() ) );

    block->setIsNoData();

    QgsRasterBlock *tmpBlock = new QgsRasterBlock( dataType( theBandNo ), tmpWidth, tmpHeight, noDataValue( theBandNo ) );

    readBlock( theBandNo, tmpExtent, tmpWidth, tmpHeight, tmpBlock->data() );

    int pixelSize = dataTypeSize( theBandNo );

    double xMin = theExtent.xMinimum();
    double yMax = theExtent.yMaximum();
    double tmpXMin = tmpExtent.xMinimum();
    double tmpYMax = tmpExtent.yMaximum();

    for ( int row = fromRow; row <= toRow; row++ )
    {
      double y = yMax - ( row + 0.5 ) * yRes;
      int tmpRow = floor(( tmpYMax - y ) / tmpYRes );

      for ( int col = fromCol; col <= toCol; col++ )
      {
        double x = xMin + ( col + 0.5 ) * xRes;
        int tmpCol = floor(( x - tmpXMin ) / tmpXRes );

        if ( tmpRow < 0 || tmpRow >= tmpHeight || tmpCol < 0 || tmpCol >= tmpWidth )
        {
          QgsDebugMsg( "Source row or column limits out of range" );
          block->setIsNoData(); // so that the problem becomes obvious and fixed
          delete tmpBlock;
          return block;
        }

        size_t tmpIndex = tmpRow * tmpWidth + tmpCol;
        size_t index = row * theWidth + col;

        char *tmpBits = tmpBlock->bits( tmpIndex );
        char *bits = block->bits( index );
        if ( !tmpBits )
        {
          QgsDebugMsg( QString( "Cannot get input block data tmpRow = %1 tmpCol = %2 tmpIndex = %3." ).arg( tmpRow ).arg( tmpCol ).arg( tmpIndex ) );
          continue;
        }
        if ( !bits )
        {
          QgsDebugMsg( "Cannot set output block data." );
          continue;
        }
        memcpy( bits, tmpBits, pixelSize );
      }
    }

    delete tmpBlock;
  }
  else
  {
    readBlock( theBandNo, theExtent, theWidth, theHeight, block->data() );
  }

  // apply user no data values
  // TODO: there are other readBlock methods where no data are not applied
  QList<QgsRasterBlock::Range> myNoDataRangeList = userNoDataValue( theBandNo );
  if ( !myNoDataRangeList.isEmpty() )
  {
    double myNoDataValue = noDataValue( theBandNo );
    size_t size = theWidth * theHeight;
    for ( size_t i = 0; i < size; i++ )
    {
      double value = block->value( i );
      if ( QgsRasterBlock::valueInRange( value, myNoDataRangeList ) )
      {
        block->setValue( i, myNoDataValue );
      }
    }
  }

  return block;
}
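// A minimal standalone sketch (not QGIS API) of the nearest-neighbour index
// mapping used above when a coarser temporary block is copied into the
// requested output grid: each output pixel centre is projected into the
// temporary grid with floor((centre - origin) / resolution). All names and
// numbers here are illustrative.
#include <cmath>
#include <cstdio>
#include <vector>

int main()
{
  // Hypothetical geometry: output grid 4x4 over [0,4]x[0,4], source grid 2x2.
  const int outW = 4, outH = 4;
  const int tmpW = 2, tmpH = 2;
  const double xMin = 0.0, yMax = 4.0;          // output extent origin (top-left)
  const double xRes = 1.0, yRes = 1.0;          // output resolution
  const double tmpXMin = 0.0, tmpYMax = 4.0;    // source extent origin
  const double tmpXRes = 2.0, tmpYRes = 2.0;    // source (coarser) resolution

  std::vector<int> src = { 10, 20, 30, 40 };    // 2x2 source values, row-major
  std::vector<int> dst( outW * outH, -1 );      // -1 plays the role of no-data

  for ( int row = 0; row < outH; row++ )
  {
    double y = yMax - ( row + 0.5 ) * yRes;               // output pixel centre
    int tmpRow = (int) std::floor( ( tmpYMax - y ) / tmpYRes );
    for ( int col = 0; col < outW; col++ )
    {
      double x = xMin + ( col + 0.5 ) * xRes;
      int tmpCol = (int) std::floor( ( x - tmpXMin ) / tmpXRes );
      if ( tmpRow < 0 || tmpRow >= tmpH || tmpCol < 0 || tmpCol >= tmpW )
        continue;                                         // leave as no-data
      dst[row * outW + col] = src[tmpRow * tmpW + tmpCol];
    }
  }

  for ( int row = 0; row < outH; row++ )
  {
    for ( int col = 0; col < outW; col++ )
      std::printf( "%3d", dst[row * outW + col] );
    std::printf( "\n" );
  }
  return 0;
}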
void PoseClusteringShiftSuperimposer::run(const ConsensusMap & map_model, const ConsensusMap & map_scene, TransformationDescription & transformation) { typedef ConstRefVector<ConsensusMap> PeakPointerArray_; typedef Math::LinearInterpolation<double, double> LinearInterpolationType_; LinearInterpolationType_ shift_hash_; // OLD STUFF // LinearInterpolationType_ scaling_hash_1; // LinearInterpolationType_ scaling_hash_2; // LinearInterpolationType_ shift_hash_; // LinearInterpolationType_ rt_high_hash_; /// Maximum deviation in mz of two partner points const double mz_pair_max_distance = param_.getValue("mz_pair_max_distance"); /// Size of each shift bucket const double shift_bucket_size = param_.getValue("shift_bucket_size"); const UInt struc_elem_length_datapoints = 21; // MAGIC ALERT: number of data points in structuring element for tophat filter, which removes baseline from histogram const double scaling_histogram_crossing_slope = 3.0; // MAGIC ALERT: used when distinguishing noise level and enriched histogram bins const double scaling_cutoff_stdev_multiplier = 1.5; // MAGIC ALERT: multiplier for stdev in cutoff for outliers const UInt loops_mean_stdev_cutoff = 3; // MAGIC ALERT: number of loops in stdev cutoff for outliers startProgress(0, 100, "shift pose clustering"); UInt actual_progress = 0; setProgress(++actual_progress); // Optionally, we will write dumps of the hash table buckets. bool do_dump_buckets = false; String dump_buckets_basename; if (param_.getValue("dump_buckets") != "") { do_dump_buckets = true; dump_buckets_basename = param_.getValue("dump_buckets"); } setProgress(++actual_progress); // Even more optionally, we will write dumps of the hashed pairs. bool do_dump_pairs = false; String dump_pairs_basename; if (param_.getValue("dump_pairs") != "") { do_dump_pairs = true; dump_pairs_basename = param_.getValue("dump_pairs"); } setProgress(++actual_progress); //************************************************************************** // Select the most abundant data points only. After that, disallow modifications // (we tend to have annoying issues with const_iterator versus iterator). PeakPointerArray_ model_map_ini(map_model.begin(), map_model.end()); const PeakPointerArray_ & model_map(model_map_ini); PeakPointerArray_ scene_map_ini(map_scene.begin(), map_scene.end()); const PeakPointerArray_ & scene_map(scene_map_ini); { // truncate the data as necessary // casting to SignedSize is done on PURPOSE here! (num_used_points will be maximal if -1 is used) const Size num_used_points = (SignedSize) param_.getValue("num_used_points"); if (model_map_ini.size() > num_used_points) { model_map_ini.sortByIntensity(true); model_map_ini.resize(num_used_points); } model_map_ini.sortByComparator(Peak2D::MZLess()); setProgress(++actual_progress); if (scene_map_ini.size() > num_used_points) { scene_map_ini.sortByIntensity(true); scene_map_ini.resize(num_used_points); } scene_map_ini.sortByComparator(Peak2D::MZLess()); setProgress(++actual_progress); // Note: model_map_ini and scene_map_ini will not be used further below } setProgress((actual_progress = 10)); //************************************************************************** // Preprocessing // get RT ranges (NOTE: we trust that min and max have been updated in the // ConsensusMap::convert() method !) 
const double model_low = map_model.getMin()[ConsensusFeature::RT]; const double scene_low = map_scene.getMin()[ConsensusFeature::RT]; const double model_high = map_model.getMax()[ConsensusFeature::RT]; const double scene_high = map_scene.getMax()[ConsensusFeature::RT]; // OLD STUFF // const double rt_low = (maps[0].getMin()[ConsensusFeature::RT] + maps[1].getMin()[ConsensusFeature::RT]) / 2.; // const double rt_high = (maps[0].getMax()[ConsensusFeature::RT] + maps[1].getMax()[ConsensusFeature::RT]) / 2.; // Initialize the hash tables: shift_hash_ // OLD STUFF: was: rt_scaling_hash_, rt_low_hash_, and rt_high_hash_ { // (over)estimate the required number of buckets for shifting double max_shift = param_.getValue("max_shift"); // actually the largest possible shift can be much smaller, depending on the data do { if (max_shift < 0) max_shift = -max_shift; // ...ml@@@mh........ , ........ml@@@mh... // ........sl@@@sh... , ...sl@@@sh........ double diff; diff = model_high - scene_low; if (diff < 0) diff = -diff; if (max_shift > diff) max_shift = diff; diff = model_low - scene_high; if (diff < 0) diff = -diff; if (max_shift > diff) max_shift = diff; } while (0); const Int shift_buckets_num_half = 4 + (Int) ceil((max_shift) / shift_bucket_size); const Int shift_buckets_num = 1 + 2 * shift_buckets_num_half; shift_hash_.getData().clear(); shift_hash_.getData().resize(shift_buckets_num); shift_hash_.setMapping(shift_bucket_size, shift_buckets_num_half, 0); } setProgress(++actual_progress); //************************************************************************** // compute the ratio of the total intensities of both maps, for normalization double total_intensity_ratio; do { double total_int_model_map = 0; for (Size i = 0; i < model_map.size(); ++i) { total_int_model_map += model_map[i].getIntensity(); } setProgress(++actual_progress); double total_int_scene_map = 0; for (Size i = 0; i < scene_map.size(); ++i) { total_int_scene_map += scene_map[i].getIntensity(); } setProgress(++actual_progress); // ... and finally ... total_intensity_ratio = total_int_model_map / total_int_scene_map; } while (0); // (the extra syntax helps with code folding in eclipse!) setProgress((actual_progress = 20)); /// The serial number is incremented for each invocation of this, to avoid overwriting of hash table dumps. static Int dump_buckets_serial = 0; ++dump_buckets_serial; //************************************************************************** // Hashing // Compute the transformations between each point pair in the model map // and each point pair in the scene map and hash the shift // transformation. // To speed up the calculation of the final transformation, we confine the number of // considered point pairs. We match a point p in the model map only onto those points p' // in the scene map that lie in a certain mz interval. Size const model_map_size = model_map.size(); // i /* OLD STUFF: also: j */ Size const scene_map_size = scene_map.size(); // k /* OLD STUFF: also: l */ const double winlength_factor_baseline = 0.1; // MAGIC ALERT: Each window is given unit weight. If there are too many pairs for a window, the individual contributions will be very small, but running time will be high, so we provide a cutoff for this. Typically this will exclude compounds which elute over the whole retention time range from consideration. /////////////////////////////////////////////////////////////////// // Hashing: Estimate the shift do // begin of hashing (the extra syntax helps with code folding in eclipse!) 
  {
    String dump_pairs_filename;
    std::ofstream dump_pairs_file;
    if (do_dump_pairs)
    {
      dump_pairs_filename = dump_pairs_basename + String(dump_buckets_serial);
      dump_pairs_file.open(dump_pairs_filename.c_str());
      dump_pairs_file << "#" << ' ' << "i" << ' ' << "k" << std::endl;
    }
    setProgress(++actual_progress);

    // first point in model map
    for (Size i = 0, i_low = 0, i_high = 0, k_low = 0, k_high = 0; i < model_map_size - 1; ++i)
    {
      setProgress(actual_progress + float(i) / model_map_size * 10.f);

      // Adjust window around i in model map
      while (i_low < model_map_size && model_map[i_low].getMZ() < model_map[i].getMZ() - mz_pair_max_distance)
        ++i_low;
      while (i_high < model_map_size && model_map[i_high].getMZ() <= model_map[i].getMZ() + mz_pair_max_distance)
        ++i_high;
      double i_winlength_factor = 1. / (i_high - i_low);
      i_winlength_factor -= winlength_factor_baseline;
      if (i_winlength_factor <= 0)
        continue;

      // Adjust window around k in scene map
      while (k_low < scene_map_size && scene_map[k_low].getMZ() < model_map[i].getMZ() - mz_pair_max_distance)
        ++k_low;
      while (k_high < scene_map_size && scene_map[k_high].getMZ() <= model_map[i].getMZ() + mz_pair_max_distance)
        ++k_high;

      // first point in scene map
      for (Size k = k_low; k < k_high; ++k)
      {
        double k_winlength_factor = 1. / (k_high - k_low);
        k_winlength_factor -= winlength_factor_baseline;
        if (k_winlength_factor <= 0)
          continue;

        // compute similarity of intensities i k
        double similarity_ik;
        {
          const double int_i = model_map[i].getIntensity();
          const double int_k = scene_map[k].getIntensity() * total_intensity_ratio;
          similarity_ik = (int_i < int_k) ? int_i / int_k : int_k / int_i;
          // weight is inversely proportional to the number of elements with similar mz
          similarity_ik *= i_winlength_factor;
          similarity_ik *= k_winlength_factor;
          // VV_(int_i<<' '<<int_k<<' '<<int_similarity_ik);
        }

        // compute the transformation (i) -> (k)
        double shift = model_map[i].getRT() - scene_map[k].getRT();

        // hash the images of scaling, rt_low and rt_high into their respective hash tables
        shift_hash_.addValue(shift, similarity_ik);

        if (do_dump_pairs)
        {
          dump_pairs_file << i << ' ' << model_map[i].getRT() << ' ' << model_map[i].getMZ() << ' ' << k << ' '
                          << scene_map[k].getRT() << ' ' << scene_map[k].getMZ() << ' ' << similarity_ik << ' ' << std::endl;
        }
      } // k
    } // i
  }
  while (0); // end of hashing (the extra syntax helps with code folding in eclipse!)
setProgress((actual_progress = 30)); /////////////////////////////////////////////////////////////////// // work on shift_hash_ // double shift_low; // double shift_centroid; // double shift_high; // OLD STUFF // double shift_low; double shift_centroid; // double shift_high; do { UInt filtering_stage = 0; // optionally, dump before filtering String dump_buckets_filename; std::ofstream dump_buckets_file; if (do_dump_buckets) { dump_buckets_filename = dump_buckets_basename + "_" + String(dump_buckets_serial); dump_buckets_file.open(dump_buckets_filename.c_str()); VV_(dump_buckets_filename); dump_buckets_file << "# shift hash table buckets dump ( scale, height ) : " << dump_buckets_filename << std::endl; dump_buckets_file << "# unfiltered hash data\n"; for (Size index = 0; index < shift_hash_.getData().size(); ++index) { const double image = shift_hash_.index2key(index); const double height = shift_hash_.getData()[index]; dump_buckets_file << filtering_stage << '\t' << index << '\t' << image << '\t' << height << '\n'; } dump_buckets_file << '\n'; } ++filtering_stage; setProgress(++actual_progress); // apply tophat filter to histogram MorphologicalFilter morph_filter; Param morph_filter_param; morph_filter_param.setValue("struc_elem_unit", "DataPoints"); morph_filter_param.setValue("struc_elem_length", double(struc_elem_length_datapoints)); morph_filter_param.setValue("method", "tophat"); morph_filter.setParameters(morph_filter_param); LinearInterpolationType_::container_type buffer(shift_hash_.getData().size()); morph_filter.filterRange(shift_hash_.getData().begin(), shift_hash_.getData().end(), buffer.begin()); shift_hash_.getData().swap(buffer); // optionally, dump after filtering if (do_dump_buckets) { dump_buckets_file << "# tophat filtered hash data\n"; for (Size index = 0; index < shift_hash_.getData().size(); ++index) { const double image = shift_hash_.index2key(index); const double height = shift_hash_.getData()[index]; dump_buckets_file << filtering_stage << '\t' << index << '\t' << image << '\t' << height << '\n'; } dump_buckets_file << '\n'; } setProgress(++actual_progress); ++filtering_stage; // compute freq_cutoff using a fancy criterion to distinguish between the noise level of the histogram and enriched histogram bins double freq_cutoff_low; do { { std::copy(shift_hash_.getData().begin(), shift_hash_.getData().end(), buffer.begin()); std::sort(buffer.begin(), buffer.end(), std::greater<double>()); double freq_intercept = shift_hash_.getData().front(); double freq_slope = (shift_hash_.getData().back() - shift_hash_.getData().front()) / double(buffer.size()) / scaling_histogram_crossing_slope; if (!freq_slope || !buffer.size()) { // in fact these conditions are actually impossible, but let's be really sure ;-) freq_cutoff_low = 0; } else { Size index = 1; // not 0 (!) 
while (buffer[index] >= freq_intercept + freq_slope * double(index)) { ++index; } freq_cutoff_low = buffer[--index]; // note that we have index >= 1 } } } while (0); setProgress(++actual_progress); // apply freq_cutoff, setting smaller values to zero for (Size index = 0; index < shift_hash_.getData().size(); ++index) { if (shift_hash_.getData()[index] < freq_cutoff_low) { shift_hash_.getData()[index] = 0; } } setProgress(++actual_progress); // optionally, dump after noise filtering using freq_cutoff if (do_dump_buckets) { dump_buckets_file << "# after freq_cutoff, which is: " << freq_cutoff_low << '\n'; for (Size index = 0; index < shift_hash_.getData().size(); ++index) { const double image = shift_hash_.index2key(index); const double height = shift_hash_.getData()[index]; dump_buckets_file << filtering_stage << '\t' << index << '\t' << image << '\t' << height << '\n'; } dump_buckets_file << '\n'; } setProgress(++actual_progress); // iterative cut-off based on mean and stdev - relies upon scaling_cutoff_stdev_multiplier which is a bit hard to set right. { Math::BasicStatistics<double> statistics; std::vector<double>::const_iterator data_begin = shift_hash_.getData().begin(); const Size data_size = shift_hash_.getData().size(); Size data_range_begin = 0; Size data_range_end = data_size; for (UInt loop = 0; loop < loops_mean_stdev_cutoff; ++loop) // MAGIC ALERT: number of loops { statistics.update(data_begin + data_range_begin, data_begin + data_range_end); double mean = statistics.mean() + data_range_begin; double stdev = sqrt(statistics.variance()); data_range_begin = floor(std::max<double>(mean - scaling_cutoff_stdev_multiplier * stdev, 0)); data_range_end = ceil(std::min<double>(mean + scaling_cutoff_stdev_multiplier * stdev + 1, data_size)); const double outside_mean = shift_hash_.index2key(mean); const double outside_stdev = stdev * shift_hash_.getScale(); // shift_low = (outside_mean - outside_stdev); shift_centroid = (outside_mean); // shift_high = (outside_mean + outside_stdev); if (do_dump_buckets) { dump_buckets_file << "# loop: " << loop << " mean: " << outside_mean << " stdev: " << outside_stdev << " (mean-stdev): " << outside_mean - outside_stdev << " (mean+stdev): " << outside_mean + outside_stdev << " data_range_begin: " << data_range_begin << " data_range_end: " << data_range_end << std::endl; } } setProgress(++actual_progress); } if (do_dump_buckets) { dump_buckets_file << "# EOF" << std::endl; dump_buckets_file.close(); } setProgress(80); } while (0); //************************************************************************************ // Estimate transform // Compute the shifts at the low and high ends by looking at (around) the fullest bins. double intercept; #if 1 // yes of course, use centroids for images of rt_low and rt_high intercept = shift_centroid; #else // ooh, use maximum bins instead (Note: this is a fossil which would disregard most of the above computations! The code is left here for developers/debugging only.) const Size rt_low_max_index = std::distance(shift_hash_.getData().begin(), std::max_element(shift_hash_.getData().begin(), shift_hash_.getData().end())); intercept = shift_hash_.index2key(rt_low_max_index); #endif VV_(intercept); setProgress(++actual_progress); // set trafo { Param params; params.setValue("slope", 1.0); params.setValue("intercept", intercept); TransformationDescription trafo; trafo.fitModel("linear", params); transformation = trafo; } setProgress(++actual_progress); endProgress(); return; } // run()
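// A small self-contained sketch (independent of OpenMS) of the idea behind the
// shift hashing above: every model/scene peak pair votes for an RT shift, the
// vote is weighted by intensity similarity, and the estimate is read off the
// accumulator. Names and numbers here are illustrative only.
#include <cmath>
#include <cstdio>
#include <vector>

int main()
{
  struct Peak { double rt; double intensity; };
  std::vector<Peak> model = { {100.0, 5.0}, {200.0, 8.0}, {300.0, 2.0} };
  std::vector<Peak> scene = { {110.0, 4.5}, {210.0, 9.0}, {311.0, 2.5} };

  const double bucket_size = 1.0;   // analogous to shift_bucket_size
  const double max_shift = 50.0;
  const int half = 4 + (int) std::ceil(max_shift / bucket_size);
  std::vector<double> hash(1 + 2 * half, 0.0);   // buckets centred at index 'half'

  for (const Peak & m : model)
  {
    for (const Peak & s : scene)
    {
      double shift = m.rt - s.rt;
      if (std::fabs(shift) > max_shift) continue;
      // weight by intensity similarity in [0,1]
      double sim = (m.intensity < s.intensity) ? m.intensity / s.intensity
                                               : s.intensity / m.intensity;
      int index = half + (int) std::floor(shift / bucket_size + 0.5);
      hash[index] += sim;
    }
  }

  // take the fullest bucket as a crude shift estimate (the real code uses a
  // tophat filter, a noise cutoff and an iterative mean/stdev centroid instead)
  int best = 0;
  for (int i = 1; i < (int) hash.size(); ++i)
    if (hash[i] > hash[best]) best = i;
  std::printf("estimated shift ~ %g\n", (best - half) * bucket_size);
  return 0;
}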
/**
 * This function synchronizes step size and number
 */
bool CTSSAProblem::sync()
{
  bool success = true;

  C_FLOAT64 Tmp = *mpDuration;
  C_FLOAT64 StepSize = *mpStepSize;
  C_FLOAT64 StepNumber = (C_FLOAT64) * mpStepNumber;

  if (mStepNumberSetLast)
  {
    StepSize = Tmp / (C_FLOAT64) * mpStepNumber;

    /* Assure that the step size is not too small for machine accuracy */
    if (fabs(StepSize) < 100.0 * std::numeric_limits< C_FLOAT64 >::epsilon() * fabs(*mpDuration))
    {
      CCopasiMessage(CCopasiMessage::WARNING, MCTSSAProblem + 3, StepSize);

      StepSize = 100.0 * std::numeric_limits< C_FLOAT64 >::epsilon() * fabs(*mpDuration);
      /* Assure that the step size has the appropriate sign. */
      StepSize = (Tmp < 0.0) ? - fabs(StepSize) : fabs(StepSize);
      StepNumber = fabs(ceil(Tmp / StepSize));
    }
  }
  else
  {
    if (fabs(StepSize) < 100.0 * std::numeric_limits< C_FLOAT64 >::epsilon() * fabs(*mpDuration))
    {
      CCopasiMessage(CCopasiMessage::WARNING, MCTSSAProblem + 3, StepSize);

      StepSize = 100.0 * std::numeric_limits< C_FLOAT64 >::epsilon() * fabs(*mpDuration);

      /* Assure that the step size has the appropriate sign. */
      StepSize = (Tmp < 0.0) ? - fabs(StepSize) : fabs(StepSize);
    }

    StepNumber = fabs(ceil(Tmp / StepSize));

    /* Protect against overflow */
    if ((C_FLOAT64) ULONG_MAX < StepNumber)
    {
      CCopasiMessage(CCopasiMessage::WARNING, MCTSSAProblem + 2, StepNumber);

      StepNumber = (C_FLOAT64) ULONG_MAX;
      StepSize = Tmp / StepNumber;

      success = false;
    }

    /* Assure that the step size has the appropriate sign. */
    StepSize = (Tmp < 0.0) ? - fabs(StepSize) : fabs(StepSize);
  }

  *mpStepSize = StepSize;
  *mpStepNumber = (unsigned C_INT32) StepNumber;

  if (!success) throw 1;

  return success;
}
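// Stand-alone sketch (not COPASI API) of the synchronisation rule above:
// whichever of step size / step number was set last wins, the other quantity
// is derived from the duration, and the step size is kept above a relative
// machine-accuracy floor. All names here are illustrative.
#include <climits>
#include <cmath>
#include <cstdio>
#include <limits>

static void syncSteps(double duration, bool stepNumberSetLast,
                      double & stepSize, unsigned long & stepNumber)
{
  const double minSize = 100.0 * std::numeric_limits<double>::epsilon() * std::fabs(duration);

  if (stepNumberSetLast)
    stepSize = duration / (double) stepNumber;

  if (std::fabs(stepSize) < minSize)
    stepSize = (duration < 0.0) ? -minSize : minSize;   // clamp, keep sign of duration

  double n = std::fabs(std::ceil(duration / stepSize));
  if (n > (double) ULONG_MAX)                           // protect against overflow
  {
    n = (double) ULONG_MAX;
    stepSize = duration / n;
  }
  stepNumber = (unsigned long) n;
}

int main()
{
  double stepSize = 1e-300;          // absurdly small step size, will be clamped
  unsigned long stepNumber = 10;
  syncSteps(100.0, false, stepSize, stepNumber);
  std::printf("stepSize = %g, stepNumber = %lu\n", stepSize, stepNumber);
  return 0;
}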
/* Main Program */ INT4 main ( INT4 argc, CHAR *argv[] ) { static LALStatus status; INT4 c; UINT4 i; REAL8 dt, totTime; REAL8 sampleRate = -1; REAL8 totalMass = -1, massRatio = -1; REAL8 lowFreq = -1, df, fLow; CHAR *outFile = NULL, *outFileLong = NULL, tail[50]; size_t optarg_len; REAL8 eta; REAL8 newtonianChirpTime, PN1ChirpTime, mergTime; UINT4 numPts; LIGOTimeGPS epoch; REAL8 offset; PhenomCoeffs coeffs; PhenomParams params; REAL4FrequencySeries *Aeff = NULL, *Phieff = NULL; COMPLEX8Vector *uFPlus = NULL, *uFCross = NULL; COMPLEX8 num; REAL4Vector *hPlus = NULL, *hCross = NULL; REAL4TimeSeries *hP = NULL, *hC = NULL; /* REAL4TimeSeries *hP = NULL, *hC = NULL;*/ REAL4FFTPlan *prevPlus = NULL, *prevCross = NULL; /*REAL4Vector *Freq = NULL;*/ UINT4 windowLength; INT4 hPLength; REAL8 linearWindow; /* getopt arguments */ struct option long_options[] = { {"mass-ratio", required_argument, 0, 'q'}, {"low-freq (Hz)", required_argument, 0, 'f'}, {"total-mass (M_sun)", required_argument, 0, 'm'}, {"sample-rate", required_argument, 0, 's'}, {"output-file", required_argument, 0, 'o'}, {"help", no_argument, 0, 'h'}, {"version", no_argument, 0, 'V'}, {0, 0, 0, 0} }; /* parse the arguments */ while ( 1 ) { /* getopt_long stores long option here */ int option_index = 0; /* parse command line arguments */ c = getopt_long_only( argc, argv, "q:t:d:hV", long_options, &option_index ); /* detect the end of the options */ if ( c == -1 ) { break; } switch ( c ) { case 0: fprintf( stderr, "Error parsing option '%s' with argument '%s'\n", long_options[option_index].name, optarg ); exit( 1 ); break; case 'h': /* help message */ print_usage( argv[0] ); exit( 0 ); break; case 'V': /* print version information and exit */ fprintf( stdout, "%s - Compute Ajith's Phenomenological Waveforms " \ "(arXiv:0710.2335) and output them to a plain text file\n" \ "CVS Version: %s\nCVS Tag: %s\n", PROGRAM_NAME, CVS_ID_STRING, \ CVS_NAME_STRING ); exit( 0 ); break; case 'q': /* set mass ratio */ massRatio = atof( optarg ); break; case 'f': /* set low freq */ lowFreq = atof( optarg ); break; case 'm': /* set total mass */ totalMass = atof( optarg ); break; case 's': /* set sample rate */ sampleRate = atof( optarg ); break; case 'o': /* set name of output file */ optarg_len = strlen(optarg) + 1; outFile = (CHAR *)calloc(optarg_len, sizeof(CHAR)); memcpy(outFile, optarg, optarg_len); break; case '?': print_usage( argv[0] ); exit( 1 ); break; default: fprintf( stderr, "ERROR: Unknown error while parsing options\n" ); print_usage( argv[0] ); exit( 1 ); } } if ( optind < argc ) { fprintf( stderr, "ERROR: Extraneous command line arguments:\n" ); while ( optind < argc ) { fprintf ( stderr, "%s\n", argv[optind++] ); } exit( 1 ); } /* * * * * * * * */ /* Main Program */ /* * * * * * * * */ eta = massRatio / pow(1. + massRatio, 2.); /* This freq low is the one used for the FFT */ /* fLow = 2.E-3/(totalMass*LAL_MTSUN_SI); */ fLow = lowFreq; /* Changed by Ajith. 5 May 2008 */ /* Phenomenological coefficients as in Ajith et. 
al */
  GetPhenomCoeffsLongJena( &coeffs );

  /* Compute phenomenological parameters */
  ComputeParamsFromCoeffs( &params, &coeffs, eta, totalMass );

  /* Check validity of arguments */

  /* check we have freqs */
  if ( totalMass < 0 )
  {
    fprintf( stderr, "ERROR: --total-mass must be specified\n" );
    exit( 1 );
  }

  /* check we have mass ratio and delta t*/
  if ( massRatio < 0 )
  {
    fprintf( stderr, "ERROR: --mass-ratio must be specified\n" );
    exit( 1 );
  }
  if ( lowFreq < 0 )
  {
    fprintf( stderr, "ERROR: --low-freq must be specified\n" );
    exit( 1 );
  }
  if ( sampleRate < 0 )
  {
    fprintf( stderr, "ERROR: --sample-rate must be specified\n" );
    exit( 1 );
  }
  if ( lowFreq > params.fCut )
  {
    fprintf( stderr, "\nERROR in --low-freq\n"
             "The value chosen for the low frequency is larger "
             "than the frequency at the merger.\n"
             "Frequency at the merger: %4.2f Hz\nPick either a lower value"
             " for --low-freq or a lower total mass\n\n", params.fCut);
    exit(1);
  }
  if ( lowFreq < fLow )
  {
    fprintf( stderr, "\nERROR in --low-freq\n"
             "The value chosen for the low frequency is lower "
             "than the lowest frequency computed\nby the implemented FFT.\n"
             "Lowest frequency allowed: %4.2f Hz\nPick either a higher value"
             " for --low-freq or a higher total mass\n\n", fLow);
    exit(1);
  }
  if ( outFile == NULL )
  {
    fprintf( stderr, "ERROR: --output-file must be specified\n" );
    exit( 1 );
  }

  /* Complete file name with details of the input variables */
  sprintf(tail, "%s-Phenom_M%3.1f_R%2.1f.dat", outFile, totalMass, massRatio);
  optarg_len = strlen(tail) + strlen(outFile) + 1;
  outFileLong = (CHAR *)calloc(optarg_len, sizeof(CHAR));
  strcpy(outFileLong, tail);

  /* check sample rate is enough */
  if (sampleRate > 4.*params.fCut)   /* Changed by Ajith. 5 May 2008 */
  {
    dt = 1./sampleRate;
  }
  else
  {
    sampleRate = 4.*params.fCut;
    dt = 1./sampleRate;
  }

  /* Estimation of the time duration of the binary */
  /* See Sathya (1994) for the Newtonian and PN1 chirp times */
  /* The merger time is overestimated */
  newtonianChirpTime =
    (5./(256.*eta))*pow(totalMass*LAL_MTSUN_SI,-5./3.)*pow(LAL_PI*fLow,-8./3.);
  PN1ChirpTime =
    5.*(743.+924.*eta)/(64512.*eta*totalMass*LAL_MTSUN_SI*pow(LAL_PI*fLow,2.));
  mergTime = 2000.*totalMass*LAL_MTSUN_SI;
  totTime = 1.2 * (newtonianChirpTime + PN1ChirpTime + mergTime);

  numPts = (UINT4) ceil(totTime/dt);
  df = 1/(numPts * dt);

  /* Compute Amplitude and Phase from the paper (Eq. 4.19) */
  Aeff = XLALHybridP1Amplitude(&params, fLow, df, eta, totalMass, numPts/2+1);
  Phieff = XLALHybridP1Phase(&params, fLow, df, eta, totalMass, numPts/2 +1);

  /* Construct u(f) = Aeff*e^(i*Phieff) */
  XLALComputeComplexVector(&uFPlus, &uFCross, Aeff, Phieff);

  /* Scale this to units of M */
  for (i = 0; i < numPts/2 + 1; i++)
  {
    num = uFPlus->data[i];
    num.re *= 1./(dt*totalMass*LAL_MTSUN_SI);
    num.im *= 1./(dt*totalMass*LAL_MTSUN_SI);
    uFPlus->data[i] = num;
    num = uFCross->data[i];
    num.re *= 1./(dt*totalMass*LAL_MTSUN_SI);
    num.im *= 1./(dt*totalMass*LAL_MTSUN_SI);
    uFCross->data[i] = num;
  }

  /* Inverse Fourier transform */
  LALCreateReverseREAL4FFTPlan( &status, &prevPlus, numPts, 0 );
  LALCreateReverseREAL4FFTPlan( &status, &prevCross, numPts, 0 );

  hPlus = XLALCreateREAL4Vector(numPts);
  hCross = XLALCreateREAL4Vector(numPts);

  LALReverseREAL4FFT( &status, hPlus, uFPlus, prevPlus );
  LALReverseREAL4FFT( &status, hCross, uFCross, prevCross );

  /* The LAL implementation of the FFT omits the factor 1/n */
  for (i = 0; i < numPts; i++)
  {
    hPlus->data[i] /= numPts;
    hCross->data[i] /= numPts;
  }

  /* Create TimeSeries to store more info about the waveforms */
  /* Note: it could be done easier using LALFreqTimeFFT instead of ReverseFFT */
  epoch.gpsSeconds = 0;
  epoch.gpsNanoSeconds = 0;
  hP = XLALCreateREAL4TimeSeries("", &epoch, 0, dt, &lalDimensionlessUnit, numPts);
  hP->data = hPlus;
  hC = XLALCreateREAL4TimeSeries("", &epoch, 0, dt, &lalDimensionlessUnit, numPts);
  hC->data = hCross;

  /* Cutting off the part of the waveform with f < fLow */
  /*
  Freq = XLALComputeFreq( hP, hC);
  hP = XLALCutAtFreq( hP, Freq, lowFreq);
  hC = XLALCutAtFreq( hC, Freq, lowFreq);
  */

  /* multiply the last few samples of the time-series by a linearly
   * dropping window function in order to avoid edges in the data
   * Added by Ajith 6 May 2008 */
  hPLength = hP->data->length;
  windowLength = (UINT4) (20.*totalMass * LAL_MTSUN_SI/dt);
  for (i=1; i<= windowLength; i++){
    linearWindow = (i-1.)/windowLength;
    hP->data->data[hPLength-i] *= linearWindow;
    hC->data->data[hPLength-i] *= linearWindow;
  }

  /* Convert t column to units of (1/M) */
  /*
  offset *= (1./(totalMass * LAL_MTSUN_SI));
  hP->deltaT *= (1./(totalMass * LAL_MTSUN_SI));
  */

  /* Set t = 0 at the merger (defined as the max of the NR wave) */
  XLALFindNRCoalescenceTimeFromhoft( &offset, hP);
  XLALGPSAdd( &(hP->epoch), -offset);
  XLALGPSAdd( &(hC->epoch), -offset);

  /* Print waveforms to file */
  LALPrintHPlusCross( hP, hC, outFileLong );

  /* Free Memory */
  XLALDestroyREAL4FrequencySeries(Aeff);
  XLALDestroyREAL4FrequencySeries(Phieff);

  XLALDestroyREAL4FFTPlan(prevPlus);
  XLALDestroyREAL4FFTPlan(prevCross);

  XLALDestroyCOMPLEX8Vector(uFPlus);
  XLALDestroyCOMPLEX8Vector(uFCross);

  XLALDestroyREAL4TimeSeries(hP);
  XLALDestroyREAL4TimeSeries(hC);
  /* XLALDestroyREAL4TimeSeries(hP); */
  /* XLALDestroyREAL4TimeSeries(hC); */

  return(0);
}
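// Minimal sketch (plain C++, no LAL types) of the linear taper applied above
// to the tail of the time series: the last windowLength samples are multiplied
// by a ramp that falls from ~1 to 0 so the data do not end on a hard edge.
// Array size and window length are made up.
#include <cstdio>
#include <vector>

int main()
{
  std::vector<double> h(16, 1.0);          // stand-in for hP->data->data
  const unsigned windowLength = 5;         // ~20 M worth of samples in the real code

  const unsigned n = (unsigned) h.size();
  for (unsigned i = 1; i <= windowLength; i++)
  {
    double ramp = (i - 1.0) / windowLength;  // 0 at the very last sample
    h[n - i] *= ramp;
  }

  for (unsigned i = 0; i < n; i++)
    std::printf("%u %.2f\n", i, h[i]);
  return 0;
}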
int main(int argc, char ** argv) { if ( argc != 3 ) { fprintf(stderr, "Usage: %s <infile.iv> <outfile.iv>\n", argv[0]); return -1; } SoDB::init(); SoNodeKit::init(); SoInteraction::init(); SoGenerateSceneGraphAction::initClass(); SoTweakAction::initClass(); SoInput in; SoNode * scene, * graph; if ( !in.openFile(argv[1]) ) { fprintf(stderr, "%s: error opening \"%s\" for reading.\n", argv[0], argv[1]); return -1; } scene = SoDB::readAll(&in); if ( scene == NULL ) { fprintf(stderr, "%s: error parsing \"%s\"\n", argv[0], argv[1]); return -1; } scene->ref(); SoGenerateSceneGraphAction action; // action.setDropTypeIfNameEnabled(TRUE); action.apply(scene); graph = action.getGraph(); if ( graph == NULL ) { fprintf(stderr, "%s: error generating scene graph\n", argv[0]); return -1; } graph->ref(); scene->unref(); scene = NULL; // figure out camera settings and needed rendering canvas size SoGetBoundingBoxAction bbaction(SbViewportRegion(64,64)); // just something bbaction.apply(graph); SbBox3f bbox = bbaction.getBoundingBox(); SbVec3f min = bbox.getMin(); SbVec3f max = bbox.getMax(); float bwidth = max[0] - min[0]; float bheight = max[1] - min[1]; // fprintf(stdout, "min: %g %g %g\n", min[0], min[1], min[2]); // fprintf(stdout, "max: %g %g %g\n", max[0], max[1], max[2]); // place camera SoSearchAction search; search.setType(SoCamera::getClassTypeId()); search.setInterest(SoSearchAction::FIRST); search.apply(graph); SoPath * campath = search.getPath(); SoOrthographicCamera * cam = (SoOrthographicCamera *) campath->getTail(); assert(cam != NULL); SbVec3f pos = cam->position.getValue(); cam->position.setValue(SbVec3f(min[0] + ((max[0]-min[0])/2.0), min[1] + ((max[1]-min[1])/2.0), pos[2])); cam->height.setValue(bheight); if ( TRUE ) { // FIXME: only write .iv-scene if asked SoOutput out; if ( !out.openFile(argv[2]) ) { fprintf(stderr, "%s: error opening \"%s\" for writing.\n", argv[0], argv[2]); return -1; } SoWriteAction writer(&out); // writer.setCoinFormattingEnabled(TRUE); writer.apply(graph); } int width = (int) ceil(bwidth * 150.0) + 2; int height = (int) ceil(bheight * 150.0); fprintf(stderr, "image: %d x %d\n", width, height); if ( TRUE ) { // FIXME: only write image if asked SoOffscreenRenderer renderer(SbViewportRegion(width, height)); SoGLRenderAction * glra = renderer.getGLRenderAction(); glra->setNumPasses(9); // FIXME: auto-crop image afterwards? seems like it's a perfect fit right now renderer.setComponents(SoOffscreenRenderer::RGB_TRANSPARENCY); renderer.setBackgroundColor(SbColor(1.0,1.0,1.0)); renderer.render(graph); // FIXME: support command line option filename // FIXME: also support .eps renderer.writeToFile("output.png", "png"); } graph->unref(); return 0; }
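// Plain-C++ sketch (no Coin/Inventor types) of the two calculations done above
// with the scene bounding box: centre an orthographic camera over the box in
// x/y, and derive the offscreen canvas size from the box dimensions with a
// fixed pixels-per-unit factor. The bounding box values are hypothetical.
#include <cmath>
#include <cstdio>

int main()
{
  const float min[3] = { -1.5f, -0.5f, 0.0f };
  const float max[3] = {  2.5f,  1.5f, 1.0f };

  const float bwidth  = max[0] - min[0];
  const float bheight = max[1] - min[1];

  // camera: centred over the box, ortho height set to the box height
  const float cam_x = min[0] + bwidth  / 2.0f;
  const float cam_y = min[1] + bheight / 2.0f;
  std::printf("camera at (%g, %g), ortho height %g\n", cam_x, cam_y, bheight);

  // canvas: 150 pixels per scene unit, plus the same 2-pixel width fudge
  const int width  = (int) std::ceil(bwidth * 150.0) + 2;
  const int height = (int) std::ceil(bheight * 150.0);
  std::printf("image: %d x %d\n", width, height);
  return 0;
}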
int cScene::getTile(glm::vec3 p)
{
    //std::cout << floor(playerx) << " " << ceil(playerz) + 0.5f << std::endl;
    // Note: the lookup uses the stored player position (playerx/playerz), not the
    // argument p, and the floating-point indices are truncated to int implicitly.
    return map[ceil(playerx + 0.1f)][ceil(playerz) + 0.5f];
}
static void GenerateGiantSteps(const GF2EX& f, const GF2EX& h, long l, long verbose) { double t; if (verbose) { cerr << "generating giant steps..."; t = GetTime(); } GF2EXModulus F; build(F, f); GF2EXArgument H; #if 0 double n2 = sqrt(double(F.n)); double n4 = sqrt(n2); double n34 = n2*n4; long sz = long(ceil(n34/sqrt(sqrt(2.0)))); #else long sz = 2*SqrRoot(F.n); #endif build(H, h, F, sz); GF2EX h1; h1 = h; long i; long HexOutput = GF2X::HexOutput; GF2X::HexOutput = 1; if (!use_files) { GiantStepFile.kill(); GiantStepFile.SetLength(l); } for (i = 1; i <= l-1; i++) { if (use_files) { ofstream s; OpenWrite(s, FileName(GF2EX_stem, "giant", i)); s << h1 << "\n"; s.close(); } else GiantStepFile(i) = h1; CompMod(h1, h1, H, F); if (verbose) cerr << "+"; } if (use_files) { ofstream s; OpenWrite(s, FileName(GF2EX_stem, "giant", i)); s << h1 << "\n"; s.close(); } else GiantStepFile(i) = h1; if (verbose) cerr << (GetTime()-t) << "\n"; GF2X::HexOutput = HexOutput; }
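// Generic sketch of the pattern above, outside NTL: an expensive map is
// iterated and every intermediate image ("giant step") is cached, in memory
// here where the real code can also spill each step to a file. The map used
// below, x -> x^b mod p, is only a stand-in for CompMod(h1, h1, H, F); all
// constants are illustrative.
#include <cstdint>
#include <cstdio>
#include <vector>

static uint64_t powmod(uint64_t x, uint64_t e, uint64_t p)
{
  // p is small enough here that 64-bit products cannot overflow
  uint64_t r = 1 % p;
  x %= p;
  while (e) {
    if (e & 1) r = r * x % p;
    x = x * x % p;
    e >>= 1;
  }
  return r;
}

int main()
{
  const uint64_t p = 1000003, b = 7, h = 12345;
  const long l = 6;

  std::vector<uint64_t> giant;          // giant[i-1] plays the role of GiantStepFile(i)
  giant.reserve(l);
  uint64_t h1 = h;
  for (long i = 1; i <= l; i++) {
    giant.push_back(h1);                // store step i
    if (i < l)
      h1 = powmod(h1, b, p);            // analogue of CompMod(h1, h1, H, F)
  }

  for (long i = 0; i < l; i++)
    std::printf("giant step %ld: %llu\n", i + 1, (unsigned long long) giant[i]);
  return 0;
}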
F_NONNULL static void config_cnameset(const char* res_name, const char* stanza, cnset_t* cnset, const vscf_data_t* cfg) { dmn_assert(res_name); dmn_assert(stanza); dmn_assert(cnset); dmn_assert(cfg); if(!vscf_is_hash(cfg)) log_fatal("plugin_weighted: resource '%s' stanza '%s' value must be a hash", res_name, stanza); cnset->count = vscf_hash_get_len(cfg); // service_types cnset->num_svcs = 0; const vscf_data_t* res_stypes = vscf_hash_get_data_byconstkey(cfg, "service_types", true); if (res_stypes) { cnset->count--; // minus one for service_types entry cnset->num_svcs = vscf_array_get_len(res_stypes); if(cnset->num_svcs) { cnset->svc_names = malloc(cnset->num_svcs * sizeof(char*)); for(unsigned i = 0; i < cnset->num_svcs; i++) { const vscf_data_t* this_svc_cfg = vscf_array_get_data(res_stypes, i); if(!vscf_is_simple(this_svc_cfg)) log_fatal("plugin_weighted: resource '%s' (%s): service_types values must be strings", res_name, stanza); cnset->svc_names[i] = strdup(vscf_simple_get_data(this_svc_cfg)); } } } else { cnset->num_svcs = 1; cnset->svc_names = malloc(sizeof(char*)); cnset->svc_names[0] = strdup(DEFAULT_SVCNAME); } // up threshold as double double up_thresh = 0.5; const vscf_data_t* thresh_cfg = vscf_hash_get_data_byconstkey(cfg, "up_thresh", true); if(thresh_cfg) { cnset->count--; // minus one for up_thresh entry if(!vscf_is_simple(thresh_cfg) || !vscf_simple_get_as_double(thresh_cfg, &up_thresh) || up_thresh <= 0.0 || up_thresh > 1.0) log_fatal("plugin_weighted: resource '%s' (%s): 'up_thresh' must be a floating point value in the range (0.0 - 1.0]", res_name, stanza); } // multi option is processed for count-correctness, but ignored (it's not legal // here, but may be present due to inheritance of defaults!) if(vscf_hash_get_data_byconstkey(cfg, "multi", true)) cnset->count--; if(cnset->count > MAX_ITEMS_PER_SET) log_fatal("plugin_weighted: resource '%s' (%s): number of cnames cannot be more than %u", res_name, stanza, MAX_ITEMS_PER_SET); if(!cnset->count) log_fatal("plugin_weighted: resource '%s' (%s): empty cname sets not allowed", res_name, stanza); cnset->items = calloc(cnset->count, sizeof(res_citem_t)); cname_iter_data_t cid = { .cnset = cnset, .res_name = res_name, .stanza = stanza, .item_idx = 0, }; vscf_hash_iterate(cfg, true, config_item_cname, &cid); cnset->weight = 0; for(unsigned i = 0; i < cnset->count; i++) { const unsigned cwt = cnset->items[i].weight; dmn_assert(cwt); cnset->weight += cwt; } dmn_assert(cnset->weight); cnset->up_weight = ceil(up_thresh * cnset->weight); } F_NONNULL static void config_auto(resource_t* res, const vscf_data_t* res_cfg) { dmn_assert(res); dmn_assert(res_cfg); dmn_assert(vscf_is_hash(res_cfg)); // mark all possible parameter-keys vscf_hash_get_data_byconstkey(res_cfg, "service_types", true); vscf_hash_get_data_byconstkey(res_cfg, "multi", true); vscf_hash_get_data_byconstkey(res_cfg, "up_thresh", true); // make a copy that contains no parameters, only item-name keys vscf_data_t* res_cfg_noparams = vscf_clone(res_cfg, true); if(!vscf_hash_get_len(res_cfg_noparams)) log_fatal("plugin_weighted: resource '%s' (direct) contains no weighted items", res->name); const char* first_name = vscf_hash_get_key_byindex(res_cfg_noparams, 0, NULL); const vscf_data_t* first_cfg = vscf_hash_get_data_byindex(res_cfg_noparams, 0); if(vscf_is_hash(first_cfg)) { // grouped address mode... 
if(!vscf_hash_get_len(first_cfg)) log_fatal("plugin_weighted: resource '%s' (direct): group '%s': contains no addresses", res->name, first_name); const char* lb_name = vscf_hash_get_key_byindex(first_cfg, 0, NULL); const vscf_data_t* lb_cfg = vscf_hash_get_data_byindex(first_cfg, 0); if(!vscf_is_array(lb_cfg) || !vscf_array_get_len(lb_cfg) || !vscf_is_simple(vscf_array_get_data(lb_cfg, 0))) log_fatal("plugin_weighted: resource '%s' (direct): group '%s': item '%s': value must be an array of [ IP, weight ]", res->name, first_name, lb_name); const char* first_addr_txt = vscf_simple_get_data(vscf_array_get_data(lb_cfg, 0)); dmn_anysin_t temp_sin; int addr_err = gdnsd_anysin_getaddrinfo(first_addr_txt, NULL, &temp_sin); if(addr_err) log_fatal("plugin_weighted: resource '%s' (direct): group '%s': item '%s': could not parse '%s' as an IP address: %s", res->name, first_name, lb_name, first_addr_txt, gai_strerror(addr_err)); if(temp_sin.sa.sa_family == AF_INET6) { res->addrs_v6 = calloc(1, sizeof(addrset_t)); config_addrset(res->name, "direct", true, res->addrs_v6, res_cfg); } else { dmn_assert(temp_sin.sa.sa_family == AF_INET); res->addrs_v4 = calloc(1, sizeof(addrset_t)); config_addrset(res->name, "direct", false, res->addrs_v4, res_cfg); } } else if(vscf_is_array(first_cfg)) { // ungrouped address, or cnames const vscf_data_t* first_ac = vscf_array_get_data(first_cfg, 0); if(!first_ac || !vscf_is_simple(first_ac)) log_fatal("plugin_weighted: resource '%s' (direct): item '%s': first element of array should be an IP address or CNAME string", res->name, first_name); dmn_anysin_t temp_sin; if(gdnsd_anysin_getaddrinfo(vscf_simple_get_data(first_ac), NULL, &temp_sin)) { // was not a valid address, try cnames mode res->cnames = calloc(1, sizeof(cnset_t)); config_cnameset(res->name, "direct", res->cnames, res_cfg); } else { // was a valid address, try addrset mode if(temp_sin.sa.sa_family == AF_INET6) { res->addrs_v6 = calloc(1, sizeof(addrset_t)); config_addrset(res->name, "direct", true, res->addrs_v6, res_cfg); } else { dmn_assert(temp_sin.sa.sa_family == AF_INET); res->addrs_v4 = calloc(1, sizeof(addrset_t)); config_addrset(res->name, "direct", false, res->addrs_v4, res_cfg); } } } else { log_fatal("plugin_weighted: resource '%s' (direct): item '%s': resource type not detectable (should be array of [ IP, weight ], array of [ CNAME, weight ], or hashed address group ...)", res->name, first_name); } vscf_destroy(res_cfg_noparams); }
static int renderer_overlay_wallpaper( GdkPixbuf *background, const char *wallpaper,
                                       MMB_Screen *screen, int clip )
{
    GError *error = NULL;
    GdkPixbuf *image = gdk_pixbuf_new_from_file( wallpaper, &error );

    if ( error != NULL ) {
        fprintf( stderr, "Failed to parse image: %s\n" , error->message );
        return 0;
    }

    double wp_width = gdk_pixbuf_get_width( image );
    double wp_height = gdk_pixbuf_get_height( image );

    for ( int monitor = 0; monitor < screen->num_monitors; monitor++ ) {
        MMB_Rectangle rectangle = screen->monitors[monitor];
        double w_scale = wp_width/( double )( rectangle.w );
        double h_scale = wp_height/( double )( rectangle.h );

        // Picture is smaller than the screen, center it.
        if ( w_scale < 1 && h_scale < 1 ) {
            gdk_pixbuf_copy_area( image, 0,0, wp_width, wp_height,
                                  background,
                                  rectangle.x + ( rectangle.w-wp_width )/2,
                                  rectangle.y + ( rectangle.h-wp_height )/2 );
        }
        // Picture is smaller on one of the sides and we want to clip.
        else if ( clip && ( w_scale < 1 || h_scale < 1 ) ) {
            double x_off = ( ( float )rectangle.w-wp_width )/2.0;
            double y_off = ( ( float )rectangle.h-wp_height )/2.0;
            gdk_pixbuf_copy_area( image,
                                  -( x_off < 0 )*x_off,-( y_off < 0 )*y_off,
                                  wp_width+( x_off < 0 )*2*x_off, wp_height+( y_off < 0 )*2*y_off,
                                  background,
                                  rectangle.x + ( ( x_off> 0 )?x_off:0 ),
                                  rectangle.y + ( ( y_off> 0 )?y_off:0 ) );
        }
        // Picture is bigger than or equal to the screen.
        // Scale to fit.
        else {
            int new_w = 0;
            int new_h = 0;
            double x_off = 0;
            double y_off = 0;

            if ( clip ) {
                if ( w_scale < h_scale ) {
                    new_w = wp_width/w_scale;
                    new_h = wp_height/w_scale;
                }
                else {
                    new_w = wp_width/h_scale;
                    new_h = wp_height/h_scale;
                }

                x_off = ( ( new_w-rectangle.w )/2.0 );
                y_off = ( ( new_h-rectangle.h )/2.0 );
            }
            else {
                if ( w_scale > h_scale ) {
                    new_w = wp_width/w_scale;
                    new_h = wp_height/w_scale;
                }
                else {
                    new_w = wp_width/h_scale;
                    new_h = wp_height/h_scale;
                }
            }

            GdkPixbuf *scaled_wp = gdk_pixbuf_scale_simple( image, new_w, new_h, GDK_INTERP_HYPER );

            gdk_pixbuf_copy_area( scaled_wp,
                                  ( int )ceil( x_off ),( int )ceil( y_off ),
                                  new_w-ceil(x_off)*2, new_h-ceil(y_off)*2,
                                  background,
                                  rectangle.x + ( ( double )rectangle.w-new_w+x_off*2 )/2,
                                  rectangle.y + ( ( double )rectangle.h-new_h+y_off*2 )/2 );
            g_object_unref( scaled_wp );
        }
    }

    g_object_unref( image );
    return 1;
}
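// Stand-alone sketch of the scaling decision above: "clip" scales by the
// smaller of the two scale factors so the image fills the monitor and is
// cropped symmetrically, otherwise it scales by the larger factor so the whole
// image fits. No GdkPixbuf involved; the sizes are illustrative.
#include <cmath>
#include <cstdio>

int main()
{
  const double wp_w = 1600, wp_h = 900;      // wallpaper size
  const double mon_w = 1080, mon_h = 1920;   // portrait monitor

  const double w_scale = wp_w / mon_w;
  const double h_scale = wp_h / mon_h;

  for (int clip = 0; clip <= 1; clip++)
  {
    double s = clip ? std::fmin(w_scale, h_scale)   // fill: may overflow one axis
                    : std::fmax(w_scale, h_scale);  // fit: may leave borders
    int new_w = (int)(wp_w / s);
    int new_h = (int)(wp_h / s);
    double x_off = clip ? (new_w - mon_w) / 2.0 : 0.0;  // amount cropped per side
    double y_off = clip ? (new_h - mon_h) / 2.0 : 0.0;
    std::printf("clip=%d: scaled %dx%d, crop offsets %.1f,%.1f\n",
                clip, new_w, new_h, x_off, y_off);
  }
  return 0;
}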
int send_file(HTHREAD_PTR descr, char *filename)
{
    WSADATA wsa_data;
    SOCKET data_socket = INVALID_SOCKET;
    struct sockaddr_in send_data_addr;
    HPACKET packet;
    HPARTITION partition;
    FILE *fp;
    int return_code;
    unsigned long at_location, read_amount, tries;
    unsigned char packet_count;
    char control_message[MAX_INPUT_LENGTH];

    if(fopen_s(&fp, filename, "rb") > 0)
    {
        memset(control_message, 0, sizeof(control_message));
        control_message[0] = CONTROL_MESSAGE_NO_SUCH_FILE;
        return_code = send(descr->socket, control_message, (int)strlen(control_message), 0);
        return 1;
    }

    for(tries = 0; tries < CONTROL_MESSAGE_RECV_RETRIES; tries++)
    {
        memset(control_message, 0, sizeof(control_message));
        return_code = recv(descr->socket, control_message, MAX_INPUT_LENGTH-1, 0);
        if(return_code < 1)
        {
            printf("failed to recv command: %d\n", WSAGetLastError());
            closesocket(data_socket);
            return 0;
        }
        if(control_message[0] == CONTROL_MESSAGE_OK_START_SENDING)
            break;
    }
    if(tries >= CONTROL_MESSAGE_RECV_RETRIES)
    {
        printf("No CONTROL_MESSAGE_OK_START_SENDING from %s\n", inet_ntoa(descr->address.sin_addr));
        closesocket(data_socket);
        return 0;
    }

    WSAStartup(MAKEWORD(2,2), &wsa_data);
    data_socket = socket(AF_INET, SOCK_DGRAM, IPPROTO_UDP);

    send_data_addr.sin_family = AF_INET;
    send_data_addr.sin_port = htons(CONNECT_PORT_N);//descr->address.sin_port;
    send_data_addr.sin_addr = descr->address.sin_addr;//inet_addr("123.456.789.1");

    packet.partition_id = 0;
    packet.reserved = 0;

    while(!feof(fp))
    {
        memset(partition.packet_stats, 0, MAX_PARTITION_DIVISIONS+1);
        partition.actual_size = (unsigned long)fread(partition.data, 1, PARTITION_LENGTH_TOTAL, fp);
        /* round up so that a trailing partial packet is counted as well */
        packet_count = (unsigned char)ceil((double)partition.actual_size / PACKET_LENGTH_DATA);

        memset(control_message, 0, sizeof(control_message));
        sprintf_s(control_message, MAX_INPUT_LENGTH, " %lu", partition.actual_size);
        control_message[0] = CONTROL_MESSAGE_SENDING_DATA;
        return_code = send(descr->socket, control_message, (int)strlen(control_message)+1, 0);
        if(return_code == SOCKET_ERROR)
        {
            printf("failed to send command: %d\n", WSAGetLastError());
            closesocket(data_socket);
            return 0;
        }

        while(1)
        {
            for(packet.packet_id = 0; packet.packet_id < packet_count; packet.packet_id++)
            {
                if(partition.packet_stats[packet.packet_id] != 1)
                    break;
            }
            if(packet.packet_id == packet_count)
                break;

            for(packet.packet_id = 0; packet.packet_id < packet_count; packet.packet_id++)
            {
                if(partition.packet_stats[packet.packet_id] != 1)
                {
                    memset(packet.data, 0, PACKET_LENGTH_DATA);
                    at_location = packet.packet_id * PACKET_LENGTH_DATA;
                    read_amount = at_location + PACKET_LENGTH_DATA < partition.actual_size ?
                                  PACKET_LENGTH_DATA : partition.actual_size - at_location;
                    memcpy(packet.data, &partition.data[at_location], read_amount);
                    packet.crc = compute_crc((const unsigned char *)packet.data, PACKET_LENGTH_DATA);

                    return_code = sendto(data_socket, (char *)&packet, sizeof(packet), 0,
                                         (SOCKADDR *)&send_data_addr, sizeof(send_data_addr));
                    if(return_code == SOCKET_ERROR)
                    {
                        printf("failed to send command: %d\n", WSAGetLastError());
                        closesocket(data_socket);
                        return 0;
                    }
                    Sleep(5);
                }
            }

            memset(control_message, 0, sizeof(control_message));
            control_message[0] = CONTROL_MESSAGE_PARTITION_SENT;
            return_code = send(descr->socket, control_message, (int)strlen(control_message)+1, 0);
            if(return_code == SOCKET_ERROR)
            {
                printf("failed to send command: %d\n", WSAGetLastError());
                closesocket(data_socket);
                return 0;
            }

            for(tries = 0; tries < CONTROL_MESSAGE_RECV_RETRIES; tries++)
            {
                memset(control_message, 0, sizeof(control_message));
                return_code = recv(descr->socket, control_message, MAX_INPUT_LENGTH-1, 0);
                if(return_code < 1)
                {
                    printf("failed to recv command: %d\n", WSAGetLastError());
                    closesocket(data_socket);
                    return 0;
                }
                if(control_message[0] == CONTROL_MESSAGE_PARTITION_STATUS)
                {
                    memset(partition.packet_stats, 0, MAX_PARTITION_DIVISIONS+1);
                    memcpy(partition.packet_stats, &control_message[2], MAX_PARTITION_DIVISIONS);
                    break;
                }
            }
            if(tries >= CONTROL_MESSAGE_RECV_RETRIES)
            {
                printf("No CONTROL_MESSAGE_PARTITION_STATUS from %s\n", inet_ntoa(descr->address.sin_addr));
                closesocket(data_socket);
                return 0;
            }
        }

        packet.partition_id++;
    }

    fclose(fp);

    memset(control_message, 0, sizeof(control_message));
    control_message[0] = CONTROL_MESSAGE_ALL_DATA_SENT;
    return_code = send(descr->socket, control_message, (int)strlen(control_message)+1, 0);
    if(return_code == SOCKET_ERROR)
    {
        printf("failed to send command: %d\n", WSAGetLastError());
        closesocket(data_socket);
        return 1;
    }

    closesocket(data_socket);
    return 1;
}
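// Small sketch of the partition/packet arithmetic above: the number of packets
// has to be computed with a proper ceiling (the cast before the division
// matters), and the last packet may carry fewer payload bytes. The constants
// are illustrative stand-ins, not the real protocol's values.
#include <cmath>
#include <cstdio>

int main()
{
  const unsigned long PACKET_LENGTH_DATA = 512;
  const unsigned long actual_size = 1300;    // bytes read into this partition

  // floating-point ceiling, as in the code above
  unsigned packet_count = (unsigned) std::ceil((double) actual_size / PACKET_LENGTH_DATA);
  // equivalent pure-integer form
  unsigned packet_count_int = (unsigned) ((actual_size + PACKET_LENGTH_DATA - 1) / PACKET_LENGTH_DATA);

  std::printf("packets: %u (int form: %u)\n", packet_count, packet_count_int);

  for (unsigned id = 0; id < packet_count; id++)
  {
    unsigned long at = id * PACKET_LENGTH_DATA;
    unsigned long len = (at + PACKET_LENGTH_DATA < actual_size)
                        ? PACKET_LENGTH_DATA
                        : actual_size - at;
    std::printf("packet %u: offset %lu, %lu payload bytes\n", id, at, len);
  }
  return 0;
}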
F_NONNULL static void config_addrset(const char* res_name, const char* stanza, const bool ipv6, addrset_t* addrset, const vscf_data_t* cfg) { dmn_assert(res_name); dmn_assert(stanza); dmn_assert(addrset); dmn_assert(cfg); if(!vscf_is_hash(cfg)) log_fatal("plugin_weighted: resource '%s' stanza '%s' value must be a hash", res_name, stanza); const vscf_data_t* parent = vscf_get_parent(cfg); // inherit down the applicable res-level parameters vscf_hash_inherit(parent, (vscf_data_t*)cfg, "service_types", true); vscf_hash_inherit(parent, (vscf_data_t*)cfg, "multi", true); vscf_hash_inherit(parent, (vscf_data_t*)cfg, "up_thresh", true); // Get a starting assumption of our item count addrset->count = vscf_hash_get_len(cfg); /////// Process the parameters... // service_types addrset->num_svcs = 0; const vscf_data_t* res_stypes = vscf_hash_get_data_byconstkey(cfg, "service_types", true); if (res_stypes) { addrset->count--; // minus one for service_types entry addrset->num_svcs = vscf_array_get_len(res_stypes); if(addrset->num_svcs) { addrset->svc_names = malloc(addrset->num_svcs * sizeof(char*)); for(unsigned i = 0; i < addrset->num_svcs; i++) { const vscf_data_t* this_svc_cfg = vscf_array_get_data(res_stypes, i); if(!vscf_is_simple(this_svc_cfg)) log_fatal("plugin_weighted: resource '%s' (%s): service_types values must be strings", res_name, stanza); addrset->svc_names[i] = strdup(vscf_simple_get_data(this_svc_cfg)); } } } else { addrset->num_svcs = 1; addrset->svc_names = malloc(sizeof(char*)); addrset->svc_names[0] = strdup(DEFAULT_SVCNAME); } // multi option addrset->multi = false; const vscf_data_t* multi_cfg = vscf_hash_get_data_byconstkey(cfg, "multi", true); if(multi_cfg) { addrset->count--; // minus one for multi entry if(!vscf_is_simple(multi_cfg) || !vscf_simple_get_as_bool(multi_cfg, &addrset->multi)) log_fatal("plugin_weighted: resource '%s' (%s): 'multi' must be a boolean value ('true' or 'false')", res_name, stanza); } // up threshold as double double up_thresh = 0.5; const vscf_data_t* thresh_cfg = vscf_hash_get_data_byconstkey(cfg, "up_thresh", true); if(thresh_cfg) { addrset->count--; // minus one for up_thresh entry if(!vscf_is_simple(thresh_cfg) || !vscf_simple_get_as_double(thresh_cfg, &up_thresh) || up_thresh <= 0.0 || up_thresh > 1.0) log_fatal("plugin_weighted: resource '%s' (%s): 'up_thresh' must be a floating point value in the range (0.0 - 1.0]", res_name, stanza); } if(addrset->count > MAX_ITEMS_PER_SET) log_fatal("plugin_weighted: resource '%s' (%s): number of direct groups or addrs within one family cannot be more than %u", res_name, stanza, MAX_ITEMS_PER_SET); if(!addrset->count) log_fatal("plugin_weighted: resource '%s' (%s): empty address-family sets not allowed", res_name, stanza); addrset->items = calloc(addrset->count, sizeof(res_aitem_t)); addrset->gmode = RES_ASET_UNKNOWN; addr_iter_data_t aid = { .item_idx = 0, .addrset = addrset, .res_name = res_name, .stanza = stanza, .ipv6 = ipv6 }; vscf_hash_iterate(cfg, true, config_addrset_item, &aid); addrset->weight = 0; addrset->max_weight = 0; for(unsigned i = 0; i < addrset->count; i++) { const unsigned iwt = addrset->items[i].weight; const unsigned num_addrs = addrset->items[i].count; dmn_assert(iwt); dmn_assert(addrset->items[i].max_weight); addrset->weight += iwt; if(addrset->max_weight < iwt) addrset->max_weight = iwt; if(addrset->max_addrs_pergroup < num_addrs) addrset->max_addrs_pergroup = num_addrs; } dmn_assert(addrset->weight); dmn_assert(addrset->max_weight); addrset->up_weight = ceil(up_thresh * 
addrset->weight); dmn_assert(addrset->up_weight); } typedef struct { cnset_t* cnset; const char* res_name; const char* stanza; unsigned item_idx; } cname_iter_data_t; F_NONNULL static bool config_item_cname(const char* item_name, unsigned klen V_UNUSED, const vscf_data_t* cfg_data, void* cid_asvoid) { dmn_assert(item_name); dmn_assert(cfg_data); dmn_assert(cid_asvoid); cname_iter_data_t* cid = (cname_iter_data_t*)cid_asvoid; cnset_t* cnset = cid->cnset; const char* res_name = cid->res_name; const char* stanza = cid->stanza; const unsigned item_idx = cid->item_idx++; res_citem_t* res_item = &cnset->items[item_idx]; long wtemp = 0; if(!vscf_is_array(cfg_data) || (2 != vscf_array_get_len(cfg_data)) || !vscf_is_simple(vscf_array_get_data(cfg_data, 0)) || !vscf_is_simple(vscf_array_get_data(cfg_data, 1)) || !vscf_simple_get_as_long(vscf_array_get_data(cfg_data, 1), &wtemp) || wtemp < 1 || wtemp > MAX_WEIGHT ) log_fatal("plugin_weighted: resource '%s' (%s), item '%s': values in cname mode must be arrays of [ CNAME, WEIGHT ], where weight must be an integer in the range 1 - " MAX_WEIGHT_STR, res_name, stanza, item_name); res_item->weight = wtemp; const vscf_data_t* cn = vscf_array_get_data(cfg_data, 0); const char* cname_txt = vscf_simple_get_data(cn); uint8_t* dname = malloc(256); dname_status_t dnstat = vscf_simple_get_as_dname(cn, dname); if(dnstat == DNAME_INVALID) log_fatal("plugin_weighted: resource '%s' (%s), item '%s': '%s' is not a legal domainname", res_name, stanza, item_name, vscf_simple_get_data(vscf_array_get_data(cfg_data, 0))); if(dnstat == DNAME_VALID) dname = dname_trim(dname); res_item->cname = dname; if(cnset->num_svcs) { res_item->indices = malloc(cnset->num_svcs * sizeof(unsigned)); for(unsigned i = 0; i < cnset->num_svcs; i++) res_item->indices[i] = gdnsd_mon_cname(cnset->svc_names[i], cname_txt, dname); } log_debug("plugin_weighted: resource '%s' (%s), item '%s', CNAME '%s' added with weight %u", res_name, stanza, item_name, logf_dname(dname), res_item->weight); return true; }
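// Rough stand-alone sketch (no vscf/gdnsd types) of the weight bookkeeping the
// configuration functions above end with: the total weight is the sum of the
// item weights, and up_weight is the ceiling of up_thresh times that total,
// i.e. roughly the amount of healthy weight wanted before the whole set is
// treated as up. Item names and weights are made up.
#include <cmath>
#include <cstdio>
#include <vector>

int main()
{
  struct Item { const char * name; unsigned weight; bool up; };
  std::vector<Item> items = {
    { "lb01", 45, true  },
    { "lb02", 45, true  },
    { "lb03", 10, false },
  };
  const double up_thresh = 0.5;   // must lie in (0.0, 1.0]

  unsigned total = 0, up_total = 0;
  for (const Item & it : items) {
    total += it.weight;
    if (it.up) up_total += it.weight;
  }
  const unsigned up_weight = (unsigned) std::ceil(up_thresh * total);

  std::printf("total=%u up=%u threshold=%u -> resource looks %s\n",
              total, up_total, up_weight, (up_total >= up_weight) ? "up" : "down");
  return 0;
}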
void CvGBTrees::find_gradient(const int k) { int* sample_data = sample_idx->data.i; int* subsample_data = subsample_train->data.i; float* grad_data = data->responses->data.fl; float* resp_data = orig_response->data.fl; float* current_data = sum_response->data.fl; switch (params.loss_function_type) // loss_function_type in // {SQUARED_LOSS, ABSOLUTE_LOSS, HUBER_LOSS, DEVIANCE_LOSS} { case SQUARED_LOSS: { for (int i=0; i<get_len(subsample_train); ++i) { int s_step = (sample_idx->cols > sample_idx->rows) ? 1 : sample_idx->step/CV_ELEM_SIZE(sample_idx->type); int idx = *(sample_data + subsample_data[i]*s_step); grad_data[idx] = resp_data[idx] - current_data[idx]; } }; break; case ABSOLUTE_LOSS: { for (int i=0; i<get_len(subsample_train); ++i) { int s_step = (sample_idx->cols > sample_idx->rows) ? 1 : sample_idx->step/CV_ELEM_SIZE(sample_idx->type); int idx = *(sample_data + subsample_data[i]*s_step); grad_data[idx] = Sign(resp_data[idx] - current_data[idx]); } }; break; case HUBER_LOSS: { float alpha = 0.2f; int n = get_len(subsample_train); int s_step = (sample_idx->cols > sample_idx->rows) ? 1 : sample_idx->step/CV_ELEM_SIZE(sample_idx->type); float* residuals = new float[n]; for (int i=0; i<n; ++i) { int idx = *(sample_data + subsample_data[i]*s_step); residuals[i] = fabs(resp_data[idx] - current_data[idx]); } icvSortFloat(residuals, n, 0.0f); delta = residuals[int(ceil(n*alpha))]; for (int i=0; i<n; ++i) { int idx = *(sample_data + subsample_data[i]*s_step); float r = resp_data[idx] - current_data[idx]; grad_data[idx] = (fabs(r) > delta) ? delta*Sign(r) : r; } delete[] residuals; }; break; case DEVIANCE_LOSS: { for (int i=0; i<get_len(subsample_train); ++i) { double exp_fk = 0; double exp_sfi = 0; int s_step = (sample_idx->cols > sample_idx->rows) ? 1 : sample_idx->step/CV_ELEM_SIZE(sample_idx->type); int idx = *(sample_data + subsample_data[i]*s_step); for (int j=0; j<class_count; ++j) { double res; res = current_data[idx + j*sum_response->cols]; res = exp(res); if (j == k) exp_fk = res; exp_sfi += res; } int orig_label = int(resp_data[idx]); /* grad_data[idx] = (float)(!(k-class_labels->data.i[orig_label]+1)) - (float)(exp_fk / exp_sfi); */ int ensemble_label = 0; while (class_labels->data.i[ensemble_label] - orig_label) ensemble_label++; grad_data[idx] = (float)(!(k-ensemble_label)) - (float)(exp_fk / exp_sfi); } }; break; default: break; } } // CvGBTrees::find_gradient(...)
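// Self-contained sketch of the HUBER_LOSS branch above: delta is taken as the
// alpha-quantile of the absolute residuals, and each pseudo-residual is the
// raw residual clipped to [-delta, delta]. Plain std::sort replaces the OpenCV
// helper; the data are made up.
#include <algorithm>
#include <cmath>
#include <cstdio>
#include <vector>

int main()
{
  const float alpha = 0.2f;
  std::vector<float> response = { 1.0f, 2.0f, 3.0f, 10.0f, 5.0f };
  std::vector<float> current  = { 0.5f, 2.5f, 2.0f,  1.0f, 4.0f };

  const int n = (int) response.size();
  std::vector<float> abs_res(n);
  for (int i = 0; i < n; ++i)
    abs_res[i] = std::fabs(response[i] - current[i]);

  std::sort(abs_res.begin(), abs_res.end());
  int qi = (int) std::ceil(n * alpha);
  if (qi >= n) qi = n - 1;                    // guard the index (the original trusts it)
  const float delta = abs_res[qi];

  for (int i = 0; i < n; ++i)
  {
    float r = response[i] - current[i];
    float grad = (std::fabs(r) > delta) ? delta * ((r > 0) ? 1.0f : -1.0f) : r;
    std::printf("i=%d r=%+.2f grad=%+.2f (delta=%.2f)\n", i, r, grad, delta);
  }
  return 0;
}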
void DoClientThinks( gentity_t *ent ) { int lastCmd, lastTime; int latestTime; int drop_threshold = LAG_MAX_DROP_THRESHOLD; int startPackets = ent->client->cmdcount; if ( ent->client->cmdcount <= 0 ) return; // allow some more movement if time has passed latestTime = trap_Milliseconds(); if ( ent->client->lastCmdRealTime > latestTime ) { // zinx - stoopid server went backwards in time, reset the delta // instead of giving them even -less- movement ability ent->client->cmddelta = 0; } else { ent->client->cmddelta -= (latestTime - ent->client->lastCmdRealTime); } if ( ent->client->cmdcount <= 1 && ent->client->cmddelta < 0 ) ent->client->cmddelta = 0; ent->client->lastCmdRealTime = latestTime; lastCmd = (ent->client->cmdhead + ent->client->cmdcount - 1) % LAG_MAX_COMMANDS; lastTime = ent->client->ps.commandTime; latestTime = ent->client->cmds[lastCmd].serverTime; while ( ent->client->cmdcount > 0 ) { usercmd_t *cmd = &ent->client->cmds[ent->client->cmdhead]; float speed, delta, scale; int savedTime; qboolean deltahax = qfalse; int serverTime = cmd->serverTime; int totalDelta = latestTime - cmd->serverTime; int timeDelta; if ( ent->client->pers.pmoveFixed ) { serverTime = ((serverTime + pmove_msec.integer-1) / pmove_msec.integer) * pmove_msec.integer; } timeDelta = serverTime - lastTime; if ( totalDelta >= drop_threshold ) { // zinx - whoops. too lagged. drop_threshold = LAG_MIN_DROP_THRESHOLD; lastTime = ent->client->ps.commandTime = cmd->serverTime; goto drop_packet; } if ( totalDelta < 0 ) { // zinx - oro? packet from the future goto drop_packet; } if ( timeDelta <= 0 ) { // zinx - packet from the past goto drop_packet; } scale = 1.f / LAG_DECAY; speed = G_CmdScale( ent, cmd ); delta = (speed * (float)timeDelta); delta *= scale; if ( timeDelta > 50 ) { timeDelta = 50; delta = (speed * (float)timeDelta); delta *= scale; deltahax = qtrue; } if ( (ent->client->cmddelta + delta) >= LAG_MAX_DELTA ) { // too many commands this server frame // if it'll fit in the next frame, just wait until then. if ( delta < LAG_MAX_DELTA && (totalDelta + delta) < LAG_MIN_DROP_THRESHOLD ) break; // try to split it up in to smaller commands delta = ((float)LAG_MAX_DELTA - ent->client->cmddelta); timeDelta = ceil(delta / speed); // prefer speedup delta = (float)timeDelta * speed; if ( timeDelta < 1 ) break; delta *= scale; deltahax = qtrue; } ent->client->cmddelta += delta; if ( deltahax ) { savedTime = cmd->serverTime; cmd->serverTime = lastTime + timeDelta; } else { savedTime = 0; // zinx - shut up compiler } // zinx - erh. hack, really. make it run for the proper amount of time. ent->client->ps.commandTime = lastTime; ClientThink_cmd( ent , cmd ); lastTime = ent->client->ps.commandTime; if ( deltahax ) { cmd->serverTime = savedTime; if ( delta <= 0.1f ) break; continue; } drop_packet: if ( ent->client->cmdcount <= 0 ) { // ent->client was cleared... 
break; } ent->client->cmdhead = (ent->client->cmdhead + 1) % LAG_MAX_COMMANDS; ent->client->cmdcount--; continue; } // zinx - added ping, packets processed this frame // warning: eats bandwidth like popcorn if ( g_antiwarp.integer & 32 ) { trap_SendServerCommand( ent - g_entities, va( "cp \"%d %d\n\"", latestTime - lastTime, startPackets - ent->client->cmdcount ) ); } // zinx - debug; size is added lag (amount above player's network lag) // rotation is time if ( (g_antiwarp.integer & 16) && ent->client->cmdcount ) { vec3_t org, parms; VectorCopy( ent->client->ps.origin, org ); SnapVector( org ); parms[0] = 3; parms[1] = (float)(latestTime - ent->client->ps.commandTime) / 10.f; if (parms[1] < 1.f) parms[1] = 1.f; parms[2] = (ent->client->ps.commandTime * 180.f) / 1000.f; //etpro_AddDebugLine( org, parms, ((ent - g_entities) % 32), LINEMODE_SPOKES, LINESHADER_RAILCORE, 0, qfalse ); } //ent->client->ps.stats[STAT_ANTIWARP_DELAY] = latestTime - ent->client->ps.commandTime; //if (ent->client->ps.stats[STAT_ANTIWARP_DELAY] < 0) // ent->client->ps.stats[STAT_ANTIWARP_DELAY] = 0; }
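The antiwarp loop above budgets movement per server frame: each usercmd adds roughly speed * timeDelta / LAG_DECAY to cmddelta, and a command that would blow past LAG_MAX_DELTA is either deferred to the next frame or shortened to spend exactly the remaining budget. Below is a hedged sketch of just that splitting decision; the constants, struct, and function are stand-ins, and the scaling is one consistent reading of the code rather than the mod's exact arithmetic.

#include <cmath>

// Illustrative constants; the real values live in the antiwarp code above.
const float LAG_MAX_DELTA = 75.0f;
const float LAG_DECAY     = 1.5f;

struct SplitResult {
    bool  runNow;     // false -> hold the command for a later server frame
    int   timeDelta;  // milliseconds of the command to execute now
    float cost;       // amount added to the per-frame budget (cmddelta)
};

// Decide how much of a command of length timeDelta (ms) fits into the
// remaining budget; speed is the movement scale from G_CmdScale().
SplitResult splitCommand(float cmddelta, float speed, int timeDelta)
{
    const float scale = 1.0f / LAG_DECAY;
    const float cost  = speed * (float)timeDelta * scale;

    if (cmddelta + cost < LAG_MAX_DELTA)
        return { true, timeDelta, cost };              // fits as-is

    if (speed <= 0.0f)
        return { false, 0, 0.0f };                     // nothing to scale against

    // Shorten the command so it consumes the remaining budget, rounding the
    // time up ("prefer speedup") the way the loop above does.
    const float remaining = LAG_MAX_DELTA - cmddelta;
    const int   shortened = (int)std::ceil(remaining / (speed * scale));
    if (shortened < 1)
        return { false, 0, 0.0f };                     // wait for the next frame

    return { true, shortened, speed * (float)shortened * scale };
}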
void OMXPlayerAudio::HandleSyncError(double duration) { double clock = m_av_clock->GetClock(); double error = m_audioClock - clock; int64_t now; if( fabs(error) > DVD_MSEC_TO_TIME(100) || m_syncclock ) { m_av_clock->Discontinuity(clock+error); /* if(m_speed == DVD_PLAYSPEED_NORMAL) CLog::Log(LOGDEBUG, "OMXPlayerAudio:: Discontinuity - was:%f, should be:%f, error:%f\n", clock, clock+error, error); */ m_errorbuff = 0; m_errorcount = 0; m_skipdupcount = 0; m_error = 0; m_syncclock = false; m_errortime = m_av_clock->CurrentHostCounter(); return; } if (m_speed != DVD_PLAYSPEED_NORMAL) { m_errorbuff = 0; m_errorcount = 0; m_integral = 0; m_skipdupcount = 0; m_error = 0; m_errortime = m_av_clock->CurrentHostCounter(); return; } //check if measured error for 1 second now = m_av_clock->CurrentHostCounter(); if ((now - m_errortime) >= m_freq) { m_errortime = now; m_error = m_errorbuff / m_errorcount; m_errorbuff = 0; m_errorcount = 0; if (m_synctype == SYNC_DISCON) { double limit, error; if (m_av_clock->GetRefreshRate(&limit) > 0) { //when the videoreferenceclock is running, the discontinuity limit is one vblank period limit *= DVD_TIME_BASE; //make error a multiple of limit, rounded towards zero, //so it won't interfere with the sync methods in CXBMCRenderManager::WaitPresentTime if (m_error > 0.0) error = limit * floor(m_error / limit); else error = limit * ceil(m_error / limit); } else { limit = DVD_MSEC_TO_TIME(10); error = m_error; } /* limit = DVD_MSEC_TO_TIME(10); error = m_error; */ if (fabs(error) > limit - 0.001) { m_av_clock->Discontinuity(clock+error); /* if(m_speed == DVD_PLAYSPEED_NORMAL) CLog::Log(LOGDEBUG, "COMXPlayerAudio:: Discontinuity - was:%f, should be:%f, error:%f", clock, clock+error, error); */ } } /* else if (m_synctype == SYNC_SKIPDUP && m_skipdupcount == 0 && fabs(m_error) > DVD_MSEC_TO_TIME(10)) if (m_skipdupcount == 0 && fabs(m_error) > DVD_MSEC_TO_TIME(10)) { //check how many packets to skip/duplicate m_skipdupcount = (int)(m_error / duration); //if less than one frame off, see if it's more than two thirds of a frame, so we can get better in sync if (m_skipdupcount == 0 && fabs(m_error) > duration / 3 * 2) m_skipdupcount = (int)(m_error / (duration / 3 * 2)); if (m_skipdupcount > 0) CLog::Log(LOGDEBUG, "OMXPlayerAudio:: Duplicating %i packet(s) of %.2f ms duration", m_skipdupcount, duration / DVD_TIME_BASE * 1000.0); else if (m_skipdupcount < 0) CLog::Log(LOGDEBUG, "OMXPlayerAudio:: Skipping %i packet(s) of %.2f ms duration ", m_skipdupcount * -1, duration / DVD_TIME_BASE * 1000.0); } */ } }
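For SYNC_DISCON the handler snaps the averaged clock error to a whole number of vblank periods, rounding toward zero so errors smaller than one refresh interval produce no correction. A small self-contained sketch of that rounding rule (names are illustrative):

#include <cmath>

// Round |error| down to a whole number of vblank periods, keeping the sign.
// Errors smaller than one period collapse to 0 and trigger no discontinuity.
double quantizeTowardZero(double error, double vblankPeriod)
{
    if (error > 0.0)
        return vblankPeriod * std::floor(error / vblankPeriod);
    return vblankPeriod * std::ceil(error / vblankPeriod);
}

// e.g. with a 16.7 ms period: quantizeTowardZero(30.0, 16.7) == 16.7,
//      quantizeTowardZero(-10.0, 16.7) == 0.0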
perm2 createPerm2(unsigned int *elems, unsigned int nelems, unsigned int t, static_bitsequence_builder * bmb) {//if uints is set to true the given array is of uints using 32 bits each perm2 P; unsigned int *b, *baux, nextelem, i, j, bptr, aux, antbptr,nbwdptrs, elem,nbits, firstelem, cyclesize; auxbwd2 *auxbwdptr; P = new struct sperm2; P->elems = elems; P->nelems = nelems; P->nbits = bits(nelems-1); nbits = bits(nelems-1); P->t = t; if (t==1) { P->bwdptrs = new unsigned int[uint_len(nelems,nbits)]; for(unsigned int m=0;m<uint_len(nelems,nbits);m++) P->bwdptrs[m]=0; assert(P->bwdptrs!=NULL); P->nbwdptrs = nelems; for (i=0; i<nelems; i++) { unsigned int bg = elems[i]; assert(bg<nelems); set_field(P->bwdptrs, nbits, bg, i); } P->bmap = NULL; } else { auxbwdptr = new auxbwd2[(t+((int)ceil((double)nelems/t)))]; assert(auxbwdptr!=NULL); b = new unsigned int[uint_len(nelems,1)]; for(i=0;i<uint_len(nelems,1);i++) b[i]=0; assert(b!=NULL); baux = new unsigned int[uint_len(nelems,1)]; for(i=0;i<uint_len(nelems,1);i++) baux[i] = 0; assert(baux!=NULL); nbwdptrs = 0; for (i = 0; i < nelems; i++) { if (bitget(baux,i) == 0) { nextelem = j = bptr = antbptr = i; aux = 0; bitset(baux, j); cyclesize = 0; firstelem = j; while ((elem=elems[j]) != nextelem) {//P->elems[j] j = elem; bitset(baux, j); aux++; if (aux >= t) { auxbwdptr[nbwdptrs].key = j; auxbwdptr[nbwdptrs++].pointer = bptr; antbptr = bptr; bptr = j; aux = 0; bitset(b, j); } cyclesize++; } if (cyclesize >= t) { auxbwdptr[nbwdptrs].key = nextelem; auxbwdptr[nbwdptrs++].pointer = bptr; bitset(b, nextelem); } } } qsort(auxbwdptr, nbwdptrs, sizeof(auxbwd2), &compare2); aux = uint_len(nbwdptrs,P->nbits); P->bwdptrs = new unsigned int[aux]; assert(P->bwdptrs!=NULL); for(i=0;i<aux;i++) P->bwdptrs[i] = 0; P->nbwdptrs = nbwdptrs; for (i = 0; i < nbwdptrs; i++) { set_field(P->bwdptrs, nbits, i, auxbwdptr[i].pointer); //if(i<5) // printf(" %d ",get_field(P->bwdptrs,nbits,i)); } //printf("\n"); P->bmap = bmb->build(b, nelems); //delete [] P->bmap; delete [] b; delete [] (baux); delete [] (auxbwdptr); } return P; }
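createPerm2 walks every cycle of the permutation and drops a backward pointer every t elements, so the inverse can later be answered in O(t) forward steps. Below is a simplified sketch of the shortcut placement alone, on a plain std::vector with a hash map instead of the library's packed arrays and bitmap; it assumes elems is a valid 0-based permutation and t >= 1.

#include <unordered_map>
#include <vector>

struct ShortcutPerm {
    std::vector<unsigned> perm;                   // the permutation pi
    std::unordered_map<unsigned, unsigned> back;  // j -> element t steps behind j in its cycle
    unsigned t;
};

// Walk each cycle once and record a backward pointer every t forward steps,
// the same placement createPerm2 stores through auxbwd2 and the bitmap b.
ShortcutPerm buildShortcuts(const std::vector<unsigned>& perm, unsigned t)
{
    ShortcutPerm P{perm, {}, t};
    std::vector<bool> seen(perm.size(), false);
    for (unsigned start = 0; start < perm.size(); ++start) {
        if (seen[start])
            continue;
        seen[start] = true;
        unsigned anchor = start;   // element t steps behind the cursor
        unsigned j = start;
        unsigned steps = 0;
        while (perm[j] != start) { // traverse the cycle
            j = perm[j];
            seen[j] = true;
            if (++steps == t) {    // drop a shortcut and move the anchor up
                P.back[j] = anchor;
                anchor = j;
                steps = 0;
            }
        }
    }
    return P;
}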
static int msContourLayerReadRaster(layerObj *layer, rectObj rect) { mapObj *map = layer->map; char **bands; char pointer[64], memDSPointer[128]; int band = 1; double adfGeoTransform[6], adfInvGeoTransform[6]; double llx, lly, urx, ury; rectObj copyRect, mapRect; int dst_xsize, dst_ysize; int virtual_grid_step_x, virtual_grid_step_y; int src_xoff, src_yoff, src_xsize, src_ysize; double map_cellsize_x, map_cellsize_y, dst_cellsize_x, dst_cellsize_y; GDALRasterBandH hBand = NULL; CPLErr eErr; contourLayerInfo *clinfo = (contourLayerInfo *) layer->layerinfo; if (layer->debug) msDebug("Entering msContourLayerReadRaster().\n"); if (clinfo == NULL || clinfo->hOrigDS == NULL) { msSetError(MS_MISCERR, "Assertion failed: Contour layer not opened!!!", "msContourLayerReadRaster()"); return MS_FAILURE; } bands = CSLTokenizeStringComplex( CSLFetchNameValue(layer->processing,"BANDS"), " ,", FALSE, FALSE ); if (CSLCount(bands) > 0) { band = atoi(bands[0]); if (band < 1 || band > GDALGetRasterCount(clinfo->hOrigDS)) { msSetError( MS_IMGERR, "BANDS PROCESSING directive includes illegal band '%d', should be from 1 to %d.", "msContourLayerReadRaster()", band, GDALGetRasterCount(clinfo->hOrigDS)); CSLDestroy(bands); return MS_FAILURE; } } CSLDestroy(bands); hBand = GDALGetRasterBand(clinfo->hOrigDS, band); if (hBand == NULL) { msSetError(MS_IMGERR, "Band %d does not exist on dataset.", "msContourLayerReadRaster()", band); return MS_FAILURE; } if (layer->projection.numargs > 0 && EQUAL(layer->projection.args[0], "auto")) { const char *wkt; wkt = GDALGetProjectionRef(clinfo->hOrigDS); if (wkt != NULL && strlen(wkt) > 0) { if (msOGCWKT2ProjectionObj(wkt, &(layer->projection), layer->debug) != MS_SUCCESS) { char msg[MESSAGELENGTH*2]; errorObj *ms_error = msGetErrorObj(); snprintf( msg, sizeof(msg), "%s\n" "PROJECTION AUTO cannot be used for this " "GDAL raster (`%s').", ms_error->message, layer->data); msg[MESSAGELENGTH-1] = '\0'; msSetError(MS_OGRERR, "%s","msDrawRasterLayer()", msg); return MS_FAILURE; } } } /* * Compute the georeferenced window of overlap, and read the source data * downsampled to match output resolution, or at full resolution if * output resolution is lower than the source resolution. * * A large portion of this overlap calculation code was borrowed from * msDrawRasterLayerGDAL(). * Would be possible to move some of this to a reusable function? * * Note: This code works only if no reprojection is involved. It would * need rework to support cases where output projection differs from source * data file projection. 
*/ src_xsize = GDALGetRasterXSize(clinfo->hOrigDS); src_ysize = GDALGetRasterYSize(clinfo->hOrigDS); /* set the Dataset extent */ msGetGDALGeoTransform(clinfo->hOrigDS, map, layer, adfGeoTransform); clinfo->extent.minx = adfGeoTransform[0]; clinfo->extent.maxy = adfGeoTransform[3]; clinfo->extent.maxx = adfGeoTransform[0] + src_xsize * adfGeoTransform[1]; clinfo->extent.miny = adfGeoTransform[3] + src_ysize * adfGeoTransform[5]; if (layer->transform) { if (layer->debug) msDebug("msContourLayerReadRaster(): Entering transform.\n"); InvGeoTransform(adfGeoTransform, adfInvGeoTransform); mapRect = rect; map_cellsize_x = map_cellsize_y = map->cellsize; #ifdef USE_PROJ /* if necessary, project the searchrect to source coords */ if (msProjectionsDiffer( &(map->projection), &(layer->projection))) { if ( msProjectRect(&map->projection, &layer->projection, &mapRect) != MS_SUCCESS ) { msDebug("msContourLayerReadRaster(%s): unable to reproject map request rectangle into layer projection, canceling.\n", layer->name); return MS_FAILURE; } map_cellsize_x = MS_CELLSIZE(mapRect.minx, mapRect.maxx, map->width); map_cellsize_y = MS_CELLSIZE(mapRect.miny, mapRect.maxy, map->height); /* if the projection failed to project the extent requested, we need to calculate the cellsize to preserve the initial map cellsize ratio */ if ( (mapRect.minx < GEO_TRANS(adfGeoTransform,0,src_ysize)) || (mapRect.maxx > GEO_TRANS(adfGeoTransform,src_xsize,0)) || (mapRect.miny < GEO_TRANS(adfGeoTransform+3,0,src_ysize)) || (mapRect.maxy > GEO_TRANS(adfGeoTransform+3,src_xsize,0)) ) { int src_unit, dst_unit; src_unit = GetMapserverUnitUsingProj(&map->projection); dst_unit = GetMapserverUnitUsingProj(&layer->projection); if (src_unit == -1 || dst_unit == -1) { msDebug("msContourLayerReadRaster(%s): unable to reproject map request rectangle into layer projection, canceling.\n", layer->name); return MS_FAILURE; } map_cellsize_x = MS_CONVERT_UNIT(src_unit, dst_unit, MS_CELLSIZE(rect.minx, rect.maxx, map->width)); map_cellsize_y = MS_CONVERT_UNIT(src_unit, dst_unit, MS_CELLSIZE(rect.miny, rect.maxy, map->height)); } } #endif if (map_cellsize_x == 0 || map_cellsize_y == 0) { if (layer->debug) msDebug("msContourLayerReadRaster(): Cellsize can't be 0.\n"); return MS_FAILURE; } /* Adjust MapServer pixel model to GDAL pixel model */ mapRect.minx -= map_cellsize_x*0.5; mapRect.maxx += map_cellsize_x*0.5; mapRect.miny -= map_cellsize_y*0.5; mapRect.maxy += map_cellsize_y*0.5; /* * If raw data cellsize (from geotransform) is larger than output map_cellsize * then we want to extract only enough data to match the output map resolution * which means that GDAL will automatically sample the data on read. * * To prevent bad contour effects on tile edges, we adjust the target cellsize * to align the extracted window with a virtual grid based on the origin of the * raw data and a virtual grid step size corresponding to an integer sampling step. * * If source data has a greater cellsize (i.e. lower res) that requested ouptut map * then we use the raw data cellsize as target cellsize since there is no point in * interpolating the data for contours in this case. 
*/ virtual_grid_step_x = (int)floor(map_cellsize_x / ABS(adfGeoTransform[1])); if (virtual_grid_step_x < 1) virtual_grid_step_x = 1; /* Do not interpolate data if grid sampling step < 1 */ virtual_grid_step_y = (int)floor(map_cellsize_y / ABS(adfGeoTransform[5])); if (virtual_grid_step_y < 1) virtual_grid_step_y = 1; /* Do not interpolate data if grid sampling step < 1 */ /* target cellsize is a multiple of raw data cellsize based on grid step*/ dst_cellsize_x = ABS(adfGeoTransform[1]) * virtual_grid_step_x; dst_cellsize_y = ABS(adfGeoTransform[5]) * virtual_grid_step_y; /* Compute overlap between source and target views */ copyRect = mapRect; if (copyRect.minx < GEO_TRANS(adfGeoTransform,0,src_ysize)) copyRect.minx = GEO_TRANS(adfGeoTransform,0,src_ysize); if (copyRect.maxx > GEO_TRANS(adfGeoTransform,src_xsize,0)) copyRect.maxx = GEO_TRANS(adfGeoTransform,src_xsize,0); if (copyRect.miny < GEO_TRANS(adfGeoTransform+3,0,src_ysize)) copyRect.miny = GEO_TRANS(adfGeoTransform+3,0,src_ysize); if (copyRect.maxy > GEO_TRANS(adfGeoTransform+3,src_xsize,0)) copyRect.maxy = GEO_TRANS(adfGeoTransform+3,src_xsize,0); if (copyRect.minx >= copyRect.maxx || copyRect.miny >= copyRect.maxy) { if (layer->debug) msDebug("msContourLayerReadRaster(): Error in overlap calculation.\n"); return MS_FAILURE; } /* * Convert extraction window to raster coordinates */ llx = GEO_TRANS(adfInvGeoTransform+0,copyRect.minx,copyRect.miny); lly = GEO_TRANS(adfInvGeoTransform+3,copyRect.minx,copyRect.miny); urx = GEO_TRANS(adfInvGeoTransform+0,copyRect.maxx,copyRect.maxy); ury = GEO_TRANS(adfInvGeoTransform+3,copyRect.maxx,copyRect.maxy); /* * Align extraction window with virtual grid * (keep in mind raster coordinates origin is at upper-left) * We also add an extra buffer to fix tile boundarie issues when zoomed */ llx = floor(llx / virtual_grid_step_x) * virtual_grid_step_x - (virtual_grid_step_x*5); urx = ceil(urx / virtual_grid_step_x) * virtual_grid_step_x + (virtual_grid_step_x*5); ury = floor(ury / virtual_grid_step_y) * virtual_grid_step_y - (virtual_grid_step_x*5); lly = ceil(lly / virtual_grid_step_y) * virtual_grid_step_y + (virtual_grid_step_x*5); src_xoff = MAX(0,(int) floor(llx+0.5)); src_yoff = MAX(0,(int) floor(ury+0.5)); src_xsize = MIN(MAX(0,(int) (urx - llx + 0.5)), GDALGetRasterXSize(clinfo->hOrigDS) - src_xoff); src_ysize = MIN(MAX(0,(int) (lly - ury + 0.5)), GDALGetRasterYSize(clinfo->hOrigDS) - src_yoff); /* Update the geographic extent (buffer added) */ /* TODO: a better way to go the geo_trans */ copyRect.minx = GEO_TRANS(adfGeoTransform+0,src_xoff,0); copyRect.maxx = GEO_TRANS(adfGeoTransform+0,src_xoff+src_xsize,0); copyRect.miny = GEO_TRANS(adfGeoTransform+3,0,src_yoff+src_ysize); copyRect.maxy = GEO_TRANS(adfGeoTransform+3,0,src_yoff); /* * If input window is to small then stop here */ if (src_xsize < 2 || src_ysize < 2) { if (layer->debug) msDebug("msContourLayerReadRaster(): input window too small, or no apparent overlap between map view and this window(1).\n"); return MS_FAILURE; } /* Target buffer size */ dst_xsize = (int)ceil((copyRect.maxx - copyRect.minx) / dst_cellsize_x); dst_ysize = (int)ceil((copyRect.maxy - copyRect.miny) / dst_cellsize_y); if (dst_xsize == 0 || dst_ysize == 0) { if (layer->debug) msDebug("msContourLayerReadRaster(): no apparent overlap between map view and this window(2).\n"); return MS_FAILURE; } if (layer->debug) msDebug( "msContourLayerReadRaster(): src=%d,%d,%d,%d, dst=%d,%d,%d,%d\n", src_xoff, src_yoff, src_xsize, src_ysize, 0, 0, dst_xsize, dst_ysize ); } else 
{ src_xoff = 0; src_yoff = 0; dst_xsize = src_xsize = MIN(map->width,src_xsize); dst_ysize = src_ysize = MIN(map->height,src_ysize); } /* -------------------------------------------------------------------- */ /* Allocate buffer, and read data into it. */ /* -------------------------------------------------------------------- */ clinfo->buffer = (double *) malloc(sizeof(double) * dst_xsize * dst_ysize); if (clinfo->buffer == NULL) { msSetError(MS_MEMERR, "Malloc(): Out of memory.", "msContourLayerReadRaster()"); return MS_FAILURE; } eErr = GDALRasterIO(hBand, GF_Read, src_xoff, src_yoff, src_xsize, src_ysize, clinfo->buffer, dst_xsize, dst_ysize, GDT_Float64, 0, 0); if (eErr != CE_None) { msSetError( MS_IOERR, "GDALRasterIO() failed: %s", "msContourLayerReadRaster()", CPLGetLastErrorMsg() ); free(clinfo->buffer); return MS_FAILURE; } memset(pointer, 0, sizeof(pointer)); CPLPrintPointer(pointer, clinfo->buffer, sizeof(pointer)); sprintf(memDSPointer,"MEM:::DATAPOINTER=%s,PIXELS=%d,LINES=%d,BANDS=1,DATATYPE=Float64", pointer, dst_xsize, dst_ysize); clinfo->hDS = GDALOpen(memDSPointer, GF_Read); if (clinfo->hDS == NULL) { msSetError(MS_IMGERR, "Unable to open GDAL Memory dataset.", "msContourLayerReadRaster()"); free(clinfo->buffer); return MS_FAILURE; } adfGeoTransform[0] = copyRect.minx; adfGeoTransform[1] = dst_cellsize_x; adfGeoTransform[2] = 0; adfGeoTransform[3] = copyRect.maxy; adfGeoTransform[4] = 0; adfGeoTransform[5] = -dst_cellsize_y; clinfo->cellsize = MAX(dst_cellsize_x, dst_cellsize_y); { char buf[64]; sprintf(buf, "%lf", clinfo->cellsize); msInsertHashTable(&layer->metadata, "__data_cellsize__", buf); } GDALSetGeoTransform(clinfo->hDS, adfGeoTransform); return MS_SUCCESS; }
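The heart of the window math in msContourLayerReadRaster: derive an integer sampling step from the ratio of output to source cellsize, then snap the pixel window outward to multiples of that step plus a safety buffer so contours from neighbouring tiles line up. A condensed sketch of the alignment step follows; the 5-step buffer mirrors the code above, but this sketch uses the y step for the vertical buffer, whereas the code above reuses virtual_grid_step_x there.

#include <algorithm>
#include <cmath>

struct PixelWindow { int xoff, yoff, xsize, ysize; };

// Snap a raster-space window [llx,urx] x [ury,lly] outward to multiples of
// the sampling step and clamp it to the raster, as done before GDALRasterIO.
PixelWindow alignToVirtualGrid(double llx, double urx, double ury, double lly,
                               int stepX, int stepY,
                               int rasterXSize, int rasterYSize)
{
    const int buffer = 5;   // extra steps against tile-edge artifacts
    llx = std::floor(llx / stepX) * stepX - buffer * stepX;
    urx = std::ceil (urx / stepX) * stepX + buffer * stepX;
    ury = std::floor(ury / stepY) * stepY - buffer * stepY;
    lly = std::ceil (lly / stepY) * stepY + buffer * stepY;

    PixelWindow w;
    w.xoff  = std::max(0, (int)std::floor(llx + 0.5));
    w.yoff  = std::max(0, (int)std::floor(ury + 0.5));
    w.xsize = std::min(std::max(0, (int)(urx - llx + 0.5)), rasterXSize - w.xoff);
    w.ysize = std::min(std::max(0, (int)(lly - ury + 0.5)), rasterYSize - w.yoff);
    return w;
}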
MStatus AbcExport::doIt(const MArgList & args) { MStatus status; MTime oldCurTime = MAnimControl::currentTime(); MArgParser argData(syntax(), args, &status); if (status != MS::kSuccess) return status; unsigned int numberOfArguments = args.length(); MString msg; msg += "AbcExport [options] tranlation_jobs_description_string\n\n"; msg += "Options:\n"; msg += "-h / help Print this message.\n"; msg += "\n"; msg += "-sa / startAt float (default: 0.0f)\n"; msg += "The frame to start scene evaluation at. This is used to set\n"; msg += "the starting frame for time dependent translations and can\n"; msg += "be used to add run-up that isn't actually translated.\n"; msg += "\n"; msg += "-sf / skipFrame boolean (default: false)\n"; msg += "When evaluating multiple translate jobs, this flag decides\n"; msg += "whether or not to skip frame if possible.\n"; msg += "\n"; msg += "-sl / selection\n"; msg += "If this flag is present: if tranlation_jobs_description_string\n"; msg += "is empty, write out all nodes from the active selection list;\n"; msg += "if tranlation_jobs_description_string is not empty, write out\n"; msg += "only the nodes in the active selection list.\n"; msg += "\n"; msg += "-d / debug Print debug log\n"; msg += "\n"; msg += "(Each translation job is seperated by ;)\n"; msg += "\n"; msg += "per translation job optional flags:\n"; msg += "\n"; msg += "range float startTime float endTime\n"; msg += "The frame range to write.\n"; msg += "\n"; msg += "uv\n"; msg += "If set, AbcExport will bake the current uv set of polygons\n"; msg += "and subD meshes into property \"st\" on the nodes.\n"; msg += "By default this flag is not set.\n"; msg += "\n"; msg += "shutterOpen float (default: 0.0)\n"; msg += "Motion blur starting time.\n"; msg += "\n"; msg += "shutterClose float (default: 0.0)\n"; msg += "Motion blur end time\n"; msg += "\n"; msg += "numSamples unsigned int (default: 2)\n"; msg += "The number of times to sample within a given frame with\n"; msg += "motion blur applied. If shutterOpen is equal to\n"; msg += "shutterClose then numSamples is ignored.\n"; msg += "\n"; msg += "noSampleGeo\n"; msg += "If set, only write out geometry on whole frames, not\n"; msg += "subframes. 
This flag is not set by default.\n"; msg += "Transforms may still be written out on subframes.\n"; msg += "\n"; msg += "attrPrefix string (default: SPT_)\n"; msg += "Prefix filter for determining which attributes to write out\n"; msg += "\n"; msg += "attrs string\n"; msg += "Comma seperated list of attributes to write out, these\n"; msg += "attributes will ignore the attr prefix filter.\n"; msg += "\n"; msg += "writeVisibility bool (default: false)\n"; msg += "Whether or not to write the visibility state to the file.\n"; msg += "If false then visibility is not written and everything is\n"; msg += "assumed to be visible.\n"; msg += "\n"; msg += "worldSpace\n"; msg += "If set, the root nodes will be stored in world space.\n"; msg += "By default it is stored in local space.\n"; msg += "\n"; msg += "melPerFrameCallback string (default: "")\n"; msg += "When each frame (and the static frame) is evaluated the\n"; msg += "string specified is evaluated as a Mel command.\n"; msg += "See below for special processing rules.\n"; msg += "Example: melPerFrameCallback print(\"#FRAME#\")\n"; msg += "\n"; msg += "melPostCallback string (default: "")\n"; msg += "When the translation has finished the string specified is\n"; msg += "evaluated as a Mel command.\n"; msg += "See below for special processing rules.\n"; msg += "Example: melPostCallback print(\"Done!\")\n"; msg += "\n"; msg += "pythonPerFrameCallback string (default: "")\n"; msg += "When each frame (and the static frame) is evaluated the\n"; msg += "string specified is evaluated as a python command.\n"; msg += "See below for special processing rules.\n"; msg += "Example: pythonPerFrameCallback print(\"#FRAME#\")\n"; msg += "\n"; msg += "pythonPostCallback string (default: "")\n"; msg += "When the translation has finished the string specified is\n"; msg += "evaluated as a python command.\n"; msg += "See below for special processing rules.\n"; msg += "Example: pythonPostCallback print(\"Done!\")\n"; msg += "\n"; msg += "On the callbacks, special tokens are replaced with other\n"; msg += "data, these tokens and what they are replaced with are as\n"; msg += "follows:\n"; msg += "\n"; msg += "#FRAME# replaced with the frame number being evaluated, if\n"; msg += "the static frame is being evaluated then #FRAME# is not\n"; msg += "replaced. #FRAME# is ignored in the post callbacks.\n"; msg += "\n"; msg += "#BOUNDS# replaced with the bounding box values in minX minY\n"; msg += "minZ maxX maxY maxZ space seperated order.\n"; msg += "\n"; msg += "#BOUNDSARRAY# replaced with the bounding box values as\n"; msg += "above, but in array form. 
\n"; msg += "In Mel: {minX, minY, minZ, maxX, maxY, maxZ}\n"; msg += "In Python: [minX, minY, minZ, maxX, maxY, maxZ]"; msg += "\n"; msg += "Command Examples:\n"; msg += "AbcExport -d -sf \"range 1 24 test_hi test_lo /tmp/test.abc\"\n"; msg += "AbcExport \"worldSpace test_hi /tmp/test_hi.abc\"\n"; msg += "AbcExport \"range 1 24 shutterOpen 0.0 shutterClose 0.5 "; msg += "numSamples 2 test_hi test_lo /tmp/test.abc\"\n"; msg += "AbcExport -d \"range 101 700 test_hi /tmp/test.abc; range 10"; msg += " 55 test_lo /tmp/test1.abc\"\n"; msg += "\n"; msg += "Note that multiple nodes can be written to the same file,\n"; msg += "but these nodes should not have any parenting relationships\n"; msg += "or the job will not be written out.\n"; if (argData.isFlagSet("help")) { MGlobal::displayInfo(msg); return MS::kSuccess; } bool debug = argData.isFlagSet("debug"); // If skipFrame is true, when going through the playback range of the // scene, as many frames are skipped as possible. This could cause // a problem for time-dependent solutions like // particle system / hair simulation bool skipFrame = false; if (argData.isFlagSet("skipFrame")) skipFrame = true; bool useSelectionList = false; if (argData.isFlagSet("selection")) useSelectionList = true; double startEvaluationTime = FLT_MAX; if (argData.isFlagSet("startAt")) { double startAt = 0.0; argData.getFlagArgument("startAt", 0, startAt); startEvaluationTime = startAt; } // Very rudimentary argument parser: no syntax checking at all !!! MString argStr; // status = argData.getCommandArgument(0, argStr); argStr = args.asString(numberOfArguments-1, &status); MStringArray jobStringArray; status = argStr.split(';', jobStringArray); unsigned int jobSize = jobStringArray.length(); if (jobSize == 0) return status; // the frame range we will be iterating over for all jobs, // includes frames which are not skipped and the startAt offset std::set<double> allFrameRange; // this will eventually hold only the animated jobs.
// its a list because we will be removing jobs from it std::list < AbcWriteJobPtr > jobList; for (unsigned int jobIndex = 0; jobIndex < jobSize; jobIndex++) { unsigned int argc = 0; // parse the string MString tstr = jobStringArray[jobIndex]; MStringArray strArr; status = tstr.split(' ', strArr); unsigned int length = strArr.length(); double startTime = oldCurTime.value(); double endTime = oldCurTime.value(); double shutterOpen = 0.0; double shutterClose = 0.0; int numSamples = 1; bool sampleGeo = true; // whether or not to subsample geometry bool worldSpace = false; bool writeVisibility = false; bool writeUVs = false; // DAG path array of nodes to be written out as root nodes in the file util::ShapeSet dagPath; // name of the abc file the job will be written into std::string fileName; // the list of frames written into the abc file std::set<double> geoSamples; std::set<double> transSamples; std::string melPerFrameCallback; std::string melPostCallback; std::string pythonPerFrameCallback; std::string pythonPostCallback; // attribute filtering stuff std::string prefixFilter = "SPT_"; std::set<std::string> attribsSet; // parser for each job while (argc < length) { if (strArr[argc] == "range") // range start end { // guard against overruns if (argc + 2 >= length) return MS::kFailure; // looking for two floating point numbers util::isFloat(strArr[argc+1], msg); util::isFloat(strArr[argc+2], msg); startTime = floor(strArr[argc+1].asDouble()); endTime = ceil(strArr[argc+2].asDouble()); // make sure start frame is smaller or equal to endTime if (startTime > endTime) { double temp = startTime; startTime = endTime; endTime = temp; } argc += 3; } else if (strArr[argc] == "uv") { writeUVs = true; argc++; } else if (strArr[argc] == "shutterOpen") { if (argc + 1 >= length) return MS::kFailure; util::isFloat(strArr[argc+1], msg); shutterOpen = strArr[argc+1].asDouble(); argc += 2; } else if (strArr[argc] == "shutterClose") { if (argc + 1 >= length) return MS::kFailure; util::isFloat(strArr[argc+1], msg); shutterClose = strArr[argc+1].asDouble(); argc += 2; } else if (strArr[argc] == "numSamples") { if (argc + 1 >= length) return MS::kFailure; util::isUnsigned(strArr[argc+1], msg); numSamples = strArr[argc+1].asInt(); argc += 2; } else if (strArr[argc] == "writeVisibility") { writeVisibility = true; argc++; } else if (strArr[argc] == "worldSpace") { worldSpace = true; argc++; } else if (strArr[argc] == "noSampleGeo") { sampleGeo = false; argc++; } else if (strArr[argc] == "melPerFrameCallback") { if (argc + 1 >= length) return MS::kFailure; melPerFrameCallback = strArr[argc+1].asChar(); argc += 2; } else if (strArr[argc] == "melPostCallback") { if (argc + 1 >= length) return MS::kFailure; melPostCallback = strArr[argc+1].asChar(); argc += 2; } else if (strArr[argc] == "pythonPerFrameCallback") { if (argc + 1 >= length) return MS::kFailure; pythonPerFrameCallback = strArr[argc+1].asChar(); argc += 2; } else if (strArr[argc] == "pythonPostCallback") { if (argc + 1 >= length) return MS::kFailure; pythonPostCallback = strArr[argc+1].asChar(); argc += 2; } else if (strArr[argc] == "attrPrefix") { if (argc + 1 >= length) return MS::kFailure; prefixFilter = strArr[argc+1].asChar(); argc += 2; } else if (strArr[argc] == "attrs") { if (argc + 1 >= length) return MS::kFailure; MString attrString = strArr[argc+1]; MStringArray attribs; attrString.split(',', attribs); unsigned int attribsLength = attrString.length(); for (unsigned int i = 0; i < attribsLength; ++i) { MString & attrib = attribs[i]; if (attrib != "" && 
attrib != "visibility") { attribsSet.insert(attrib.asChar()); } } argc += 2; } else // assume in the order of node names and then abc file name { for (; argc < length-1; argc++) { MSelectionList sel; if (!sel.add(strArr[argc])) { MString warn = "Could not select "; warn += strArr[argc]; warn += ". Skipping..."; MGlobal::displayWarning(warn); continue; } MDagPath path; if (!sel.getDagPath(0, path)) { MGlobal::displayWarning( "Not a DAG Node. Skipping... "); continue; } dagPath.insert(path); } // check for validity of the DagPath relationships // complexity : n^2 bool isAncestor = false; if (dagPath.size() > 1) { util::ShapeSet::iterator m, n; const util::ShapeSet::iterator end = dagPath.end(); for (m = dagPath.begin(); m != end; ) { MDagPath path1 = *m; m++; for (n = m; n != end; n++) { MDagPath path2 = *n; if (util::isAncestorDescendentRelationship(path1, path2)) isAncestor = true; } // for n } // for m } if (isAncestor == true) return MS::kFailure; if (argc >= length) return MS::kFailure; fileName = strArr[argc++].asChar(); } } std::set <double> origSamples; for (double f = startTime; f <= endTime; f++) origSamples.insert(f); transSamples = origSamples; geoSamples = origSamples; Alembic::AbcCoreAbstract::v1::chrono_t fps24 = 1/24.0; Alembic::AbcCoreAbstract::v1::TimeSamplingType transTime(fps24); Alembic::AbcCoreAbstract::v1::TimeSamplingType geoTime(fps24); // post process, add extra motion blur samples if (numSamples > 1 && shutterOpen < shutterClose) { transTime = Alembic::AbcCoreAbstract::v1::TimeSamplingType( numSamples, fps24); // if we are't subsampling the geometry, leave it as uniform if (sampleGeo) geoTime = transTime; std::set<double> offsetSamples; offsetSamples.insert(shutterOpen); offsetSamples.insert(shutterClose); double offset = (shutterClose - shutterOpen) / (numSamples-1); double curVal = shutterOpen + offset; for (int i = 0; i < numSamples - 2; ++i, curVal += offset) { offsetSamples.insert(curVal); } // Add an extra leading or trailing frame on an // integer boundary for the rest of the pipeline double floorVal = floor(startTime + shutterOpen); double ceilVal = ceil(endTime + shutterClose); transSamples.insert(floorVal); transSamples.insert(ceilVal); geoSamples.insert(floorVal); geoSamples.insert(ceilVal); std::set<double>::iterator samp = origSamples.begin(); std::set<double>::iterator sampEnd = origSamples.end(); for (; samp != sampEnd; ++samp) { double curSamp = *samp; std::set<double>::iterator offset = offsetSamples.begin(); std::set<double>::iterator offsetEnd = offsetSamples.end(); for (; offset != offsetEnd; ++offset) { double curVal = curSamp + (*offset); double rndVal = roundf(curVal); // if the value is close enough to the integer value // insert the integer value if (fabs(curVal - rndVal) < 1e-4) { transSamples.insert(rndVal); // ignore geometry sampling flag because it is a whole // frame and for some reason we always want to // translate the whole frames geoSamples.insert(rndVal); } else if (sampleGeo) { transSamples.insert(curVal); geoSamples.insert(curVal); } else { // we aren't include subsampled geometry transSamples.insert(curVal); } } // for offset } // for samp } // if we need to apply motion blur AbcWriteJobPtr job(new AbcWriteJob(dagPath, fileName.c_str(), useSelectionList, worldSpace, writeVisibility, writeUVs, transSamples, transTime, geoSamples, geoTime, melPerFrameCallback, melPostCallback, pythonPerFrameCallback, pythonPostCallback, prefixFilter, attribsSet)); jobList.push_front(job); // make sure we add additional whole frames, if we arent 
skipping // the inbetween ones if (!skipFrame && !allFrameRange.empty() && !transSamples.empty()) { double localMin = *(transSamples.begin()); std::set<double>::iterator last = transSamples.end(); last--; double localMax = *last; double globalMin = *(allFrameRange.begin()); last = allFrameRange.end(); last--; double globalMax = *last; // if the min of our current frame range is beyond // what we know about, pad a few more frames if (localMin > globalMax) { for (double f = globalMax; f < localMin; f++) { allFrameRange.insert(f); } } // if the max of our current frame range is beyond // what we know about, pad a few more frames if (localMax < globalMin) { for (double f = localMax; f < globalMin; f++) { allFrameRange.insert(f); } } } // right now we just copy over the translation samples since // they are guaranteed to contain all the geometry samples if (!transSamples.empty()) allFrameRange.insert(transSamples.begin(), transSamples.end()); } // ================ end of argument parsing ========================= // add extra evaluation run up, if necessary if (startEvaluationTime != FLT_MAX && !allFrameRange.empty()) { double firstFrame = *allFrameRange.begin(); for (double f = startEvaluationTime; f < firstFrame; ++f) { allFrameRange.insert(f); } } std::set<double>::iterator it = allFrameRange.begin(); std::set<double>::iterator itEnd = allFrameRange.end(); // loop through every frame in the list, if a job has that frame in it's // list of transform or shape frames, then it will write out data and // call the perFrameCallback, if that frame is also the last one it has // to work on then it will also call the postCallback. // If it doesn't have this frame, then it does nothing for (; it != itEnd; it++) { if (debug) { double frame = *it; MString info; info = frame; MGlobal::displayInfo(info); } MGlobal::viewFrame(*it); std::list< AbcWriteJobPtr >::iterator j = jobList.begin(); std::list< AbcWriteJobPtr >::iterator jend = jobList.end(); while (j != jend) { bool lastFrame = (*j)->eval(*it); // if (lastFrame) { j = jobList.erase(j); } else j++; } } // set the time back MGlobal::viewFrame(oldCurTime); return MS::kSuccess; }
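For motion blur the job builds, per whole frame, numSamples shutter offsets evenly spaced over [shutterOpen, shutterClose] and snaps any sample within 1e-4 of an integer back onto the whole frame. Below is a standalone sketch of that sample-set construction; it ignores the transform/geometry split and the extra floor/ceil padding frames handled above.

#include <cmath>
#include <set>

// Evaluation times for whole frames [startFrame, endFrame] with numSamples
// shutter offsets per frame spread over [shutterOpen, shutterClose].
// Assumes numSamples >= 2 and shutterOpen < shutterClose, as in the branch above.
std::set<double> motionBlurSamples(double startFrame, double endFrame,
                                   double shutterOpen, double shutterClose,
                                   int numSamples)
{
    std::set<double> offsets = { shutterOpen, shutterClose };
    const double step = (shutterClose - shutterOpen) / (numSamples - 1);
    for (int i = 1; i < numSamples - 1; ++i)
        offsets.insert(shutterOpen + i * step);

    std::set<double> samples;
    for (double f = startFrame; f <= endFrame; f += 1.0) {
        for (double off : offsets) {
            const double t = f + off;
            const double snapped = std::floor(t + 0.5);     // nearest whole frame
            samples.insert(std::fabs(t - snapped) < 1e-4 ? snapped : t);
        }
    }
    return samples;
}

// e.g. motionBlurSamples(1, 2, 0.0, 0.5, 2) yields {1, 1.5, 2, 2.5}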
bool OPvPCapturePoint::HandlePlayerEnter(Player* player) { if (m_capturePoint) { player->SendUpdateWorldState(m_capturePoint->GetGOInfo()->capturePoint.worldState1, 1); player->SendUpdateWorldState(m_capturePoint->GetGOInfo()->capturePoint.worldstate2, (uint32)ceil((m_value + m_maxValue) / (2 * m_maxValue) * 100.0f)); player->SendUpdateWorldState(m_capturePoint->GetGOInfo()->capturePoint.worldstate3, m_neutralValuePct); } return m_activePlayers[player->GetTeamId()].insert(player).second; }
void Mouse::setType(int type) { delete _animation; _animation = 0; _type = type; switch(_type) { case BIG_ARROW: loadFromSurface(ResourceManager::surface("art/intrface/stdarrow.frm")); setXOffset(0); setYOffset(0); _lastType = _type; break; case SCROLL_W: loadFromSurface(ResourceManager::surface("art/intrface/scrwest.frm")); setYOffset( - ceil(height()/2)); setXOffset(0); break; case SCROLL_W_X: loadFromSurface(ResourceManager::surface("art/intrface/scrwx.frm")); setYOffset( - ceil(height()/2)); setXOffset(0); break; case SCROLL_N: loadFromSurface(ResourceManager::surface("art/intrface/scrnorth.frm")); setXOffset( - ceil(width()/2)); setYOffset(0); break; case SCROLL_N_X: loadFromSurface(ResourceManager::surface("art/intrface/scrnx.frm")); setXOffset( - ceil(width()/2)); setYOffset(0); break; case SCROLL_S: loadFromSurface(ResourceManager::surface("art/intrface/scrsouth.frm")); setXOffset( - ceil(width()/2)); setYOffset( - height()); break; case SCROLL_S_X: loadFromSurface(ResourceManager::surface("art/intrface/scrsx.frm")); setXOffset(- ceil(width()/2)); setYOffset(- height()); break; case SCROLL_E: loadFromSurface(ResourceManager::surface("art/intrface/screast.frm")); setXOffset( - width()); setYOffset( - ceil(height()/2)); break; case SCROLL_E_X: loadFromSurface(ResourceManager::surface("art/intrface/screx.frm")); setXOffset(- width()); setYOffset(- ceil(height()/2)); break; case SCROLL_NW: loadFromSurface(ResourceManager::surface("art/intrface/scrnwest.frm")); setXOffset(0); setYOffset(0); break; case SCROLL_NW_X: loadFromSurface(ResourceManager::surface("art/intrface/scrnwx.frm")); setXOffset(0); setYOffset(0); break; case SCROLL_SW: loadFromSurface(ResourceManager::surface("art/intrface/scrswest.frm")); setXOffset(0); setYOffset(- height()); break; case SCROLL_SW_X: loadFromSurface(ResourceManager::surface("art/intrface/scrswx.frm")); setXOffset(0); setYOffset(- height()); break; case SCROLL_NE: loadFromSurface(ResourceManager::surface("art/intrface/scrneast.frm")); setXOffset(- width()); setYOffset(0); break; case SCROLL_NE_X: loadFromSurface(ResourceManager::surface("art/intrface/scrnex.frm")); setXOffset(- width()); setYOffset(0); break; case SCROLL_SE: loadFromSurface(ResourceManager::surface("art/intrface/scrseast.frm")); setXOffset(- width()); setYOffset(- height()); break; case SCROLL_SE_X: loadFromSurface(ResourceManager::surface("art/intrface/scrsex.frm")); setXOffset(- width()); setYOffset(- height()); break; case HEXAGON_RED: loadFromSurface(ResourceManager::surface("art/intrface/msef000.frm")); setXOffset(- width()/2); setYOffset(- height()/2); _lastType = _type; break; case ACTION: loadFromSurface(ResourceManager::surface("art/intrface/actarrow.frm")); setXOffset(0); setYOffset(0); _lastType = _type; break; case WAIT: _animation = new Animation("art/intrface/wait.frm"); _animation->setEnabled(true); setXOffset(- width()/2); setYOffset(- height()/2); _lastType = _type; break; case NONE: loadFromSurface(new Surface()); break; } }
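Each cursor type above is anchored by offsetting the sprite against the hotspot: centered axes use minus half the size, trailing edges use minus the full size. A compact sketch of that anchoring rule follows; the enum and helper are hypothetical, and note that ceil(height()/2) in the code above is integer division, so it behaves like the plain size/2 used here.

// Horizontal / vertical anchoring of a cursor sprite around its hotspot
// (hypothetical helper; the method above sets the offsets case by case).
enum class Anchor { Start, Center, End };

struct Offset { int x, y; };

// Start  -> the sprite begins at the hotspot (offset 0)
// Center -> the sprite is centered on the hotspot (-size / 2, integer division)
// End    -> the sprite ends at the hotspot (-size)
Offset cursorOffset(int width, int height, Anchor horizontal, Anchor vertical)
{
    auto shift = [](Anchor a, int size) {
        switch (a) {
            case Anchor::Center: return -(size / 2);
            case Anchor::End:    return -size;
            default:             return 0;
        }
    };
    return { shift(horizontal, width), shift(vertical, height) };
}

// e.g. SCROLL_W uses {Start, Center}, SCROLL_SE uses {End, End},
// HEXAGON_RED and WAIT use {Center, Center}.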
void OPvPCapturePoint::SendChangePhase() { if (!m_capturePoint) return; // send this too, sometimes the slider disappears, dunno why :( SendUpdateWorldState(m_capturePoint->GetGOInfo()->capturePoint.worldState1, 1); // send these updates to only the ones in this objective SendUpdateWorldState(m_capturePoint->GetGOInfo()->capturePoint.worldstate2, (uint32)ceil((m_value + m_maxValue) / (2 * m_maxValue) * 100.0f)); // send this too, sometimes it resets :S SendUpdateWorldState(m_capturePoint->GetGOInfo()->capturePoint.worldstate3, m_neutralValuePct); }
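Both HandlePlayerEnter and SendChangePhase turn the capture value, which ranges from -m_maxValue to +m_maxValue, into a 0-100 slider position via (value + max) / (2 * max) * 100 rounded up. A one-function sketch with spot checks:

#include <cmath>
#include <cstdint>

// Map a capture value in [-maxValue, +maxValue] to a world-state slider
// position in [0, 100], rounding up as the code above does.
uint32_t captureSliderPct(float value, float maxValue)
{
    return (uint32_t)std::ceil((value + maxValue) / (2.0f * maxValue) * 100.0f);
}

// captureSliderPct(-max, max) == 0, captureSliderPct(0, max) == 50,
// captureSliderPct(+max, max) == 100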
static VOID PAL_MouseEventFilter( const SDL_Event *lpEvent ) /*++ Purpose: Handle mouse events. Parameters: [IN] lpEvent - pointer to the event. Return value: None. --*/ { #if PAL_HAS_MOUSE static short hitTest = 0; // Double click detect; const SDL_VideoInfo *vi; double screenWidth, gridWidth; double screenHeight, gridHeight; double mx, my; double thumbx; double thumby; INT gridIndex; BOOL isLeftMouseDBClick = FALSE; BOOL isLeftMouseClick = FALSE; BOOL isRightMouseClick = FALSE; static INT lastReleaseButtonTime, lastPressButtonTime, betweenTime; static INT lastPressx = 0; static INT lastPressy = 0; static INT lastReleasex = 0; static INT lastReleasey = 0; if (lpEvent->type!= SDL_MOUSEBUTTONDOWN && lpEvent->type != SDL_MOUSEBUTTONUP) return; vi = SDL_GetVideoInfo(); screenWidth = vi->current_w; screenHeight = vi->current_h; gridWidth = screenWidth / 3; gridHeight = screenHeight / 3; mx = lpEvent->button.x; my = lpEvent->button.y; thumbx = ceil(mx / gridWidth); thumby = floor(my / gridHeight); gridIndex = thumbx + thumby * 3 - 1; switch (lpEvent->type) { case SDL_MOUSEBUTTONDOWN: lastPressButtonTime = SDL_GetTicks(); lastPressx = lpEvent->button.x; lastPressy = lpEvent->button.y; switch (gridIndex) { case 2: g_InputState.prevdir = g_InputState.dir; g_InputState.dir = kDirNorth; break; case 6: g_InputState.prevdir = g_InputState.dir; g_InputState.dir = kDirSouth; break; case 0: g_InputState.prevdir = g_InputState.dir; g_InputState.dir = kDirWest; break; case 8: g_InputState.prevdir = g_InputState.dir; g_InputState.dir = kDirEast; break; case 1: //g_InputState.prevdir = g_InputState.dir; //g_InputState.dir = kDirNorth; g_InputState.dwKeyPress |= kKeyUp; break; case 7: //g_InputState.prevdir = g_InputState.dir; //g_InputState.dir = kDirSouth; g_InputState.dwKeyPress |= kKeyDown; break; case 3: //g_InputState.prevdir = g_InputState.dir; //g_InputState.dir = kDirWest; g_InputState.dwKeyPress |= kKeyLeft; break; case 5: //g_InputState.prevdir = g_InputState.dir; //g_InputState.dir = kDirEast; g_InputState.dwKeyPress |= kKeyRight; break; } break; case SDL_MOUSEBUTTONUP: lastReleaseButtonTime = SDL_GetTicks(); lastReleasex = lpEvent->button.x; lastReleasey = lpEvent->button.y; hitTest ++; if (abs(lastPressx - lastReleasex) < 25 && abs(lastPressy - lastReleasey) < 25) { betweenTime = lastReleaseButtonTime - lastPressButtonTime; if (betweenTime >500) { isRightMouseClick = TRUE; } else if (betweenTime >=0) { if((betweenTime < 100) && (hitTest >= 2)) { isLeftMouseClick = TRUE; hitTest = 0; } else { isLeftMouseClick = TRUE; if(betweenTime > 100) { hitTest = 0; } } } } switch (gridIndex) { case 2: if( isLeftMouseDBClick ) { AUDIO_IncreaseVolume(); break; } case 6: case 0: if( isLeftMouseDBClick ) { AUDIO_DecreaseVolume(); break; } case 7: if (isRightMouseClick) //repeat attack { g_InputState.dwKeyPress |= kKeyRepeat; break; } case 8: g_InputState.dir = kDirUnknown; g_InputState.prevdir = kDirUnknown; break; case 1: if( isRightMouseClick ) { g_InputState.dwKeyPress |= kKeyForce; } break; case 3: if( isRightMouseClick ) { g_InputState.dwKeyPress |= kKeyAuto; } break; case 5: if( isRightMouseClick ) { g_InputState.dwKeyPress |= kKeyDefend; } break; case 4: if (isRightMouseClick) // menu { g_InputState.dwKeyPress |= kKeyMenu; } else if (isLeftMouseClick) // search { g_InputState.dwKeyPress |= kKeySearch; } break; } break; } #endif }
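The filter divides the screen into a 3x3 grid and converts a click into a cell index with ceil(mx / gridWidth) + floor(my / gridHeight) * 3 - 1, so cell 0 is top-left and cell 8 bottom-right. A standalone sketch of that mapping with a quick example (the screen size in the example is arbitrary):

#include <cmath>

// Map a click at (mx, my) on a screenWidth x screenHeight surface to the
// 3x3 grid cell used above: 0 = top-left ... 8 = bottom-right.
// (For mx == 0 the formula yields column 0 and a negative index, which the
// switch above simply leaves unhandled.)
int clickToGridIndex(double mx, double my, double screenWidth, double screenHeight)
{
    const double gridWidth  = screenWidth / 3.0;
    const double gridHeight = screenHeight / 3.0;
    const int thumbx = (int)std::ceil(mx / gridWidth);    // 1..3 for mx in (0, width]
    const int thumby = (int)std::floor(my / gridHeight);  // 0..2 for my in [0, height)
    return thumbx + thumby * 3 - 1;
}

// e.g. on a 320x200 screen, a click at (300, 20) lands in cell 2 (top-right),
// which the filter above maps to kDirNorth.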
static void DrawSkyBox( shader_t *shader ) { int i; sky_min = 0; sky_max = 1; Com_Memset( s_skyTexCoords, 0, sizeof( s_skyTexCoords ) ); for (i=0 ; i<6 ; i++) { int sky_mins_subd[2], sky_maxs_subd[2]; int s, t; sky_mins[0][i] = floor( sky_mins[0][i] * HALF_SKY_SUBDIVISIONS ) / HALF_SKY_SUBDIVISIONS; sky_mins[1][i] = floor( sky_mins[1][i] * HALF_SKY_SUBDIVISIONS ) / HALF_SKY_SUBDIVISIONS; sky_maxs[0][i] = ceil( sky_maxs[0][i] * HALF_SKY_SUBDIVISIONS ) / HALF_SKY_SUBDIVISIONS; sky_maxs[1][i] = ceil( sky_maxs[1][i] * HALF_SKY_SUBDIVISIONS ) / HALF_SKY_SUBDIVISIONS; if ( ( sky_mins[0][i] >= sky_maxs[0][i] ) || ( sky_mins[1][i] >= sky_maxs[1][i] ) ) { continue; } sky_mins_subd[0] = sky_mins[0][i] * HALF_SKY_SUBDIVISIONS; sky_mins_subd[1] = sky_mins[1][i] * HALF_SKY_SUBDIVISIONS; sky_maxs_subd[0] = sky_maxs[0][i] * HALF_SKY_SUBDIVISIONS; sky_maxs_subd[1] = sky_maxs[1][i] * HALF_SKY_SUBDIVISIONS; if ( sky_mins_subd[0] < -HALF_SKY_SUBDIVISIONS ) sky_mins_subd[0] = -HALF_SKY_SUBDIVISIONS; else if ( sky_mins_subd[0] > HALF_SKY_SUBDIVISIONS ) sky_mins_subd[0] = HALF_SKY_SUBDIVISIONS; if ( sky_mins_subd[1] < -HALF_SKY_SUBDIVISIONS ) sky_mins_subd[1] = -HALF_SKY_SUBDIVISIONS; else if ( sky_mins_subd[1] > HALF_SKY_SUBDIVISIONS ) sky_mins_subd[1] = HALF_SKY_SUBDIVISIONS; if ( sky_maxs_subd[0] < -HALF_SKY_SUBDIVISIONS ) sky_maxs_subd[0] = -HALF_SKY_SUBDIVISIONS; else if ( sky_maxs_subd[0] > HALF_SKY_SUBDIVISIONS ) sky_maxs_subd[0] = HALF_SKY_SUBDIVISIONS; if ( sky_maxs_subd[1] < -HALF_SKY_SUBDIVISIONS ) sky_maxs_subd[1] = -HALF_SKY_SUBDIVISIONS; else if ( sky_maxs_subd[1] > HALF_SKY_SUBDIVISIONS ) sky_maxs_subd[1] = HALF_SKY_SUBDIVISIONS; // // iterate through the subdivisions // for ( t = sky_mins_subd[1]+HALF_SKY_SUBDIVISIONS; t <= sky_maxs_subd[1]+HALF_SKY_SUBDIVISIONS; t++ ) { for ( s = sky_mins_subd[0]+HALF_SKY_SUBDIVISIONS; s <= sky_maxs_subd[0]+HALF_SKY_SUBDIVISIONS; s++ ) { MakeSkyVec( ( s - HALF_SKY_SUBDIVISIONS ) / ( float ) HALF_SKY_SUBDIVISIONS, ( t - HALF_SKY_SUBDIVISIONS ) / ( float ) HALF_SKY_SUBDIVISIONS, i, s_skyTexCoords[t][s], s_skyPoints[t][s] ); } } DrawSkySide( shader->sky.outerbox[sky_texorder[i]], sky_mins_subd, sky_maxs_subd ); } }
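DrawSkyBox, like FillCloudBox below, snaps each face's s/t bounds outward onto the HALF_SKY_SUBDIVISIONS grid and clamps the resulting integer range before tessellating. A small sketch of that snap-and-clamp for one axis; the subdivision count here is only an example value.

#include <algorithm>
#include <cmath>

const int HALF_SKY_SUBDIVISIONS = 8;   // example value; the renderer defines its own

struct SubdRange { int lo, hi; };      // inclusive subdivision indices

// Snap a [mins, maxs] range in [-1, 1] outward to the subdivision grid and
// clamp it, mirroring the floor/ceil plus clamp sequence above.
SubdRange skySubdRange(float mins, float maxs)
{
    const float snappedMin = std::floor(mins * HALF_SKY_SUBDIVISIONS) / HALF_SKY_SUBDIVISIONS;
    const float snappedMax = std::ceil (maxs * HALF_SKY_SUBDIVISIONS) / HALF_SKY_SUBDIVISIONS;

    SubdRange r;
    r.lo = std::max(-HALF_SKY_SUBDIVISIONS,
                    std::min(HALF_SKY_SUBDIVISIONS, (int)(snappedMin * HALF_SKY_SUBDIVISIONS)));
    r.hi = std::max(-HALF_SKY_SUBDIVISIONS,
                    std::min(HALF_SKY_SUBDIVISIONS, (int)(snappedMax * HALF_SKY_SUBDIVISIONS)));
    return r;
}

// The face is then tessellated for s, t in [lo + HALF_SKY_SUBDIVISIONS,
// hi + HALF_SKY_SUBDIVISIONS], i.e. the range shifted into array-index space.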
EmoticonsWidget::EmoticonsWidget(QWidget *parent) : QMenu(parent) { setStyleSheet(Style::getStylesheet(":/ui/emoticonWidget/emoticonWidget.css")); setLayout(&layout); layout.addWidget(&stack); QWidget* pageButtonsContainer = new QWidget; QHBoxLayout* buttonLayout = new QHBoxLayout; pageButtonsContainer->setLayout(buttonLayout); layout.addWidget(pageButtonsContainer); const int maxCols = 5; const int maxRows = 3; const int itemsPerPage = maxRows * maxCols; const QList<QStringList>& emoticons = SmileyPack::getInstance().getEmoticons(); int itemCount = emoticons.size(); int pageCount = ceil(float(itemCount) / float(itemsPerPage)); int currPage = 0; int currItem = 0; int row = 0; int col = 0; // create pages buttonLayout->addStretch(); for (int i = 0; i < pageCount; i++) { QGridLayout* pageLayout = new QGridLayout; pageLayout->addItem(new QSpacerItem(0, 0, QSizePolicy::Minimum, QSizePolicy::Expanding), maxRows, 0); pageLayout->addItem(new QSpacerItem(0, 0, QSizePolicy::Expanding, QSizePolicy::Minimum), 0, maxCols); QWidget* page = new QWidget; page->setLayout(pageLayout); stack.addWidget(page); // page buttons are only needed if there is more than 1 page if (pageCount > 1) { QRadioButton* pageButton = new QRadioButton; pageButton->setProperty("pageIndex", i); pageButton->setCursor(Qt::PointingHandCursor); pageButton->setChecked(i == 0); buttonLayout->addWidget(pageButton); connect(pageButton, &QRadioButton::clicked, this, &EmoticonsWidget::onPageButtonClicked); } } buttonLayout->addStretch(); for (const QStringList& set : emoticons) { QPushButton* button = new QPushButton; button->setIcon(SmileyPack::getInstance().getAsIcon(set[0])); button->setToolTip(set.join(" ")); button->setProperty("sequence", set[0]); button->setCursor(Qt::PointingHandCursor); button->setFlat(true); connect(button, &QPushButton::clicked, this, &EmoticonsWidget::onSmileyClicked); qobject_cast<QGridLayout*>(stack.widget(currPage)->layout())->addWidget(button, row, col); col++; currItem++; // next row if (col >= maxCols) { col = 0; row++; } // next page if (currItem >= itemsPerPage) { row = 0; currItem = 0; currPage++; } } // calculates sizeHint layout.activate(); }
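The widget spreads the smileys over ceil(itemCount / itemsPerPage) pages of maxRows x maxCols buttons, advancing column, then row, then page. A tiny Qt-free sketch of the same index walk:

#include <cmath>
#include <vector>

struct Cell { int page, row, col; };

// Compute the page/row/column of every item for a maxRows x maxCols grid,
// the same walk the constructor does while adding QPushButtons.
std::vector<Cell> layoutCells(int itemCount, int maxRows = 3, int maxCols = 5)
{
    const int itemsPerPage = maxRows * maxCols;
    std::vector<Cell> cells;
    cells.reserve(itemCount);

    int page = 0, row = 0, col = 0, onPage = 0;
    for (int i = 0; i < itemCount; ++i) {
        cells.push_back(Cell{ page, row, col });
        if (++col >= maxCols) { col = 0; ++row; }                       // next row
        if (++onPage >= itemsPerPage) { onPage = 0; row = 0; ++page; }  // next page
    }
    return cells;
}

// page count as in the constructor:
// int pageCount = (int)std::ceil(itemCount / (float)itemsPerPage);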
static void FillCloudBox( const shader_t *shader, int stage ) { int i; for ( i =0; i < 6; i++ ) { int sky_mins_subd[2], sky_maxs_subd[2]; int s, t; float MIN_T; if ( 1 ) // FIXME? shader->sky.fullClouds ) { MIN_T = -HALF_SKY_SUBDIVISIONS; // still don't want to draw the bottom, even if fullClouds if ( i == 5 ) continue; } else { switch( i ) { case 0: case 1: case 2: case 3: MIN_T = -1; break; case 5: // don't draw clouds beneath you continue; case 4: // top default: MIN_T = -HALF_SKY_SUBDIVISIONS; break; } } sky_mins[0][i] = floor( sky_mins[0][i] * HALF_SKY_SUBDIVISIONS ) / HALF_SKY_SUBDIVISIONS; sky_mins[1][i] = floor( sky_mins[1][i] * HALF_SKY_SUBDIVISIONS ) / HALF_SKY_SUBDIVISIONS; sky_maxs[0][i] = ceil( sky_maxs[0][i] * HALF_SKY_SUBDIVISIONS ) / HALF_SKY_SUBDIVISIONS; sky_maxs[1][i] = ceil( sky_maxs[1][i] * HALF_SKY_SUBDIVISIONS ) / HALF_SKY_SUBDIVISIONS; if ( ( sky_mins[0][i] >= sky_maxs[0][i] ) || ( sky_mins[1][i] >= sky_maxs[1][i] ) ) { continue; } sky_mins_subd[0] = ri.ftol(sky_mins[0][i] * HALF_SKY_SUBDIVISIONS); sky_mins_subd[1] = ri.ftol(sky_mins[1][i] * HALF_SKY_SUBDIVISIONS); sky_maxs_subd[0] = ri.ftol(sky_maxs[0][i] * HALF_SKY_SUBDIVISIONS); sky_maxs_subd[1] = ri.ftol(sky_maxs[1][i] * HALF_SKY_SUBDIVISIONS); if ( sky_mins_subd[0] < -HALF_SKY_SUBDIVISIONS ) sky_mins_subd[0] = -HALF_SKY_SUBDIVISIONS; else if ( sky_mins_subd[0] > HALF_SKY_SUBDIVISIONS ) sky_mins_subd[0] = HALF_SKY_SUBDIVISIONS; if ( sky_mins_subd[1] < MIN_T ) sky_mins_subd[1] = MIN_T; else if ( sky_mins_subd[1] > HALF_SKY_SUBDIVISIONS ) sky_mins_subd[1] = HALF_SKY_SUBDIVISIONS; if ( sky_maxs_subd[0] < -HALF_SKY_SUBDIVISIONS ) sky_maxs_subd[0] = -HALF_SKY_SUBDIVISIONS; else if ( sky_maxs_subd[0] > HALF_SKY_SUBDIVISIONS ) sky_maxs_subd[0] = HALF_SKY_SUBDIVISIONS; if ( sky_maxs_subd[1] < MIN_T ) sky_maxs_subd[1] = MIN_T; else if ( sky_maxs_subd[1] > HALF_SKY_SUBDIVISIONS ) sky_maxs_subd[1] = HALF_SKY_SUBDIVISIONS; // // iterate through the subdivisions // for ( t = sky_mins_subd[1]+HALF_SKY_SUBDIVISIONS; t <= sky_maxs_subd[1]+HALF_SKY_SUBDIVISIONS; t++ ) { for ( s = sky_mins_subd[0]+HALF_SKY_SUBDIVISIONS; s <= sky_maxs_subd[0]+HALF_SKY_SUBDIVISIONS; s++ ) { MakeSkyVec( ( s - HALF_SKY_SUBDIVISIONS ) / ( float ) HALF_SKY_SUBDIVISIONS, ( t - HALF_SKY_SUBDIVISIONS ) / ( float ) HALF_SKY_SUBDIVISIONS, i, NULL, s_skyPoints[t][s] ); s_skyTexCoords[t][s][0] = s_cloudTexCoords[i][t][s][0]; s_skyTexCoords[t][s][1] = s_cloudTexCoords[i][t][s][1]; } } // only add indexes for first stage FillCloudySkySide( sky_mins_subd, sky_maxs_subd, ( stage == 0 ) ); } }
void imageCallback(const sensor_msgs::ImageConstPtr& msg) { //bridge that will transform the message (image) from ROS code back to "image" code sensor_msgs::CvBridge bridge; fprintf(stderr, "\n call Back funtion \n"); //publish data (obstacle waypoints) back to the boat ros::NodeHandle n; std_msgs::Float32 xWaypoint_msg; // X coordinate obstacle message object std_msgs::Float32 zWaypoint_msg; // Y coordinate obstacle message object //std::stringstream ss; //publish the waypoint data ros::Publisher Xwaypoint_info_pub = n.advertise<std_msgs::Float32>("Xwaypoint_info", 0.01); ros::Publisher Zwaypoint_info_pub = n.advertise<std_msgs::Float32>("Zwaypoint_info", 0.01); /***********************************************************************/ //live image coming streamed straight from the boat's camera IplImage* boatFront = bridge.imgMsgToCv(msg, "bgr8"); //The boat takes flipped images, so you need to flip them back to normal cvFlip(boatFront, boatFront, 0); IplImage* backUpImage = cvCloneImage(boatFront); boatFront->origin = IPL_ORIGIN_TL; //sets image origin to top left corner int X = boatFront->height; int Y = boatFront->width; //cout << "height " << X << endl; //cout << "width " << Y << endl; /*********************Image Filtering variables****************************/ //these images are used for segmenting objects from the overall background //create a one channel image to convert from RGB to GRAY IplImage* grayImage = cvCreateImage(cvGetSize(boatFront),IPL_DEPTH_8U,1); //convert grayImage to binary (final step after converting from GRAY) IplImage* bwImage = cvCreateImage(cvGetSize(grayImage),IPL_DEPTH_8U,1); //variables used for the flood fill segmentation CvPoint seed_point = cvPoint(boatFront->height/2 + 70,0); //not sure how this variable works CvScalar color = CV_RGB(250,0,0); CvMemStorage* grayStorage = NULL; //memory storage for contour sequence CvSeq* contours = 0; // get blobs and filter them using their area //IplConvKernel* morphKernel = cvCreateStructuringElementEx(5, 5, 1, 1, CV_SHAPE_RECT, NULL); //IplImage* original, *originalThr; //IplImage* segmentated = cvCreateImage(cvGetSize(boatFront), 8, 1); //unsigned int blobNumber = 0; //IplImage* labelImg = cvCreateImage(cvGetSize(boatFront), IPL_DEPTH_LABEL, 1); CvMoments moment; /***********************************************************************/ //boat's edge distance from the camera. This is used for visual calibration //to know the distance from the boat to the nearest obstacles. //With respect to the mounted camera, distance is 21 inches (0.5334 m) side to side //and 15 inches (0.381 m). //float boatFrontDistance = 0.381; //distance in meters //float boatSideDistance = 0.5334; //distance in meters // These variables tell the distance from the center bottom of the image // (the camera) to the square surrounding a the obstacle float obstacleDistance = 0.0; //Euclidean distance to object float obstacleHeading = 0.0; //distance variables from the camera calibration matrix int xPixel = 0; //pixels from left to right int yPixel = 0; //pixels from bottom to top float zObstacleDistance = 0; //object distance from the camera float xObstacleDistance = 0; float yObstacleDistance = 0.1143; //distance in meters from water to camera. //its gonna be constant assuming boat barely moves up and down in the water int pixelsNumber = 50; //number of pixels for an n x n matrix and # of neighbors const int arraySize = pixelsNumber; const int threeArraySize = pixelsNumber; //if n gets changed, then the algorithm might have to be //recalibrated. 
Try to keep it constant //these variables are used for the k nearest neighbors //int accuracy; //reponses for each of the classifications float responseWaterH, responseWaterS, responseWaterV; float responseGroundH, responseGroundS, responseGroundV; float responseSkyH, responseSkyS, responseSkyV; float averageHue = 0.0; float averageSat = 0.0; float averageVal = 0.0; CvMat* trainClasses = cvCreateMat( pixelsNumber, 1, CV_32FC1 ); CvMat* trainClasses2 = cvCreateMat( pixelsNumber, 1, CV_32FC1 ); //CvMat sample = cvMat( 1, 2, CV_32FC1, _sample ); //used with the classifier CvMat* trainClassesH = cvCreateMat( pixelsNumber, 1, CV_32FC1 ); CvMat* trainClassesS = cvCreateMat( pixelsNumber, 1, CV_32FC1 ); CvMat* trainClassesV = cvCreateMat( pixelsNumber, 1, CV_32FC1 ); //CvMat* trainClasses2 = cvCreateMat( pixelsNumber, 1, CV_32FC1 ); //CvMat sample = cvMat( 1, 2, CV_32FC1, _sample ); //used with the classifier /*CvMat* nearestWaterH = cvCreateMat(1, pixelsNumber, CV_32FC1); CvMat* nearestWaterS = cvCreateMat(1, pixelsNumber, CV_32FC1); CvMat* nearestWaterV = cvCreateMat(1, pixelsNumber, CV_32FC1); CvMat* nearestGroundH = cvCreateMat(1, pixelsNumber, CV_32FC1); CvMat* nearestGroundS = cvCreateMat(1, pixelsNumber, CV_32FC1); CvMat* nearestGroundV = cvCreateMat(1, pixelsNumber, CV_32FC1); CvMat* nearestSkyH = cvCreateMat(1, pixelsNumber, CV_32FC1); CvMat* nearestSkyS = cvCreateMat(1, pixelsNumber, CV_32FC1); CvMat* nearestSkyV = cvCreateMat(1, pixelsNumber, CV_32FC1); //Distance CvMat* distanceWaterH = cvCreateMat(1, pixelsNumber, CV_32FC1); CvMat* distanceWaterS = cvCreateMat(1, pixelsNumber, CV_32FC1); CvMat* distanceWaterV = cvCreateMat(1, pixelsNumber, CV_32FC1); CvMat* distanceGroundH = cvCreateMat(1, pixelsNumber, CV_32FC1); CvMat* distanceGroundS = cvCreateMat(1, pixelsNumber, CV_32FC1); CvMat* distanceGroundV = cvCreateMat(1, pixelsNumber, CV_32FC1); CvMat* distanceSkyH = cvCreateMat(1, pixelsNumber, CV_32FC1); CvMat* distanceSkyS = cvCreateMat(1, pixelsNumber, CV_32FC1); CvMat* distanceSkyV = cvCreateMat(1, pixelsNumber, CV_32FC1); */ //these variables are use to traverse the picture by blocks of n x n pixels at //a time. 
//Index(0,0) does not exist, so make sure kj and ki start from 1 (in the //right way, of course) //x and y are the dimensions of the local patch of pixels int x = (boatFront->height)/2 + 70;//(boatFront->height)/2.5 + 105; int y = 0; int skyX = 0; int skyY = 0; int row1 = 0; int column1 = 0; //these two variables are used in order to divide the grid in the //resample segmentation part int xDivisor = 200; int yDivisor = 200; //ground sample //CvMat* groundTrainingHue = cvCreateMat(threeArraySize,arraySize,CV_32FC1); //CvMat* groundTrainingSat = cvCreateMat(threeArraySize,arraySize,CV_32FC1); //CvMat* groundTrainingVal = cvCreateMat(threeArraySize,arraySize,CV_32FC1); //water sample CvMat* waterTrainingHue = cvCreateMat(threeArraySize,arraySize,CV_32FC1); CvMat* waterTrainingSat = cvCreateMat(threeArraySize,arraySize,CV_32FC1); CvMat* waterTrainingVal = cvCreateMat(threeArraySize,arraySize,CV_32FC1); //n x n sample patch taken from the picture CvMat* sampleHue = cvCreateMat(1,arraySize,CV_32FC1); CvMat* sampleSat = cvCreateMat(1,arraySize,CV_32FC1); CvMat* sampleVal = cvCreateMat(1,arraySize,CV_32FC1); CvMat* resampleHue0 = cvCreateMat(threeArraySize,arraySize,CV_32FC1); CvMat* resampleSat0 = cvCreateMat(threeArraySize,arraySize,CV_32FC1); CvMat* resampleVal0 = cvCreateMat(threeArraySize,arraySize,CV_32FC1); CvMat* resampleHue = cvCreateMat(boatFront->height/xDivisor,boatFront->width/yDivisor,CV_32FC1); CvMat* resampleSat = cvCreateMat(boatFront->height/xDivisor,boatFront->width/yDivisor,CV_32FC1); CvMat* resampleVal = cvCreateMat(boatFront->height/xDivisor,boatFront->width/yDivisor,CV_32FC1); int xDiv = 20; int yDiv = 20; CvMat* resampleHue2 = cvCreateMat(boatFront->height/xDiv,boatFront->width/yDiv,CV_32FC1); CvMat* resampleSat2 = cvCreateMat(boatFront->height/xDiv,boatFront->width/yDiv,CV_32FC1); CvMat* resampleVal2 = cvCreateMat(boatFront->height/xDiv,boatFront->width/yDiv,CV_32FC1); //sky training sample CvMat* skyTrainingHue = cvCreateMat(arraySize,arraySize,CV_32FC1); CvMat* skyTrainingSat = cvCreateMat(arraySize,arraySize,CV_32FC1); CvMat* skyTrainingVal = cvCreateMat(arraySize,arraySize,CV_32FC1); //initialize each matrix element to zero for ease of use //cvZero(groundTrainingHue); //cvZero(groundTrainingSat); //cvZero(groundTrainingVal); cvZero(waterTrainingHue); cvZero(waterTrainingSat); cvZero(waterTrainingVal); cvZero(sampleHue); cvZero(sampleSat); cvZero(sampleVal); cvZero(resampleHue0); cvZero(resampleSat0); cvZero(resampleVal0); cvZero(resampleHue); cvZero(resampleSat); cvZero(resampleVal); cvZero(skyTrainingHue); cvZero(skyTrainingSat); cvZero(skyTrainingVal); //Stores the votes for each channel (whether it belongs to water or not //1 is part of water, 0 not part of water //if sum of votes is bigger than 1/2 the number of elements, then it belongs to water int votesSum = 0; int comparator[3]; //used when only three votes are needed //int comparatorTwo [3][3]; //used when six votes are needed //initial sum of votes is zero //Error if initialize both matrices inside a single for loop. Dont know why for(int i = 0; i < 3; i++) { comparator[i] = 0; } /***********************************************************************/ //Convert from RGB to HSV to control the brightness of the objects. //work with reflexion /*Sky recognition. Might be useful for detecting reflexion on the water. If the sky is detected, and the reflection has the same characteristics of something below the horizon, that "something" might be water. 
Assume sky wont go below the horizon */ //convert from RGB to HSV cvCvtColor(boatFront, boatFront, CV_BGR2HSV); cvCvtColor(backUpImage, backUpImage, CV_BGR2HSV); HsvImage I(boatFront); HsvImage IBackUp(backUpImage); //Sky detection /* for (int i=0; i<boatFront->height;i++) { for (int j=0; j<boatFront->width;j++) { //if something is bright enough, consider it sky and store the //value. HSV values go from 0 to 180 ... RGB goes from 0 to 255 if (((I[i][j].v >= 180) && (I[i][j].s <= 16))) // && ((I[i][j].h >=10)))) //&& (I[i][j].h <= 144)))) { //The HSV values vary between 0 and 1 cvmSet(skyTrainingHue,skyX,skyY,I[i][j].h); cvmSet(skyTrainingSat,skyX,skyY,I[i][j].s); cvmSet(skyTrainingVal,skyX,skyY,I[i][j].v); //I[i][j].h = 0.3*180; //H (color) //I[i][j].s = 0.3*180; //S (color intensity) //I[i][j].v = 0.6*180; //V (brightness) if (skyY == pixelsNumber-1) { if (skyX == pixelsNumber-1) skyX = 1; else skyX = skyX + 1; skyY = 1; } else skyY = skyY + 1; } } } /***********************************************************************/ //offline input pictures. Samples of water properties are taken from these //pictures to get a range of values for H, S, V that will be stored into a //pre-defined classifier IplImage* imageSample1 = cvLoadImage("20110805_032255.jpg"); cvSetImageROI(imageSample1, cvRect(0,0,imageSample1->height/0.5,imageSample1->width/1.83)); cvCvtColor(imageSample1, imageSample1, CV_BGR2HSV); HsvImage I1(imageSample1); IplImage* imageSample2 = cvLoadImage("20110805_032257.jpg"); cvCvtColor(imageSample2, imageSample2, CV_BGR2HSV); HsvImage I2(imageSample2); IplImage* imageSample3 = cvLoadImage("20110805_032259.jpg"); cvCvtColor(imageSample3, imageSample3, CV_BGR2HSV); HsvImage I3(imageSample3); IplImage* imageSample4 = cvLoadImage("20110805_032301.jpg"); cvCvtColor(imageSample4, imageSample4, CV_BGR2HSV); HsvImage I4(imageSample4); IplImage* imageSample5 = cvLoadImage("20110805_032303.jpg"); cvCvtColor(imageSample5, imageSample5, CV_BGR2HSV); HsvImage I5(imageSample5); IplImage* imageSample6 = cvLoadImage("20110805_032953.jpg"); cvCvtColor(imageSample6, imageSample6, CV_BGR2HSV); HsvImage I6(imageSample6); IplImage* imageSample7 = cvLoadImage("20110805_032955.jpg"); cvCvtColor(imageSample7, imageSample7, CV_BGR2HSV); HsvImage I7(imageSample7); IplImage* imageSample8 = cvLoadImage("20110805_032957.jpg"); cvCvtColor(imageSample8, imageSample8, CV_BGR2HSV); HsvImage I8(imageSample8); IplImage* imageSample9 = cvLoadImage("20110805_032959.jpg"); cvCvtColor(imageSample9, imageSample9, CV_BGR2HSV); HsvImage I9(imageSample9); IplImage* imageSample10 = cvLoadImage("20110805_033001.jpg"); cvCvtColor(imageSample10, imageSample10, CV_BGR2HSV); HsvImage I10(imageSample10); IplImage* imageSample11 = cvLoadImage("20110805_033009.jpg"); cvCvtColor(imageSample11, imageSample11, CV_BGR2HSV); HsvImage I11(imageSample11); IplImage* imageSample12 = cvLoadImage("20110805_033011.jpg"); cvCvtColor(imageSample12, imageSample12, CV_BGR2HSV); HsvImage I12(imageSample12); //IplImage* imageSample13 = cvLoadImage("20110812_110924.jpg"); //cvCvtColor(imageSample13, imageSample13, CV_BGR2HSV); //HsvImage I13(imageSample13); for (int i=0; i < threeArraySize; i++) { for (int j=0; j < arraySize; j++) { row1 = ceil(X/1.2866)+ceil(X/5.237)+i+ceil(-X/3.534545455) + ceil(X/4.8); column1 = ceil(Y/7.0755)+ceil(Y/21.01622)+j+ceil(X/1.495384615); averageHue = (I1[row1][column1].h + I2[row1][column1].h + I3[row1][column1].h + I4[row1][column1].h + I5[row1][column1].h + I6[row1][column1].h + I7[row1][column1].h + 
I8[row1][column1].h + I9[row1][column1].h + I10[row1][column1].h + I11[row1][column1].h + I12[row1][column1].h) / 12; averageSat = (I1[row1][column1].s + I2[row1][column1].s + I3[row1][column1].s + I4[row1][column1].s + I5[row1][column1].s + I6[row1][column1].s + I7[row1][column1].s + I8[row1][column1].s + I9[row1][column1].s + I10[row1][column1].s + I11[row1][column1].s + I12[row1][column1].s) / 12; averageVal = (I1[row1][column1].v + I2[row1][column1].v + I3[row1][column1].v + I4[row1][column1].v + I5[row1][column1].v + I6[row1][column1].v + I7[row1][column1].v + I8[row1][column1].v + I9[row1][column1].v + I10[row1][column1].v + I11[row1][column1].v + I12[row1][column1].v) / 12; //water patch sample (n X n matrix) cvmSet(waterTrainingHue,i,j,averageHue); cvmSet(waterTrainingSat,i,j,averageSat); cvmSet(waterTrainingVal,i,j,averageVal); //patch is red (this is for me to know where the ground patch sample is) //I[row1][column1].h = 0; //I[row1][column1].s = 255; //I[row1][column1].v = 255; } } //creating a training sample from the an image taken on the fly row1 = 0; column1 = 0; for (int i=0; i<pixelsNumber; i++) { for (int j=0; j<pixelsNumber; j++) { row1 = ceil(X/1.2866)+ceil(X/5.237)+i+ceil(-X/3.534545455) + ceil(X/4.8); column1 = ceil(Y/7.0755)+ceil(Y/21.01622)+j+ceil(X/1.495384615); cvmSet(trainClassesH,i,0,I[row1][column1].h); cvmSet(trainClassesS,i,0,I[row1][column1].s); cvmSet(trainClassesV,i,0,I[row1][column1].v); } } //order the water samples in ascending order on order to know a range cvSort(waterTrainingHue, waterTrainingHue, CV_SORT_ASCENDING); cvSort(waterTrainingSat, waterTrainingSat, CV_SORT_ASCENDING); cvSort(waterTrainingVal, waterTrainingVal, CV_SORT_ASCENDING); // find the maximum and minimum values in the array to create a range int maxH = cvmGet(waterTrainingHue,0,0); int maxS = cvmGet(waterTrainingSat,0,0); int maxV = cvmGet(waterTrainingVal,0,0); int minH = cvmGet(waterTrainingHue,0,0); int minS = cvmGet(waterTrainingSat,0,0); int minV = cvmGet(waterTrainingVal,0,0); for (int i=0; i < threeArraySize; i++) { for (int j=0; j < arraySize; j++) { if (cvmGet(waterTrainingHue,i,j) > maxH) maxH = cvmGet(waterTrainingHue,i,j); if (cvmGet(waterTrainingSat,i,j) > maxS) maxS = cvmGet(waterTrainingSat,i,j); if (cvmGet(waterTrainingVal,i,j) > maxV) maxV = cvmGet(waterTrainingVal,i,j); if (cvmGet(waterTrainingHue,i,j) < minH) minH = cvmGet(waterTrainingHue,i,j); if (cvmGet(waterTrainingSat,i,j) < minS) minS = cvmGet(waterTrainingSat,i,j); if (cvmGet(waterTrainingVal,i,j) < minV) minV = cvmGet(waterTrainingVal,i,j); } } /*********** Main loop. It traverses through the picture**********/ /******************** Live water samples *******************************/ //learn how "current water" looks like on the fly row1 = 0; column1 = 0; for (int i=0; i<pixelsNumber; i++) { for (int j=0; j<pixelsNumber; j++) { //front of boat might appear in the image. 
Account for that row1 = ceil(X/1.2866)+ceil(X/5.237)+i+ceil(-X/3.534545455) + ceil(X/4.8) - 55; column1 = ceil(Y/7.0755)+ceil(Y/21.01622)+j+ceil(X/1.495384615); cvmSet(resampleHue0,i,j,I[row1][column1].h); cvmSet(resampleSat0,i,j,I[row1][column1].s); cvmSet(resampleVal0,i,j,I[row1][column1].v); //visualize "resample" patch // I[row1][column1].h = 0; //I[row1][column1].s = 0; //I[row1][column1].v = 0; } } //order the water samples in ascending order on order to know a range cvSort(resampleHue0, resampleHue0, CV_SORT_ASCENDING); cvSort(resampleSat0, resampleSat0, CV_SORT_ASCENDING); cvSort(resampleVal0, resampleVal0, CV_SORT_ASCENDING); // find the maximum and minimum values in the array to create a range int maxH0 = cvmGet(resampleHue0,0,0); int maxS0 = cvmGet(resampleSat0,0,0); int maxV0 = cvmGet(resampleVal0,0,0); int minH0 = cvmGet(resampleHue0,0,0); int minS0 = cvmGet(resampleSat0,0,0); int minV0 = cvmGet(resampleVal0,0,0); for (int i=0; i < threeArraySize; i++) { for (int j=0; j < arraySize; j++) { if (cvmGet(resampleHue0,i,j) > maxH0) maxH0 = cvmGet(resampleHue0,i,j); if (cvmGet(resampleSat0,i,j) > maxS0) maxS0 = cvmGet(resampleSat0,i,j); if (cvmGet(resampleVal0,i,j) > maxV0) maxV0 = cvmGet(resampleVal0,i,j); if (cvmGet(resampleHue0,i,j) < minH0) minH0 = cvmGet(resampleHue0,i,j); if (cvmGet(resampleSat0,i,j) < minS0) minS0 = cvmGet(resampleSat0,i,j); if (cvmGet(resampleVal0,i,j) < minV0) minV0 = cvmGet(resampleVal0,i,j); } } for(int i = 0; i < 3; i++) { comparator[i] = 0; } //int counter = 0; column1 = 0; row1 = 0; x = boatFront->height/2 + 70; y = 0; while (x < X-1) { //get a random sample taken from the picture. Must be determined whether //it is water or ground for (int i = 0; i<6;i++) { column1 = y+i; if (column1 > Y-1) column1 = Y-1; cvmSet(sampleHue,0,i,I[x][column1].h); cvmSet(sampleSat,0,i,I[x][column1].s); cvmSet(sampleVal,0,i,I[x][column1].v); } for (int i=0;i<6;i++) { for (int j=0;j<6;j++) { if ((minH0 < cvmGet(sampleHue,0,j)) && (maxH0 > cvmGet(sampleHue,0,j))) //mark water samples as green comparator[0] = 1; else comparator[0] = 0; if ((minS0 < cvmGet(sampleSat,0,j)) && (maxS0 > cvmGet(sampleSat,0,j))) //mark water samples as green comparator[1] = 1; else comparator[1] = 0; if ((minV0 < cvmGet(sampleVal,0,j)) && (maxV0 > cvmGet(sampleVal,0,j))) //mark water samples as red comparator[2] = 1; else comparator[2] = 0; //count votes for (int i3=0; i3 < 3; i3++) votesSum = votesSum + comparator[i3]; if (votesSum > 1) { //use the known water samples as new training data //if((i<boatFront->height/xDivisor) && (j<boatFront->width/yDivisor)) //{ // cvmSet(resampleHue,i,j,cvmGet(sampleHue,0,j)); // cvmSet(resampleSat,i,j,cvmGet(sampleSat,0,j)); // cvmSet(resampleVal,i,j,cvmGet(sampleVal,0,j)); //} //6 use to be equal to pixelsNumber. I[x][y-6+j].h = 0; I[x][y-6+j].s = 255; I[x][y-6+j].v = 255; } votesSum = 0; } } if (y < Y-1) //5 use to be equal to pixelsNumber-1. y = y + 5; if (y > Y-1) y = Y-1; else if (y == Y-1) { //5 use to be equal to pixelsNumber-1 x = x + 1; y = 0; } //ix = 0; } /********************************************************************* // Use nearest neighbors to increase accuracy skyX = 0; skyY = 0; while (x < X-1) { //get a random sample taken from the picture. 
Must be determined whether //it is water or ground for (int i = 0; i<6;i++) { column1 = y+i; if (column1 > Y-1) column1 = Y-1; cvmSet(sampleHue,0,i,I[x][column1].h); cvmSet(sampleSat,0,i,I[x][column1].s); cvmSet(sampleVal,0,i,I[x][column1].v); } //Find the shortest distance between a pixel and the neighbors from each of //the training samples (sort of inefficient, but might do the job...sometimes) //HSV for water sample // learn classifier //CvKNearest knn(trainData, trainClasses, 0, false, itemsNumber); CvKNearest knnWaterHue(waterTrainingHue, trainClassesH, 0, false, pixelsNumber); CvKNearest knnWaterSat(waterTrainingSat, trainClassesS, 0, false, pixelsNumber); CvKNearest knnWaterVal(waterTrainingVal, trainClassesV, 0, false, pixelsNumber); //HSV for ground sample //CvKNearest knnGroundHue(groundTrainingHue, trainClasses2, 0, false, pixelsNumber); //CvKNearest knnGroundSat(groundTrainingSat, trainClasses2, 0, false, pixelsNumber); //CvKNearest knnGroundVal(groundTrainingVal, trainClasses2, 0, false, pixelsNumber); //HSV for sky sample //if (cvmGet(skyTrainingHue,0,0)!=0.0 && cvmGet(skyTrainingSat,0,0)!=0.0 && cvmGet(skyTrainingVal,0,0)!=0.0) //{ // CvKNearest knnSkyHue(skyTrainingHue, trainClasses, 0, false, pixelsNumber); // CvKNearest knnSkySat(skyTrainingSat, trainClasses, 0, false, pixelsNumber); // CvKNearest knnSkyVal(skyTrainingVal, trainClasses, 0, false, pixelsNumber); //} //scan nearest neighbors to each pixel responseWaterH = knnWaterHue.find_nearest(sampleHue,pixelsNumber,0,0,nearestWaterH,0); responseWaterS = knnWaterSat.find_nearest(sampleSat,pixelsNumber,0,0,nearestWaterS,0); responseWaterV = knnWaterVal.find_nearest(sampleVal,pixelsNumber,0,0,nearestWaterV,0); //responseGroundH = knnGroundHue.find_nearest(sampleHue,pixelsNumber,0,0,nearestGroundH,0); //responseGroundS = knnGroundSat.find_nearest(sampleSat,pixelsNumber,0,0,nearestGroundS,0); //responseGroundV = knnGroundVal.find_nearest(sampleVal,pixelsNumber,0,0,nearestGroundV,0); //for (int i=0;i<pixelsNumber;i++) //{ for (int j=0;j<pixelsNumber;j++) { if ((nearestWaterH->data.fl[j] == responseWaterH) )//&& (nearestWaterH->data.fl[j] == responseWaterH + 5)) // mark water samples as green comparator[0] = 1; else comparator[0] = 0; if ((nearestWaterS->data.fl[j] == responseWaterS) )//&& (nearestWaterS->data.fl[j] < responseWaterS + 5)) //mark water samples as green comparator[1] = 1; else comparator[1] = 0; if ((nearestWaterV->data.fl[j] == responseWaterV) )//&& (nearestWaterV->data.fl[j] < responseWaterV + 5)) //mark water samples as green comparator[2] = 1; else comparator[2] = 0; // similar sky pixels on the water //count votes for (int i3=0; i3 < 3; i3++) votesSum = votesSum + comparator[i3]; if (votesSum > 1) { I[x][y-6+j].h = 0; I[x][y-6+j].s = 255; I[x][y-6+j].v = 255; } votesSum = 0; } } if (y < Y-1) //5 use to be equal to pixelsNumber-1. y = y + 5; if (y > Y-1) y = Y-1; else if (y == Y-1) { //5 use to be equal to pixelsNumber-1 x = x + 1; y = 0; } // ix = 0; } /*********************************************************************/ for(int i = 0; i < 3; i++) { comparator[i] = 0; } //int counter = 0; column1 = 0; row1 = 0; x = boatFront->height/2 + 70; y = 0; while (x < X-1) { //get a random sample taken from the picture. 
Must be determined whether //it is water or ground for (int i = 0; i<6;i++) { column1 = y+i; if (column1 > Y-1) column1 = Y-1; cvmSet(sampleHue,0,i,I[x][column1].h); cvmSet(sampleSat,0,i,I[x][column1].s); cvmSet(sampleVal,0,i,I[x][column1].v); } for (int i=0;i<6;i++) { for (int j=0;j<6;j++) { if ((minH < cvmGet(sampleHue,0,j)) && (maxH > cvmGet(sampleHue,0,j))) //mark water samples as green comparator[0] = 1; else comparator[0] = 0; if ((minS < cvmGet(sampleSat,0,j)) && (maxS > cvmGet(sampleSat,0,j))) //mark water samples as green comparator[1] = 1; else comparator[1] = 0; if ((minV < cvmGet(sampleVal,0,j)) && (maxV > cvmGet(sampleVal,0,j))) //mark water samples as red comparator[2] = 1; else comparator[2] = 0; //count votes for (int i3=0; i3 < 3; i3++) votesSum = votesSum + comparator[i3]; if (votesSum > 1) { //use the known water samples as new training data if((i<boatFront->height/xDivisor) && (j<boatFront->width/yDivisor)) { cvmSet(resampleHue,i,j,cvmGet(sampleHue,0,j)); cvmSet(resampleSat,i,j,cvmGet(sampleSat,0,j)); cvmSet(resampleVal,i,j,cvmGet(sampleVal,0,j)); } //6 use to be equal to pixelsNumber. I[x][y-6+j].h = 0; I[x][y-6+j].s = 255; I[x][y-6+j].v = 255; } votesSum = 0; } } if (y < Y-1) //5 use to be equal to pixelsNumber-1. y = y + 5; if (y > Y-1) y = Y-1; else if (y == Y-1) { //5 use to be equal to pixelsNumber-1 x = x + 1; y = 0; } //ix = 0; } /***************Deal with reflection*****************/ for(int i = 0; i < 3; i++) { comparator[i] = 0; } //int counter = 0; votesSum = 0; column1 = 0; row1 = 0; x = boatFront->height/2 + 70; y = 0; while (x < X-1) { //get a random sample taken from the picture. Must be determined whether //it is water or ground for (int i = 0; i<6;i++) { column1 = y+i; if (column1 > Y-1) column1 = Y-1; cvmSet(sampleHue,0,i,I[x][column1].h); cvmSet(sampleSat,0,i,I[x][column1].s); cvmSet(sampleVal,0,i,I[x][column1].v); } for (int i=0;i<6;i++) { for (int j=0;j<6;j++) { if ((minH < cvmGet(sampleHue,0,j)) && (maxH > cvmGet(sampleHue,0,j))) //mark water samples as green comparator[0] = 1; else comparator[0] = 0; if ((0.8*255 > cvmGet(sampleSat,0,j)))// && (maxS < cvmGet(sampleSat,0,j))) //mark water samples as green comparator[1] = 1; else comparator[1] = 0; if ((0.6*255 < cvmGet(sampleVal,0,j)))// || (maxV < cvmGet(sampleVal,0,j))) //mark water samples as green comparator[2] = 1; else comparator[2] = 0; //count votes for (int i3=0; i3 < 3; i3++) votesSum = votesSum + comparator[i3]; if (votesSum > 1) { //use the known water samples as new training data if((i<boatFront->height/xDivisor) && (j<boatFront->width/yDivisor)) { cvmSet(resampleHue,i,j,cvmGet(sampleHue,0,j)); cvmSet(resampleSat,i,j,cvmGet(sampleSat,0,j)); cvmSet(resampleVal,i,j,cvmGet(sampleVal,0,j)); } //6 use to be equal to pixelsNumber. I[x][y-6+j].h = 0; I[x][y-6+j].s = 255; I[x][y-6+j].v = 255; } votesSum = 0; } } if (y < Y-1) //5 use to be equal to pixelsNumber-1. 
y = y + 5; if (y > Y-1) y = Y-1; else if (y == Y-1) { //5 use to be equal to pixelsNumber-1 x = x + 1; y = 0; } //ix = 0; } /**********Resample the entire patch**********/ /*********find a new min and max for a new sample range*************/ for(int i = 0; i < 3; i++) { comparator[i] = 0; } //int counter = 0; votesSum = 0; column1 = 0; row1 = 0; x = boatFront->height/2 + 70; y = 0; maxH = cvmGet(resampleHue,0,0); maxS = cvmGet(resampleSat,0,0); maxV = cvmGet(resampleVal,0,0); minH = cvmGet(resampleHue,0,0); minS = cvmGet(resampleSat,0,0); minV = cvmGet(resampleVal,0,0); for (int i=0; i < boatFront->height/xDivisor; i++) { for (int j=0; j < boatFront->width/yDivisor; j++) { if (cvmGet(resampleHue,i,j) > maxH) maxH = cvmGet(resampleHue,i,j); if (cvmGet(resampleSat,i,j) > maxS) maxS = cvmGet(resampleSat,i,j); if (cvmGet(resampleVal,i,j) > maxV) maxV = cvmGet(resampleVal,i,j); if (cvmGet(resampleHue,i,j) < minH) minH = cvmGet(resampleHue,i,j); if (cvmGet(resampleSat,i,j) < minS) minS = cvmGet(resampleSat,i,j); if (cvmGet(resampleVal,i,j) < minV) minV = cvmGet(resampleVal,i,j); } } while (x < X-1) { for (int i=0;i<6;i++) { for (int j=0;j<6;j++) { if ((minH < I[x][y-6+j].h) && (maxH > I[x][y-6+j].h)) //mark water samples as red I[x][y-6+j].h = 0; else comparator[0] = 0; if ((minS < I[x][y-6+j].s) && (maxS > I[x][y-6+j].s)) //mark water samples as red I[x][y-6+j].s = 255; else comparator[1] = 0; if ((minV < I[x][y-6+j].v) && (maxV > I[x][y-6+j].v)) //mark water samples as red I[x][y-6+j].v = 255; } } if (y < Y-1) //5 use to be equal to pixelsNumber-1. y = y + 5; if (y > Y-1) y = Y-1; else if (y == Y-1) { //5 use to be equal to pixelsNumber-1 x = x + 1; y = 0; } } //cout << "Sample data from current images" << endl; //for (int i = 0; i<20;i++) //{ // cout << "HUE: " << cvmGet(sampleHue,0,i) << endl; // cout << "Saturation: " << cvmGet(sampleSat,0,i) << endl; // cout << "Value: " << cvmGet(sampleVal,0,i) << endl; //} //traverse through the image one more time, divide the image in grids of // 500x500 pixels, and see how many pixels of water are in each grid. If // most of the pixels are labeled water, then mark all the other pixels // as water as well //int counter = 0; votesSum = 0; column1 = 0; row1 = 0; x = boatFront->height/2 + 70; y = 0; /***************Divide the picture in cells for filtering**********/ while (x < X-1) { //get a random sample taken from the picture. Must be determined whether //it is water or ground for (int i = 0; i < boatFront->height/xDivisor; i++) { for(int j = 0; j < boatFront->width/yDivisor; j++) { cvmSet(resampleHue2,i,j,I[x+i][y+j].h); cvmSet(resampleSat2,i,j,I[x+i][y+j].s); cvmSet(resampleVal2,i,j,I[x+i][y+j].v); if(cvmGet(resampleHue2,i,j)==0 && cvmGet(resampleSat2,i,j)==255 && cvmGet(resampleVal2,i,j)==255) { votesSum++; } } } if (votesSum > (((boatFront->height/xDivisor)*(boatFront->width/yDivisor))*(4/5))) { // if bigger than 4/5 the total number of pixels in a square, then consider the entire thing as water // We might need to use other smaller quantities (like 5/6 maybe?) 
for (int i = 0; i < boatFront->height/xDivisor;i++) { for (int j = 0; j < boatFront->width/yDivisor; j++) { row1 = x + i; if (row1 > X-1) row1 = X-1; column1 = y+j; if (column1 > Y-1) column1 = Y-1; I[row1][column1].h = 0; I[row1][column1].s = 255; I[row1][column1].v = 255; } } } else { // If not water, eliminate all red pixels and turn those pixels // back to the original color for (int i = 0; i < boatFront->height/xDivisor;i++) { for (int j = 0; j < boatFront->width/yDivisor; j++) { row1 = x + i; if (row1 > X-1) row1 = X-1; column1 = y+j; if (column1 > Y-1) column1 = Y-1; I[row1][column1].h = IBackUp[row1][column1].h;//255;//IBackUp[row1][column1].h; I[row1][column1].s = IBackUp[row1][column1].s;//255;//IBackUp[row1][column1].s; I[row1][column1].v = IBackUp[row1][column1].v;//255;//IBackUp[row1][column1].v; } } } y = y + boatFront->width/xDivisor; if (y > Y-1) { x = x + boatFront->height/yDivisor; y = 0; } votesSum = 0; } /********************Isolate obstacles************************/ votesSum = 0; int paint = 0; column1 = 0; row1 = 0; x = boatFront->height/2 + 40; y = 0; xDiv = 40; yDiv = 40; /***************Divide the picture in cells for filtering**********/ // Small pixel areas (noise) are going to be eliminated from the picture // living only the big obstacles while (x < X-2) { //get a random sample taken from the picture. Must be determined whether //it is water or ground for (int i = 0; i < boatFront->height/xDiv; i++) { for(int j = 0; j < boatFront->width/yDiv; j++) { row1 = x + i; if (row1 > X-2) row1 = X-2; column1 = y+j; if (column1 > Y-1) column1 = Y-1; cvmSet(resampleHue2,i,j,I[row1][column1].h); cvmSet(resampleSat2,i,j,I[row1][column1].s); cvmSet(resampleVal2,i,j,I[row1][column1].v); if(cvmGet(resampleHue2,i,j)==0 && cvmGet(resampleSat2,i,j)==255 && cvmGet(resampleVal2,i,j)==255) { votesSum++; } } } if (votesSum > (((boatFront->height/xDiv)*(boatFront->width/yDiv))*(4/5))) { // if bigger than 4/5 the total number of pixels in a square, then consider the entire thing as water // We might need to use other smaller quantities (like 5/6 maybe?) 
for (int i = 0; i < boatFront->height/xDiv;i++) { for (int j = 0; j < boatFront->width/yDiv; j++) { row1 = x + i; if (row1 > X-2) row1 = X-2; column1 = y+j; if (column1 > Y-1) column1 = Y-1; I[row1][column1].h = 0; I[row1][column1].s = 255; I[row1][column1].v = 255; } } } else { int count = 0; // If not water, eliminate all red pixels and turn those pixels // back to the original color for (int i = 0; i < boatFront->height/xDiv;i++) { for (int j = 0; j < boatFront->width/yDiv; j++) { row1 = x + i; if (row1 > X-2) row1 = X-2; column1 = y+j; if (column1 > Y-1) column1 = Y-1; I[row1][column1].h = IBackUp[row1][column1].h;//255; I[row1][column1].s = IBackUp[row1][column1].s;//255; I[row1][column1].v = IBackUp[row1][column1].v;//255; // count++; } } } y = y + boatFront->width/yDiv; if (y > Y-1) { x = x + boatFront->height/xDiv; if (x > X-2) x = X-2; y = 0; } votesSum = 0; } /****************Find Obstacles boundaries*********************************/ if( grayStorage == NULL ) { grayStorage = cvCreateMemStorage(0); } else { cvClearMemStorage(grayStorage); } //backUpImage = cvCloneImage(boatFront); //IBackUp(backUpImage); //Ignore unused parts of the image and convert them to black for (int i=0; i<backUpImage->height;i++) { for (int j=0; j<backUpImage->width;j++) { if(i < backUpImage->height/2 + 70) { IBackUp[i][j].h = 0; IBackUp[i][j].s = 0; IBackUp[i][j].v = 0; } else { IBackUp[i][j].h = I[i][j].h; IBackUp[i][j].s = I[i][j].s; IBackUp[i][j].v = I[i][j].v; } } } //convert from HSV to RGB cvCvtColor(boatFront, boatFront, CV_HSV2BGR); cvCvtColor(backUpImage, backUpImage, CV_HSV2BGR); //do flood fill for obstacles cvFloodFill( backUpImage, seed_point, color, cvScalarAll(255), cvScalarAll(2), NULL, 8, NULL); //convert to to gray to do more obstacle segmentation cvCvtColor(backUpImage, grayImage, CV_BGR2GRAY); //convert to binary cvThreshold(grayImage, bwImage, 100, 255, CV_THRESH_BINARY | CV_THRESH_OTSU); //eliminate small unnecessary pixel areas //bwImage is a pointer, so no need to reuse findCountours int findCountours = bwareaopen_(bwImage, 100); //find contours of obstacles in image cvFindContours(bwImage, grayStorage, &contours); cvZero( bwImage ); //redraw clean contours for( CvSeq* c=contours; c!=NULL; c=c->h_next) { cvDrawContours(bwImage, c, cvScalarAll(255), cvScalarAll(255), 8); //ignore obstacles/contours with are less than 100 pixels or bigger than 100000 pixels if ((cvContourArea(c, CV_WHOLE_SEQ) >= 60) && (cvContourArea(c, CV_WHOLE_SEQ) <= 100000)) { cout << "Contour area: " << cvContourArea(c, CV_WHOLE_SEQ) << endl; //area in pixels //find the x,y coordinate of the center of a contour cvMoments(c, &moment, 0); //centroid/moment of the contour/obstacle cout << "Contour center in x,y: " << moment.m10/moment.m00 << ", " << moment.m01/moment.m00 << endl; //The distance formula calculated by plotting points is given by: // Xc/Zc = Xp-cc(1)/Fc(1) // Yc/Zc = Yp-cc(2)/Fc(2) //For boat one Yc = 4.5 inches = 0.0635 meters //These formulas only work for 640X480 images // x,y coordinates of the obstacle from the bottom center of the image //Ignore everything less than 0.3 meters apart (anything too close to the boat) zObstacleDistance = 5*(yObstacleDistance*619.33108)/(X - (moment.m01/moment.m00)); xObstacleDistance = 5*zObstacleDistance*((moment.m10/moment.m00)-324.36738)/618.62586; //copy data to be published xWaypoint_msg.data = xObstacleDistance; zWaypoint_msg.data = zObstacleDistance; //publish data Xwaypoint_info_pub.publish(xWaypoint_msg); Zwaypoint_info_pub.publish(zWaypoint_msg); //try 
// to ignore obstacles that are too close. Robot shall tell operator if there is
//a problem with a close-by obstacle
//obstacle distance
obstacleDistance = sqrt(pow(xObstacleDistance,2) + pow(yObstacleDistance,2) + pow(zObstacleDistance,2));
//Just use the 2D angle; atan2 (converted to degrees) replaces the original tan((z/x)*PI/180), which did not compute an angle
obstacleHeading = atan2(zObstacleDistance, xObstacleDistance)*180/PI;
cout << "Obstacle polar coordinates: " << endl;
cout << "z: " << zObstacleDistance << " x: " << xObstacleDistance << endl;
cout << "Distance (meters): " << obstacleDistance << endl;
cout << "Direction (degrees): " << obstacleHeading << endl << endl;
} }
/**************************************************************************/
//deal with memory management. How do I get rid of the arrays and pointers I am not using inside the callback function?
try
{
    //fprintf(stderr,"\n boatFront\n");
    cvShowImage("Boat Front", backUpImage);
    //cvShowImage("Color Segment", backUpImage);
    //cvShowImage("Obstacles", bwImage);
}
catch (sensor_msgs::CvBridgeException& e)
{
    ROS_ERROR("Could not convert from '%s' to 'bgr8'.", msg->encoding.c_str());
}
}
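// Each water-detection sweep above applies the same test: sample a short run of
// pixels, compare every channel against the learned [min, max] water range, and
// mark the pixel as water when at least two of the three HSV votes agree
// ("votesSum > 1"). A minimal sketch of that decision follows; HsvRange and
// isWaterPixel are illustrative names that do not appear in the original code.
struct HsvRange { double minH, maxH, minS, maxS, minV, maxV; };

static bool isWaterPixel(double h, double s, double v, const HsvRange &r)
{
    int votes = 0;
    if (h > r.minH && h < r.maxH) votes++;   // hue vote
    if (s > r.minS && s < r.maxS) votes++;   // saturation vote
    if (v > r.minV && v < r.maxV) votes++;   // value (brightness) vote
    return votes > 1;                        // majority: at least 2 of 3 channels
}
// In the callback, a positive vote recolours the pixel to H=0, S=255, V=255 so
// that the later grid passes can count marked pixels.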
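// The grid passes above treat a whole cell as water when votesSum exceeds
// (cellHeight*cellWidth)*(4/5). Because 4/5 is integer division it evaluates to
// 0, so any cell with a single marked pixel currently passes. A hedged fix,
// assuming the intended threshold really is 4/5 of the cell; cellMostlyWater is
// an illustrative helper, not part of the original code.
static bool cellMostlyWater(int markedPixels, int cellWidth, int cellHeight)
{
    return markedPixels > (4.0/5.0) * cellWidth * cellHeight;   // > 80% of the cell marked
}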
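// The contour block recovers obstacle coordinates from each centroid with a
// ground-plane pinhole model. Reading the baked-in constants as focal lengths
// and a principal-point x (fx ~ 618.63, fy ~ 619.33, cx ~ 324.37) is an
// assumption; the original additionally multiplies by an empirical factor of 5,
// which this sketch leaves out. GroundPoint and pixelToGroundPlane are
// illustrative names.
struct GroundPoint { double x, z; };

static GroundPoint pixelToGroundPlane(double u, double v,
                                      double camHeight, double imageHeight)
{
    const double fx = 618.62586, fy = 619.33108, cx = 324.36738;
    GroundPoint p;
    p.z = fy*camHeight/(imageHeight - v);   // forward range, as in (yObstacleDistance*619.33108)/(X - m01/m00)
    p.x = p.z*(u - cx)/fx;                  // lateral offset, as in z*((m10/m00) - 324.36738)/618.62586
    return p;
}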
// Generate a sprite font from TTF file data (font size required) // TODO: Review texture packing method and generation (use oversampling) static SpriteFont LoadTTF(const char *fileName, int fontSize, int numChars, int *fontChars) { // NOTE: Font texture size is predicted (being as much conservative as possible) // Predictive method consist of supposing same number of chars by line-column (sqrtf) // and a maximum character width of 3/4 of fontSize... it worked ok with all my tests... int textureSize = GetNextPOT(ceil((float)fontSize*3/4)*ceil(sqrtf((float)numChars))); TraceLog(INFO, "TTF spritefont loading: Predicted texture size: %ix%i", textureSize, textureSize); unsigned char *ttfBuffer = (unsigned char *)malloc(1 << 25); unsigned char *dataBitmap = (unsigned char *)malloc(textureSize*textureSize*sizeof(unsigned char)); // One channel bitmap returned! stbtt_bakedchar *charData = (stbtt_bakedchar *)malloc(sizeof(stbtt_bakedchar)*numChars); SpriteFont font = { 0 }; FILE *ttfFile = fopen(fileName, "rb"); if (ttfFile == NULL) { TraceLog(WARNING, "[%s] TTF file could not be opened", fileName); return font; } fread(ttfBuffer, 1, 1<<25, ttfFile); if (fontChars[0] != 32) TraceLog(WARNING, "TTF spritefont loading: first character is not SPACE(32) character"); // NOTE: Using stb_truetype crappy packing method, no guarante the font fits the image... // TODO: Replace this function by a proper packing method and support random chars order, // we already receive a list (fontChars) with the ordered expected characters int result = stbtt_BakeFontBitmap(ttfBuffer, 0, fontSize, dataBitmap, textureSize, textureSize, fontChars[0], numChars, charData); //if (result > 0) TraceLog(INFO, "TTF spritefont loading: first unused row of generated bitmap: %i", result); if (result < 0) TraceLog(WARNING, "TTF spritefont loading: Not all the characters fit in the font"); free(ttfBuffer); // Convert image data from grayscale to to UNCOMPRESSED_GRAY_ALPHA unsigned char *dataGrayAlpha = (unsigned char *)malloc(textureSize*textureSize*sizeof(unsigned char)*2); // Two channels for (int i = 0, k = 0; i < textureSize*textureSize; i++, k += 2) { dataGrayAlpha[k] = 255; dataGrayAlpha[k + 1] = dataBitmap[i]; } free(dataBitmap); // Sprite font generation from TTF extracted data Image image; image.width = textureSize; image.height = textureSize; image.mipmaps = 1; image.format = UNCOMPRESSED_GRAY_ALPHA; image.data = dataGrayAlpha; font.texture = LoadTextureFromImage(image); //WritePNG("generated_ttf_image.png", (unsigned char *)image.data, image.width, image.height, 2); UnloadImage(image); // Unloads dataGrayAlpha font.size = fontSize; font.numChars = numChars; font.charValues = (int *)malloc(font.numChars*sizeof(int)); font.charRecs = (Rectangle *)malloc(font.numChars*sizeof(Rectangle)); font.charOffsets = (Vector2 *)malloc(font.numChars*sizeof(Vector2)); font.charAdvanceX = (int *)malloc(font.numChars*sizeof(int)); for (int i = 0; i < font.numChars; i++) { font.charValues[i] = fontChars[i]; font.charRecs[i].x = (int)charData[i].x0; font.charRecs[i].y = (int)charData[i].y0; font.charRecs[i].width = (int)charData[i].x1 - (int)charData[i].x0; font.charRecs[i].height = (int)charData[i].y1 - (int)charData[i].y0; font.charOffsets[i] = (Vector2){ charData[i].xoff, charData[i].yoff }; font.charAdvanceX[i] = (int)charData[i].xadvance; } free(charData); return font; }
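// The NOTE above predicts the atlas size as
// GetNextPOT(ceil(fontSize*3/4)*ceil(sqrtf(numChars))): a square grid of
// ceil(sqrt(numChars)) cells, each assumed at most 3/4*fontSize pixels wide.
// A stand-alone sketch of that estimate; nextPOT() is a local stand-in for the
// library's GetNextPOT().
#include <math.h>

static int nextPOT(int v)                 // smallest power of two >= v
{
    int p = 1;
    while (p < v) p <<= 1;
    return p;
}

static int predictAtlasSize(int fontSize, int numChars)
{
    int cellSize = (int)ceil((float)fontSize*3/4);       // widest expected glyph cell
    int cellsPerRow = (int)ceil(sqrtf((float)numChars)); // square character grid
    return nextPOT(cellSize*cellsPerRow);
}
// Example: fontSize = 32, numChars = 95 (printable ASCII) gives 24*10 = 240,
// rounded up to a 256x256 atlas.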
double trunc(double d) { return (d > 0) ? floor(d) : ceil(d); }
double pp_model::log_likelihood_up_to(double t){ if(m_poisson_regression) return poisson_regression_log_likelihood_interval(0,static_cast<int>(ceil(t))); m_r = m_data_cont ? m_data_cont->find_data_index(t) : 0; return log_likelihood_interval_with_count(0,t,m_r); }
double round(double n) { return n < 0.0 ? ceil(n - 0.5) : floor(n + 0.5); }
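// Both one-liners above round by delegating to floor()/ceil(): trunc() rounds
// toward zero and round() rounds half away from zero, so they differ for
// negative inputs. A self-contained check; the helpers are renamed my_trunc /
// my_round here only to avoid colliding with the C99 functions declared in
// <math.h>.
#include <math.h>
#include <stdio.h>

static double my_trunc(double d) { return (d > 0) ? floor(d) : ceil(d); }
static double my_round(double n) { return n < 0.0 ? ceil(n - 0.5) : floor(n + 0.5); }

int main(void)
{
    printf("%g %g\n", my_round(2.5),  my_trunc(2.5));   // 3 2
    printf("%g %g\n", my_round(-2.5), my_trunc(-2.5));  // -3 -2
    printf("%g %g\n", my_round(-2.4), my_trunc(-2.9));  // -2 -2
    return 0;
}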
int32 Layer::calc_mini_image() { // This function might crash if it is executing while the layer is // destroyed. This is because the bitmap and miniature image are destroyed. // We need something to stop this thread when the layer is being destroyed. union { uint8 bytes[4]; uint32 word; } white, color; white.bytes[0] = 0xFF; white.bytes[1] = 0xFF; white.bytes[2] = 0xFF; white.bytes[3] = 0x00; // increase the number of waiting threads atomic_add(&fLayerPreviewThreads,1); // aquire the semaphore that is required to access the fLayerPreview acquire_sem(fLayerPreviewSem); // decrease the number of waiting threads atomic_add(&fLayerPreviewThreads,-1); int32 miniature_width = (int32)(HS_MINIATURE_IMAGE_WIDTH * (min_c(fLayerData->Bounds().Width()/fLayerData->Bounds().Height(),1))); int32 miniature_height = (int32)(HS_MINIATURE_IMAGE_HEIGHT * (min_c(fLayerData->Bounds().Height()/fLayerData->Bounds().Width(),1))); // Here we copy the contents of fLayerData to miniature image. // by using a DDA-scaling algorithm first take the dx and dy variables float dx = (fLayerData->Bounds().Width() + 1)/(float)miniature_width; float dy = (fLayerData->Bounds().Height() + 1)/(float)miniature_height; int32 x=0,y=0; int32 x_offset_left = (int32)floor((float)(HS_MINIATURE_IMAGE_WIDTH-miniature_width)/2.0); int32 x_offset_right = (int32)ceil((float)(HS_MINIATURE_IMAGE_WIDTH-miniature_width)/2.0); int32 y_offset = (HS_MINIATURE_IMAGE_HEIGHT-miniature_height)/2; // The bitmap might be changed and deleted while we are accessing it. int32 b_bpr = fLayerData->BytesPerRow()/4; uint32* big_image; uint32* small_image = (uint32*)fLayerPreview->Bits(); big_image = (uint32*)fLayerData->Bits(); // Clear the parts that we do not set. for (int32 i=0; i<HS_MINIATURE_IMAGE_WIDTH*y_offset; i++) *small_image++ = white.word; while ((y < miniature_height) && (fLayerPreviewThreads == 0)) { for (int32 i=0; i<x_offset_left; i++) *small_image++ = white.word; while ((x < miniature_width) && (fLayerPreviewThreads == 0)) { color.word = *(big_image + ((int32)(y*dy))*b_bpr + (int32)(x*dx)); color.bytes[0] = (uint8)(color.bytes[0] * float_alpha_table[color.bytes[3]] + 255 * (1.0 - float_alpha_table[color.bytes[3]])); color.bytes[1] = (uint8)(color.bytes[1] * float_alpha_table[color.bytes[3]] + 255 * (1.0 - float_alpha_table[color.bytes[3]])); color.bytes[2] = (uint8)(color.bytes[2] * float_alpha_table[color.bytes[3]] + 255 * (1.0 - float_alpha_table[color.bytes[3]])); *small_image++ = color.word; x++; } y++; for (int32 i=0; i<x_offset_right; i++) *small_image++ = white.word; x = 0; } // Clear the rest of the image while (small_image != ((uint32*)fLayerPreview->Bits() + fLayerPreview->BitsLength()/4)) *small_image++ = white.word; if (fLayerPreviewThreads == 0) { snooze(50 * 1000); if (fLayerPreviewThreads == 0) { if (fLayerView->LockLooper()) { fLayerView->UpdateImage(); BView* bmap_view; if ((bmap_view = fLayerView->Window()->FindView("bitmap_view")) != NULL) { bmap_view->Draw(bmap_view->Bounds()); } fLayerView->UnlockLooper(); } } } release_sem(fLayerPreviewSem); return B_OK; }
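// calc_mini_image() above shrinks the layer with a nearest-neighbour DDA step
// (dx, dy) and composites every BGRA pixel over white using the precomputed
// float_alpha_table. A minimal sketch of that per-pixel math on a plain byte
// buffer; makePreview() and its raw-pointer interface are illustrative and not
// part of the original class.
#include <stdint.h>

static void makePreview(const uint8_t *src, int srcW, int srcH,
                        uint8_t *dst, int dstW, int dstH)
{
    const float dx = (float)srcW/dstW;                   // source step per destination pixel
    const float dy = (float)srcH/dstH;
    for (int y = 0; y < dstH; y++) {
        for (int x = 0; x < dstW; x++) {
            const uint8_t *p = src + 4*((int)(y*dy)*srcW + (int)(x*dx));
            const float a = p[3]/255.0f;                 // same role as float_alpha_table[]
            uint8_t *q = dst + 4*(y*dstW + x);
            for (int c = 0; c < 3; c++)                  // composite B,G,R over white
                q[c] = (uint8_t)(p[c]*a + 255*(1.0f - a));
            q[3] = 0;                                    // preview alpha byte, as in white.bytes[3]
        }
    }
}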