// Allocates a SizeX x SizeY x SizeZ voxel grid (releasing any grid that
// already exists) and initializes every voxel's iso-value to InitialIsoValue.
//
// @param SizeX            Number of voxels along X.
// @param SizeY            Number of voxels along Y.
// @param SizeZ            Number of voxels along Z.
// @param InitialIsoValue  Iso-value written into every voxel of the new grid.
void UMarchingCubes::CreateGrid(int32 SizeX, int32 SizeY, int32 SizeZ, float InitialIsoValue)
{
	// Release whatever grid is currently held before building a new one.
	DestroyGrid();

	// Record the new grid dimensions.
	GridSize.X = SizeX;
	GridSize.Y = SizeY;
	GridSize.Z = SizeZ;

	// Build the three-level jagged array of iso-values, filling as we go.
	m_pVoxels = new float**[SizeX];
	for (int32 Col = 0; Col < SizeX; ++Col)
	{
		float** Plane = new float*[SizeY];
		m_pVoxels[Col] = Plane;
		for (int32 Row = 0; Row < SizeY; ++Row)
		{
			float* Depths = new float[SizeZ];
			Plane[Row] = Depths;
			for (int32 Depth = 0; Depth < SizeZ; ++Depth)
			{
				Depths[Depth] = InitialIsoValue;
			}
		}
	}
}
// Destructor: releases the voxel grid (if any) via DestroyGrid().
UMarchingCubes::~UMarchingCubes() { DestroyGrid(); }
void ParallelExecute () { long my_id; long num_boxes; unsigned long start, finish = 0; time_info *local_time; long time_all = 0; time_info *timing; unsigned long local_init_done = 0; BARINCLUDE(G_Memory->synch); local_time = (time_info *) malloc(sizeof(struct _Time_Info) * MAX_TIME_STEPS); BARRIER(G_Memory->synch, Number_Of_Processors); LOCK(G_Memory->count_lock); my_id = G_Memory->id; G_Memory->id++; UNLOCK(G_Memory->count_lock); /* POSSIBLE ENHANCEMENT: Here is where one might pin processes to processors to avoid migration */ if (my_id == 0) { time_all = 1; } else if (do_stats) { time_all = 1; } if (my_id == 0) { /* have to allocate extra space since it will construct the grid by * itself for the first time step */ CreateParticleList(my_id, Total_Particles); InitParticleList(my_id, Total_Particles, 0); } else { CreateParticleList(my_id, ((Total_Particles * PDF) / Number_Of_Processors)); InitParticleList(my_id, 0, 0); } // num_boxes = 1333 * (Total_Particles / (OCCUPANCY * MAX_PARTICLES_PER_BOX)) /1000; num_boxes = 1333 * 4 * Total_Particles / (3 * MAX_PARTICLES_PER_BOX * 1000 ); if (my_id == 0) CreateBoxes(my_id, TOLERANCE * num_boxes); else CreateBoxes(my_id, TOLERANCE * num_boxes * BDF / Number_Of_Processors); if (my_id == 0) { LockedPrint("Starting FMM with %d processor%s\n", Number_Of_Processors, (Number_Of_Processors == 1) ? 
"" : "s"); } BARRIER(G_Memory->synch, Number_Of_Processors); Local[my_id].Time = 0.0; for (MY_TIME_STEP = 0; MY_TIME_STEP < Time_Steps; MY_TIME_STEP++) { if (MY_TIME_STEP == 2) { /* POSSIBLE ENHANCEMENT: Here is where one might reset the statistics that one is measuring about the parallel execution */ } if (MY_TIME_STEP == 2) { if (do_stats || my_id == 0) { CLOCK(local_init_done); } } if (MY_TIME_STEP == 0) { CLOCK(start); } else start = finish; ConstructGrid(my_id,local_time,time_all); ConstructLists(my_id,local_time,time_all); PartitionGrid(my_id,local_time,time_all); StepSimulation(my_id,local_time,time_all); DestroyGrid(my_id,local_time,time_all); CLOCK(finish); Local[my_id].Time += Timestep_Dur; MY_TIMING[MY_TIME_STEP].total_time = finish - start; } if (my_id == 0) { CLOCK(endtime); } BARRIER(G_Memory->synch, Number_Of_Processors); for (MY_TIME_STEP = 0; MY_TIME_STEP < Time_Steps; MY_TIME_STEP++) { timing = &(MY_TIMING[MY_TIME_STEP]); timing->other_time = local_time[MY_TIME_STEP].other_time; timing->construct_time = local_time[MY_TIME_STEP].construct_time; timing->list_time = local_time[MY_TIME_STEP].list_time; timing->partition_time = local_time[MY_TIME_STEP].partition_time; timing->pass_time = local_time[MY_TIME_STEP].pass_time; timing->inter_time = local_time[MY_TIME_STEP].inter_time; timing->barrier_time = local_time[MY_TIME_STEP].barrier_time; timing->intra_time = local_time[MY_TIME_STEP].intra_time; } Local[my_id].init_done_times = local_init_done; BARRIER(G_Memory->synch, Number_Of_Processors); }