Example #1
bool ScriptRunIterator::consume(unsigned& limit, UScriptCode& script) {
  if (m_currentSet.isEmpty()) {
    return false;
  }

  size_t pos;
  UChar32 ch;
  while (fetch(&pos, &ch)) {
    PairedBracketType pairedType = m_scriptData->getPairedBracketType(ch);
    switch (pairedType) {
      case PairedBracketType::BracketTypeOpen:
        openBracket(ch);
        break;
      case PairedBracketType::BracketTypeClose:
        closeBracket(ch);
        break;
      default:
        break;
    }
    if (!mergeSets()) {
      limit = pos;
      script = resolveCurrentScript();
      fixupStack(script);
      m_currentSet = m_nextSet;
      return true;
    }
  }

  limit = m_length;
  script = resolveCurrentScript();
  m_currentSet.clear();
  return true;
}
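
In Example #1, mergeSets() is what decides where a script run ends: as long as the merge succeeds the iterator keeps consuming characters, and the first failed merge makes consume() report the boundary at pos and restart from m_nextSet. A minimal sketch of an intersection-style merge consistent with that usage follows; the standard containers and the free-function form are assumptions for illustration, and the real Blink routine is more involved.

#include <algorithm>
#include <vector>
#include <unicode/uscript.h>

// Illustrative only: intersect the scripts of the next character with the
// running set; an empty intersection means the script run ends here. This
// sketch only shows the "empty intersection ends the run" idea that the
// control flow in consume() relies on.
static bool mergeScriptSets(std::vector<UScriptCode>& currentSet,
                            const std::vector<UScriptCode>& nextSet) {
  std::vector<UScriptCode> intersection;
  for (UScriptCode script : nextSet) {
    if (std::find(currentSet.begin(), currentSet.end(), script) !=
        currentSet.end())
      intersection.push_back(script);
  }
  if (intersection.empty())
    return false;  // No common script: the caller treats this as a run boundary.
  currentSet.swap(intersection);
  return true;
}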
Example #2
void
NodalFloodCount::finalize()
{
  // Exchange data in parallel
  pack(_packed_data);
  _communicator.allgather(_packed_data, false);
  unpack(_packed_data);

  mergeSets();

  // Populate _bubble_maps and _var_index_maps
  updateFieldInfo();

  // Update the region offsets so we can get unique bubble numbers in multimap mode
  updateRegionOffsets();

  // Calculate and output bubble volume data
  if (_pars.isParamValid("bubble_volume_file"))
  {
    calculateBubbleVolumes();
    std::vector<Real> data;
    data.reserve(_all_bubble_volumes.size() + 2);
    data.push_back(_fe_problem.timeStep());
    data.push_back(_fe_problem.time());
    data.insert(data.end(), _all_bubble_volumes.begin(), _all_bubble_volumes.end());
    writeCSVFile(getParam<FileName>("bubble_volume_file"), data);
  }

  // Calculate memory usage
  if (_track_memory)
  {
    _bytes_used += calculateUsage();
    _communicator.sum(_bytes_used);
    formatBytesUsed();
  }
}
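
In this finalize() routine, mergeSets() is the step that stitches the per-processor flood regions received through allgather back into global bubbles: regions that share at least one entity id belong to the same bubble and are combined. A simplified sketch of that kind of merge, using stand-in types rather than the actual MOOSE bookkeeping:

#include <iterator>
#include <list>
#include <set>

// Illustrative only: repeatedly union any two regions that share an entity id
// until no further merges are possible. MOOSE's mergeSets() operates on its
// own per-variable data structures; this sketch just shows the core idea.
using EntitySet = std::set<unsigned int>;

static bool setsIntersect(const EntitySet & a, const EntitySet & b)
{
  auto it_a = a.begin();
  auto it_b = b.begin();
  while (it_a != a.end() && it_b != b.end())
  {
    if (*it_a < *it_b)
      ++it_a;
    else if (*it_b < *it_a)
      ++it_b;
    else
      return true;
  }
  return false;
}

static void mergeOverlappingRegions(std::list<EntitySet> & regions)
{
  bool merged = true;
  while (merged)
  {
    merged = false;
    for (auto outer = regions.begin(); outer != regions.end(); ++outer)
      for (auto inner = std::next(outer); inner != regions.end(); /* erase or advance below */)
      {
        if (setsIntersect(*outer, *inner))
        {
          outer->insert(inner->begin(), inner->end());
          inner = regions.erase(inner);
          merged = true;
        }
        else
          ++inner;
      }
  }
}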
Example #3
void FeatureFloodCount::communicateAndMerge()
{
  // First we need to transform the raw data into a usable data structure
  populateDataStructuresFromFloodData();

  /*********************************************************************************
   *********************************************************************************
   * Begin Parallel Communication Section
   *********************************************************************************
   *********************************************************************************/

  /**
   * The libMesh packed range routines handle the communication of the individual
   * string buffers. Here we need to create a container to hold our type
   * to serialize. It'll always be size one because we are sending a single
   * byte stream of all the data to other processors. The stream need not be
   * the same size on all processors.
   */
  std::vector<std::string> send_buffers(1);

  /**
   * Additionally we need to create a different container to hold the received
   * byte buffers. The container type need not match the send container type.
   * However, we do know the number of incoming buffers (num processors), so we'll
   * go ahead and use a vector.
   */
  std::vector<std::string> recv_buffers;
  recv_buffers.reserve(_app.n_processors());

  serialize(send_buffers[0]);

  /**
   * Each processor needs information from all other processors to create a complete
   * global feature map.
   */
  _communicator.allgather_packed_range((void *)(NULL), send_buffers.begin(), send_buffers.end(),
                                       std::back_inserter(recv_buffers));

  deserialize(recv_buffers);

  /*********************************************************************************
   *********************************************************************************
   * End Parallel Communication Section
   *********************************************************************************
   *********************************************************************************/

  // We'll inflate the bounding boxes by a percentage of the domain
  RealVectorValue inflation;
  for (unsigned int i = 0; i < LIBMESH_DIM; ++i)
    inflation(i) = _mesh.dimensionWidth(i);

  // Let's try 1%
  inflation *= 0.01;
  inflateBoundingBoxes(inflation);

  mergeSets(true);
}
void
FeatureFloodCount::finalize()
{
  // Exchange data in parallel
  pack(_packed_data);
  _communicator.allgather(_packed_data, false);
  unpack(_packed_data);

  mergeSets(true);

  // Populate _bubble_maps and _var_index_maps
  updateFieldInfo();

  // Update the region offsets so we can get unique bubble numbers in multimap mode
  updateRegionOffsets();

  // Calculate and output bubble volume data
  if (_pars.isParamValid("bubble_volume_file"))
  {
    calculateBubbleVolumes();
    std::vector<Real> data;
    data.reserve(_all_bubble_volumes.size() + _total_volume_intersecting_boundary.size() + 2);

    // Insert the current timestep and the simulation time into the data vector
    data.push_back(_fe_problem.timeStep());
    data.push_back(_fe_problem.time());

    // Insert the (sorted) bubble volumes into the data vector
    data.insert(data.end(), _all_bubble_volumes.begin(), _all_bubble_volumes.end());

    // If we are computing the boundary-intersecting volumes, insert the
    // normalized boundary-intersecting bubble volumes into the data vector.
    if (_compute_boundary_intersecting_volume)
      data.insert(data.end(), _total_volume_intersecting_boundary.begin(), _total_volume_intersecting_boundary.end());

    // Finally, write the file
    writeCSVFile(getParam<FileName>("bubble_volume_file"), data);
  }

  // Calculate memory usage
  if (_track_memory)
  {
    _bytes_used += calculateUsage();
    _communicator.sum(_bytes_used);
    formatBytesUsed();
  }
}
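
Compared with Example #2, the communicateAndMerge() shown above additionally inflates every feature's bounding box by 1% of the domain width in each direction before merging, presumably so that pieces of the same feature held by different processors, whose boxes may only touch, still register as overlapping. A purely illustrative sketch of such an inflation step, with stand-in types instead of the MOOSE/libMesh ones:

// Illustrative only: grow an axis-aligned box by `inflation` on every side.
struct Box
{
  double lo[3];
  double hi[3];
};

static void inflateBox(Box & box, const double inflation[3])
{
  for (int d = 0; d < 3; ++d)
  {
    box.lo[d] -= inflation[d];
    box.hi[d] += inflation[d];
  }
}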
Example #5
void FeatureFloodCount::communicateAndMerge()
{
  // First we need to transform the raw data into a usable data structure
  prepareDataForTransfer();

  /*********************************************************************************
   *********************************************************************************
   * Begin Parallel Communication Section
   *********************************************************************************
   *********************************************************************************/

  /**
   * The libMesh packed range routines handle the communication of the individual
   * string buffers. Here we need to create a container to hold our type
   * to serialize. It'll always be size one because we are sending a single
   * byte stream of all the data to other processors. The stream need not be
   * the same size on all processors.
   */
  std::vector<std::string> send_buffers(1);

  /**
   * Additionally we need to create a different container to hold the received
   * byte buffers. The container type need not match the send container type.
   * However, we do know the number of incoming buffers (num processors), so we'll
   * go ahead and use a vector.
   */
  std::vector<std::string> recv_buffers;
  recv_buffers.reserve(_app.n_processors());

  serialize(send_buffers[0]);

  /**
   * Each processor needs information from all other processors to create a complete
   * global feature map.
   */
  _communicator.allgather_packed_range((void *)(nullptr), send_buffers.begin(), send_buffers.end(),
                                       std::back_inserter(recv_buffers));

  deserialize(recv_buffers);

  /*********************************************************************************
   *********************************************************************************
   * End Parallel Communication Section
   *********************************************************************************
   *********************************************************************************/

  mergeSets(true);
}
Example #6
void FeatureFloodCount::communicateAndMerge()
{
  // First we need to transform the raw data into a usable data structure
  prepareDataForTransfer();

  /**
   * The libMesh packed range routines handle the communication of the individual
   * string buffers. Here we need to create a container to hold our type
   * to serialize. It'll always be size one because we are sending a single
   * byte stream of all the data to other processors. The stream need not be
   * the same size on all processors.
   */
  std::vector<std::string> send_buffers(1);

  /**
   * Additionally we need to create a different container to hold the received
   * byte buffers. The container type need not match the send container type.
   * However, we do know the number of incoming buffers (num processors), so we'll
   * go ahead and use a vector.
   */
  std::vector<std::string> recv_buffers;
  if (_is_master)
    recv_buffers.reserve(_app.n_processors());

  serialize(send_buffers[0]);

  // Free up as much memory as possible here before we do global communication
  clearDataStructures();

  /**
   * Send the data from all processors to the root to create a complete
   * global feature map.
   */
  _communicator.gather_packed_range(0, (void *)(nullptr), send_buffers.begin(), send_buffers.end(),
                                    std::back_inserter(recv_buffers));

  if (_is_master)
  {
    // The root process now needs to deserialize and merge all of the data
    deserialize(recv_buffers);
    recv_buffers.clear();

    mergeSets(true);
  }

  // Make sure that feature count is communicated to all ranks
  _communicator.broadcast(_feature_count);
}
Example #7
bool Renderer::initialize(const std::vector <RealtimeExportPassInfo> & config, StringIdMap & stringMap, const std::string & shaderPath, unsigned int w, unsigned int h, const std::set <std::string> & globalDefines, std::ostream & errorOutput)
{
	// Clear existing passes.
	clear();

	// Add new passes.
	int passCount = 0;
	for (std::vector <RealtimeExportPassInfo>::const_iterator i = config.begin(); i != config.end(); i++, passCount++)
	{
		// Create unique names based on the path and define list.
		std::string vertexShaderName = i->vertexShader;
		if (!i->vertexShaderDefines.empty())
			vertexShaderName += " "+UTILS::implode(i->vertexShaderDefines," ");
		std::string fragmentShaderName = i->fragmentShader;
		if (!i->fragmentShaderDefines.empty())
			fragmentShaderName += " "+UTILS::implode(i->fragmentShaderDefines," ");

		// Load shaders from the pass if necessary.
		if ((shaders.find(vertexShaderName) == shaders.end()) && (!loadShader(shaderPath.empty() ? i->vertexShader : shaderPath+"/"+i->vertexShader, vertexShaderName, mergeSets(i->vertexShaderDefines, globalDefines), GL_VERTEX_SHADER, errorOutput)))
			return false;
		if ((shaders.find(fragmentShaderName) == shaders.end()) && (!loadShader(shaderPath.empty() ? i->fragmentShader : shaderPath+"/"+i->fragmentShader, fragmentShaderName, mergeSets(i->fragmentShaderDefines, globalDefines), GL_FRAGMENT_SHADER, errorOutput)))
			return false;

		// Note which draw groups the pass uses.
		for (std::vector <std::string>::const_iterator g = i->drawGroups.begin(); g != i->drawGroups.end(); g++)
			drawGroupToPasses[stringMap.addStringId(*g)].push_back(passes.size());

		// Initialize the pass.
		int passIdx = passes.size();
		passes.push_back(RenderPass());
		if (!passes.back().initialize(passCount, *i, stringMap, gl, shaders.find(vertexShaderName)->second, shaders.find(fragmentShaderName)->second, sharedTextures, w, h, errorOutput))
			return false;

		// Put the pass's output render targets into a map so we can feed them to subsequent passes.
		const std::map <StringId, RenderTexture> & passRTs = passes.back().getRenderTargets();
		for (std::map <StringId, RenderTexture>::const_iterator rt = passRTs.begin(); rt != passRTs.end(); rt++)
		{
			StringId nameId = rt->first;
			sharedTextures.insert(std::make_pair(nameId, RenderTextureEntry(nameId, rt->second.handle, rt->second.target)));
		}

		// Remember the pass index.
		passIndexMap[stringMap.addStringId(i->name)] = passIdx;
	}

	return true;
}
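
Unlike the flood-count examples, the mergeSets() used here is a value-returning helper: it combines the per-pass shader defines with the global defines before each shader is compiled. A minimal union-style sketch consistent with that call site; the parameter and return types are assumptions inferred from how the result is used, not VDrift's actual signature.

#include <set>
#include <string>
#include <vector>

// Illustrative only: the union of the per-pass defines and the global defines,
// in a form that can be handed straight to the shader loader.
static std::set<std::string> mergeSets(const std::vector<std::string> & passDefines,
                                       const std::set<std::string> & globalDefines)
{
	std::set<std::string> result(globalDefines);
	result.insert(passDefines.begin(), passDefines.end());
	return result;
}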
Example #8
static void
_pic14_do_link (void)
{
  /*
   * link command format:
   * {linker} {incdirs} {sysincdirs} {lflags} -w -r -o {outfile} {user_ofile} {spec_ofiles} {ofiles} {libs}
   *
   */
#define LFRM  "{linker} {incdirs} {sysincdirs} {lflags} -w -r -o {outfile} {user_ofile} {spec_ofiles} {ofiles} {libs}"
  hTab *linkValues = NULL;
  char *lcmd;
  set *tSet = NULL;
  int ret;
  char * procName;

  shash_add (&linkValues, "linker", "gplink");

  /* LIBRARY SEARCH DIRS */
  mergeSets (&tSet, libPathsSet);
  mergeSets (&tSet, libDirsSet);
  shash_add (&linkValues, "incdirs", joinStrSet (processStrSet (tSet, "-I", NULL, shell_escape)));

  shash_add (&linkValues, "sysincdirs", joinStrSet (processStrSet (libDirsSet, "-I", NULL, shell_escape)));

  shash_add (&linkValues, "lflags", joinStrSet (linkOptionsSet));

  {
    char *s = shell_escape (fullDstFileName ? fullDstFileName : dstFileName);

    shash_add (&linkValues, "outfile", s);
    Safe_free (s);
  }

  if (fullSrcFileName)
    {
      struct dbuf_s dbuf;
      char *s;

      dbuf_init (&dbuf, 128);

      dbuf_append_str (&dbuf, fullDstFileName ? fullDstFileName : dstFileName);
      dbuf_append (&dbuf, ".o", 2);
      s = shell_escape (dbuf_c_str (&dbuf));
      dbuf_destroy (&dbuf);
      shash_add (&linkValues, "user_ofile", s);
      Safe_free (s);
    }

  shash_add (&linkValues, "ofiles", joinStrSet (processStrSet (relFilesSet, NULL, NULL, shell_escape)));

  /* LIBRARIES */
  procName = processor_base_name ();
  if (!procName)
    procName = "16f877";

  addSet (&libFilesSet, Safe_strdup (pic14_getPIC()->isEnhancedCore ?
          "libsdcce.lib" : "libsdcc.lib"));

    {
      struct dbuf_s dbuf;

      dbuf_init (&dbuf, 128);
      dbuf_append (&dbuf, "pic", sizeof ("pic") - 1);
      dbuf_append_str (&dbuf, procName);
      dbuf_append (&dbuf, ".lib", sizeof (".lib") - 1);
      addSet (&libFilesSet, dbuf_detach_c_str (&dbuf));
    }

  shash_add (&linkValues, "libs", joinStrSet (processStrSet (libFilesSet, NULL, NULL, shell_escape)));

  lcmd = msprintf(linkValues, LFRM);
  ret = sdcc_system (lcmd);
  Safe_free (lcmd);

  if (ret)
    exit (1);
}
Example #9
void
GrainTracker::finalize()
{
  // Don't track grains if the current simulation step is before the specified tracking step
  if (_t_step < _tracking_step)
    return;
  Moose::perf_log.push("finalize()","GrainTracker");

  // Exchange data in parallel
  pack(_packed_data);
  _communicator.allgather(_packed_data, false);
  unpack(_packed_data);
  mergeSets(false);

  Moose::perf_log.push("buildspheres()","GrainTracker");
  buildBoundingSpheres();                    // Build bounding sphere information
  Moose::perf_log.pop("buildspheres()","GrainTracker");

  // Now merge sets again but this time we'll add periodic neighbor information
  mergeSets(true);

  Moose::perf_log.push("trackGrains()","GrainTracker");
  trackGrains();
  Moose::perf_log.pop("trackGrains()","GrainTracker");

  Moose::perf_log.push("remapGrains()","GrainTracker");
  if (_remap)
    remapGrains();
  Moose::perf_log.pop("remapGrains()","GrainTracker");

  updateFieldInfo();
  Moose::perf_log.pop("finalize()","GrainTracker");

  // Calculate and output bubble volume data
  if (_pars.isParamValid("bubble_volume_file"))
  {
    calculateBubbleVolumes();
    std::vector<Real> data;
    data.reserve(_all_bubble_volumes.size() + 2);
    data.push_back(_fe_problem.timeStep());
    data.push_back(_fe_problem.time());
    data.insert(data.end(), _all_bubble_volumes.begin(), _all_bubble_volumes.end());
    writeCSVFile(getParam<FileName>("bubble_volume_file"), data);
  }

  if (_compute_op_maps)
  {
    for (std::map<unsigned int, UniqueGrain *>::const_iterator grain_it = _unique_grains.begin();
         grain_it != _unique_grains.end(); ++grain_it)
    {
      if (grain_it->second->status != INACTIVE)
      {
        std::set<dof_id_type>::const_iterator elem_it_end = grain_it->second->entities_ptr->end();
        for (std::set<dof_id_type>::const_iterator elem_it = grain_it->second->entities_ptr->begin(); elem_it != elem_it_end; ++elem_it)
          _elemental_data[*elem_it].push_back(std::make_pair(grain_it->first, grain_it->second->variable_idx));
      }
    }
  }

  // Calculate memory usage
  if (_track_memory)
  {
    _bytes_used += calculateUsage();
    _communicator.sum(_bytes_used);
    formatBytesUsed();
  }
}
Example #10
bool
ReachingMap::initialise(const MaiMap &decode,
			StateMachine *sm,
			const AllowableOptimisations &opt,
			OracleInterface *oracle)
{
	struct {
		const AllowableOptimisations *opt;
		OracleInterface *oracle;
		const MaiMap *decode;
		bool operator()(StateMachineSideEffectStore *store) {
			if (!oracle->hasConflictingRemoteStores(*decode, *opt, store))
				return true;
			return false;
		}
		const StateMachineSideEffectStore *operator()(const StateMachineState *s) {
			const StateMachineSideEffect *se = s->getSideEffect();
			if (!se || se->type != StateMachineSideEffect::Store)
				return NULL;
			StateMachineSideEffectStore *store = (StateMachineSideEffectStore *)se;
			if (!(*this)(store))
				return NULL;
			return store;
		}
	} interestingStore = {&opt, oracle, &decode};

	/* Start by saying that every state needs updating. */
	std::vector<const StateMachineState *> needsUpdate;
	enumStates(sm, &needsUpdate);

	/* Now iterate making every state locally consistent.  The
	 * local rule is that a store is considered to reach a
	 * successor of state X if either:
	 *
	 * -- It reaches the start of state X and state X didn't
	 *    clobber it, or
	 * -- state X generates it.
	 *
	 * Once you iterate to a fixed point, a store reaches X if
	 * there is any possibility that the store might satisfy a
	 * load at X. */
	while (!needsUpdate.empty()) {
		const StateMachineState *s = needsUpdate.back();
		needsUpdate.pop_back();

		/* Start with our entry state, and from that compute
		   our exit state. */
		std::set<const StateMachineSideEffectStore *> _result;
		std::set<const StateMachineSideEffectStore *> *result;
		const StateMachineSideEffectStore *store = interestingStore(s);
		if (store) {
			/* Have to compute a new output state. */
			_result = content[s];
			/* Remove anything which is definitely clobbered */
			for (auto it = _result.begin(); it != _result.end(); ) {
				if (definitelyEqual( (*it)->addr, store->addr, opt ) ) {
					_result.erase(it++);
				} else {
					it++;
				}
			}
			/* And introduce the new value */
			_result.insert(store);
			result = &_result;
		} else {
			/* Output state is the same as the input state */
			result = &content[s];
		}

		std::vector<const StateMachineState *> targets;
		s->targets(targets);
		for (auto it = targets.begin(); it != targets.end(); it++) {
			const StateMachineState *target = *it;
			if (mergeSets(content[target], *result))
				needsUpdate.push_back(target);
		}
	}

	/* Relationship is now globally consistent, and can therefore
	 * be safely used. */
	return true;
}
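
In this reaching-stores analysis, mergeSets(content[target], *result) has to do two things: union the stores reaching the successor into its set, and report whether that set actually grew, because only changed states are pushed back onto the worklist, and that is what drives the loop to a fixed point. A minimal sketch matching that usage (illustrative, not the project's own definition):

#include <set>

// Illustrative only: union `src` into `dst` and report whether `dst` changed,
// so the caller can re-queue only the states whose reaching set grew.
template <typename T>
static bool mergeSets(std::set<T> &dst, const std::set<T> &src)
{
	bool changed = false;
	for (const T &item : src)
		changed |= dst.insert(item).second;
	return changed;
}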