Example #1
0
double terrama2::services::analysis::core::grid::zonal::forecast::accum::operatorImpl(terrama2::services::analysis::core::StatisticOperation statisticOperation,
    const std::string& dataSeriesName, const std::string& dateDiscardBefore, const std::string& dateDiscardAfter, const size_t band, terrama2::services::analysis::core::Buffer buffer)
{
  OperatorCache cache;
  terrama2::services::analysis::core::python::readInfoFromDict(cache);
  // After the operator lock is released we are not allowed to return any value, because we no longer hold the interpreter lock.
  // In case an exception is thrown we need to set this boolean; once we leave the scope and the interpreter lock is re-acquired we return NAN.
  bool exceptionOccurred = false;

  auto& contextManager = ContextManager::getInstance();
  auto analysis = cache.analysisPtr;

  try
  {
    terrama2::core::verify::analysisMonitoredObject(analysis);
  }
  catch(const terrama2::core::VerifyException&)
  {
    contextManager.addError(cache.analysisHashCode, QObject::tr("Use of invalid operator for analysis %1.").arg(analysis->id).toStdString());
    return NAN;
  }

  terrama2::services::analysis::core::MonitoredObjectContextPtr context;
  try
  {
    context = ContextManager::getInstance().getMonitoredObjectContext(cache.analysisHashCode);
  }
  catch(const terrama2::Exception& e)
  {
    TERRAMA2_LOG_ERROR() << boost::get_error_info<terrama2::ErrorDescription>(e)->toStdString();
    return NAN;
  }


  try
  {
    // In case an error has already occurred, there is nothing to be done
    if(!context->getErrors().empty())
      return NAN;

    bool hasData = false;

    auto dataManagerPtr = context->getDataManager().lock();
    if(!dataManagerPtr)
    {
      QString errMsg(QObject::tr("Invalid data manager."));
      throw terrama2::core::InvalidDataManagerException() << terrama2::ErrorDescription(errMsg);
    }

    std::shared_ptr<ContextDataSeries> moDsContext = context->getMonitoredObjectContextDataSeries(dataManagerPtr);
    if(!moDsContext)
    {
      QString errMsg(QObject::tr("Could not recover monitored object data series."));
      throw InvalidDataSeriesException() << terrama2::ErrorDescription(errMsg);
    }

    if(moDsContext->series.syncDataSet->size() == 0)
    {
      QString errMsg(QObject::tr("Could not recover monitored object data series."));
      throw InvalidDataSeriesException() << terrama2::ErrorDescription(errMsg);
    }

    auto moGeom = moDsContext->series.syncDataSet->getGeometry(cache.index, moDsContext->geometryPos);
    if(!moGeom.get())
    {
      QString errMsg(QObject::tr("Could not recover monitored object geometry."));
      throw InvalidDataSetException() << terrama2::ErrorDescription(errMsg);
    }
    auto geomResult = createBuffer(buffer, moGeom);

    auto dataSeries = context->findDataSeries(dataSeriesName);

    /////////////////////////////////////////////////////////////////
    //map of sum of values for each pixel
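    //key: pixel coordinate pair, value: likely (accumulated sum, count of accumulated values);
    //boost::hash supplies the std::pair hash that std::unordered_map does not provide by default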
    std::unordered_map<std::pair<int, int>, std::pair<double, int>, boost::hash<std::pair<int, int> > > valuesMap;

    auto datasets = dataSeries->datasetList;
    for(const auto& dataset : datasets)
    {
      auto rasterList = context->getRasterList(dataSeries, dataset->id, dateDiscardBefore, dateDiscardAfter);
      //sanity check: if no date range was given, only the last raster should be returned
      if(dateDiscardBefore.empty() && rasterList.size() > 1)
      {
        QString errMsg(QObject::tr("Invalid list of raster for dataset: %1").arg(dataset->id));
        throw terrama2::InvalidArgumentException() << terrama2::ErrorDescription(errMsg);
      }

      if(rasterList.empty())
      {
        QString errMsg(QObject::tr("Invalid raster for dataset: %1").arg(dataset->id));
        throw terrama2::InvalidArgumentException() << terrama2::ErrorDescription(errMsg);
      }

      auto firstRaster = rasterList.front();

      //no intersection between the raster and the object geometry
      if(!firstRaster->getExtent()->intersects(*geomResult->getMBR()))
        continue;

      geomResult->transform(firstRaster->getSRID());
      prec::appendValues(rasterList, band, geomResult.get() , valuesMap);

      if(!valuesMap.empty())
      {
        hasData = true;
        break;
      }
    }

    if(exceptionOccurred)
      return NAN;

    if(!hasData && statisticOperation != StatisticOperation::COUNT)
    {
      return NAN;
    }
    std::vector<double> values;
    values.reserve(valuesMap.size());

    for(const auto& pair : valuesMap)
      values.push_back(pair.second.first);

    terrama2::services::analysis::core::calculateStatistics(values, cache);
    return terrama2::services::analysis::core::getOperationResult(cache, statisticOperation);
  }
  catch(const terrama2::Exception& e)
  {
    context->addError(boost::get_error_info<terrama2::ErrorDescription>(e)->toStdString());
    return NAN;
  }
  catch(const std::exception& e)
  {
    context->addError(e.what());
    return NAN;
  }
  catch(...)
  {
    QString errMsg = QObject::tr("An unknown exception occurred.");
    context->addError(errMsg.toStdString());
    return NAN;
  }
}
 inline value_type& top() { return front(); }
Example #3
0
ModelConnector::ModelConnector(Model *g)
{
    QMap<QString, int> nodeID;

    PQP::Manager m(g->nodes.size());

    // load up meshes for all parts
    for(auto n : g->nodes)
    {
        nodeID[n->id] = nodeID.size();
        auto mesh = makeModelPQP(g->getMesh(n->id));
        if(mesh.empty()) continue;

        m.addModel(mesh);
    }

    QMap<QString, QVector<QPair<double, QString> > > possibleEdges;

    for(int i = 0; i < g->nodes.size(); i++)
    {
        for(int j = i+1; j < g->nodes.size(); j++)
        {
            auto ni = g->nodes[i];
            auto nj = g->nodes[j];

            // Ignore when edge existed
            if(g->getEdge(ni->id, nj->id)) continue;

            // Ignore when two nodes are in the same group
            if(g->shareGroup(ni->id, nj->id)) continue;

            // Check if an edge needs to be added
            auto isects = m.testIntersection(i,j);
            if(isects.empty()) continue; // no contact information for this pair
            std::sort(isects.begin(), isects.end());

            auto closest = isects.front();
            possibleEdges[ni->id].push_back( qMakePair(closest.distance, nj->id) );
            possibleEdges[nj->id].push_back( qMakePair(closest.distance, ni->id) );
        }
    }

    double threshold = g->robustBBox().diagonal().norm() * 0.05;

    for(auto nid : possibleEdges.keys())
    {
        auto possible = possibleEdges[nid];
        std::sort(possible.begin(), possible.end());

        for(auto edge : possible)
        {
            if(g->getEdge(nid, edge.second)) continue;

            double dist = edge.first;

            if(dist > threshold) continue;

            g->addEdge(g->getNode(nid), g->getNode(edge.second));

            //qDebug() << "Adding edge: " + nid + " - " + edge.second;
        }
    }

    g->ShapeGraph::property["showEdges"].setValue(true);
}
Example #4
0
WXSmoothEdge *WXFaceLayer::BuildSmoothEdge()
{
	// if the smooth edge has already been built: exit
	if (_pSmoothEdge)
		return _pSmoothEdge;
	real ta, tb;
	WOEdge *woea(0), *woeb(0);
	bool ok = false;
	vector<int> cuspEdgesIndices;
	int indexStart, indexEnd;
	unsigned nedges = _pWXFace->numberOfEdges();
	if (_nNullDotP == nedges) {
		_pSmoothEdge = NULL;
		return _pSmoothEdge;
	}
	if ((_nPosDotP != 0) && (_nPosDotP != _DotP.size()) && (_nNullDotP == 0)) {
		// that means that we have a smooth edge that starts from an edge and ends at an edge
		//-----------------------------
		// We retrieve the 2 edges for which we have opposite signs for each extremity
		RetrieveCuspEdgesIndices(cuspEdgesIndices);
		if (cuspEdgesIndices.size() != 2) // we necessarily have 2 cusp edges
			return 0;

		// let us determine which cusp edge corresponds to the starting:
		// We can do that because we defined that a silhouette edge had the back facing part on its right.
		// So if the WOEdge woea is such that woea[0].dotp > 0 and woea[1].dotp < 0, it is the starting edge.
		//-------------------------------------------

		if (_DotP[cuspEdgesIndices[0]] > 0) {
			woea = _pWXFace->GetOEdge(cuspEdgesIndices[0]);
			woeb = _pWXFace->GetOEdge(cuspEdgesIndices[1]);
			indexStart = cuspEdgesIndices[0];
			indexEnd = cuspEdgesIndices[1];
		}
		else {
			woea = _pWXFace->GetOEdge(cuspEdgesIndices[1]);
			woeb = _pWXFace->GetOEdge(cuspEdgesIndices[0]);
			indexStart = cuspEdgesIndices[1];
			indexEnd = cuspEdgesIndices[0];
		}

		// Compute the interpolation:
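		// ta and tb are the parametric zero-crossing positions of the interpolated dot product
		// along the starting and ending cusp edges: t = d0 / (d0 - d1)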
		ta = _DotP[indexStart] / (_DotP[indexStart] - _DotP[(indexStart + 1) % nedges]);
		tb = _DotP[indexEnd] / (_DotP[indexEnd] - _DotP[(indexEnd + 1) % nedges]);
		ok = true;
	}
	else if (_nNullDotP == 1) {
		// that means that exactly one of the 2 extremities of our silhouette edge is a vertex of the mesh
		if ((_nPosDotP == 2) || (_nPosDotP == 0)) {
			_pSmoothEdge = NULL;
			return _pSmoothEdge;
		}
		RetrieveCuspEdgesIndices(cuspEdgesIndices);
		// We should have only one EdgeCusp:
		if (cuspEdgesIndices.size() != 1) {
			if (G.debug & G_DEBUG_FREESTYLE) {
				cout << "Warning in BuildSmoothEdge: weird WXFace configuration" << endl;
			}
			_pSmoothEdge = NULL;
			return NULL;
		}
		unsigned index0 = Get0VertexIndex(); // retrieve the 0 vertex index
		unsigned nedges = _pWXFace->numberOfEdges();
		if (_DotP[cuspEdgesIndices[0]] > 0) {
			woea = _pWXFace->GetOEdge(cuspEdgesIndices[0]);
			woeb = _pWXFace->GetOEdge(index0);
			indexStart = cuspEdgesIndices[0];
			ta = _DotP[indexStart] / (_DotP[indexStart] - _DotP[(indexStart + 1) % nedges]);
			tb = 0.0;
		}
		else {
			woea = _pWXFace->GetOEdge(index0);
			woeb = _pWXFace->GetOEdge(cuspEdgesIndices[0]);
			indexEnd = cuspEdgesIndices[0];
			ta = 0.0;
			tb = _DotP[indexEnd] / (_DotP[indexEnd] - _DotP[(indexEnd + 1) % nedges]);
		}
		ok = true;
	}
	else if (_nNullDotP == 2) {
		// that means that the silhouette edge is an edge of the mesh
		int index = GetSmoothEdgeIndex();
		if (!_pWXFace->front()) {  // is it in the right order ?
			// the order of the WOEdge index is wrong
			woea = _pWXFace->GetOEdge((index + 1) % nedges);
			woeb = _pWXFace->GetOEdge((index + nedges - 1) % nedges); // avoid a negative index when index == 0
			ta = 0;
			tb = 1;
			ok = true;
		}
		else {
			// here it's not good, our edge is a single point -> skip that face
			ok = false;
#if 0
			// the order of the WOEdge index is good
			woea = _pWXFace->GetOEdge((index - 1) % nedges);
			woeb = _pWXFace->GetOEdge((index + 1) % nedges);
			ta = 1;
			tb = 0;
#endif
		}
	}
	if (ok) {
		_pSmoothEdge = new WXSmoothEdge;
		_pSmoothEdge->setWOeA(woea);
		_pSmoothEdge->setWOeB(woeb);
		_pSmoothEdge->setTa(ta);
		_pSmoothEdge->setTb(tb);
		if (_Nature & Nature::SILHOUETTE) {
			if (_nNullDotP != 2) {
				if (_DotP[_ClosestPointIndex] + 0.01 > 0)
					_pSmoothEdge->setFront(true);
				else
					_pSmoothEdge->setFront(false);
			}
		}
	}

#if 0
	// check bordering edges to see if they have different dotp values in bordering faces.
	for (int i = 0; i < numberOfEdges(); i++) {
		WSFace *bface = (WSFace *)GetBordingFace(i);
		if (bface) {
			if ((front()) ^ (bface->front())) { // fA->front XOR fB->front (true if one is 0 and the other is 1)
				// that means that the edge i of the face is a silhouette edge
				// CHECK FIRST WHETHER THE EXACTSILHOUETTEEDGE HAS NOT YET BEEN BUILT ON THE OTHER FACE (1 is enough).
				if (((WSExactFace *)bface)->exactSilhouetteEdge()) {
					// that means that this silhouette edge has already been built
					return ((WSExactFace *)bface)->exactSilhouetteEdge();
				}
				// Else we must build it
				WOEdge *woea, *woeb;
				real ta, tb;
				if (!front()) { // is it in the right order ?
					// the order of the WOEdge index is wrong
					woea = _OEdgeList[(i + 1) % numberOfEdges()];
					if (0 == i)
						woeb = _OEdgeList[numberOfEdges() - 1];
					else
						woeb = _OEdgeList[(i - 1)];
					ta = 0;
					tb = 1;
				}
				else {
					// the order of the WOEdge index is good
					if (0 == i)
						woea = _OEdgeList[numberOfEdges() - 1];
					else
						woea = _OEdgeList[(i - 1)];
					woeb = _OEdgeList[(i + 1) % numberOfEdges()];
					ta = 1;
					tb = 0;
				}

				_pSmoothEdge = new ExactSilhouetteEdge(ExactSilhouetteEdge::VERTEX_VERTEX);
				_pSmoothEdge->setWOeA(woea);
				_pSmoothEdge->setWOeB(woeb);
				_pSmoothEdge->setTa(ta);
				_pSmoothEdge->setTb(tb);

				return _pSmoothEdge;
			}
		}
	}
#endif
	return _pSmoothEdge;
}
Example #5
0
/*
 * reoptimize() runs a trace through a second pass of TraceBuilder
 * optimizations, like this:
 *
 *   reset state.
 *   move all blocks to a temporary list.
 *   compute immediate dominators.
 *   for each block in trace order:
 *     if we have a snapshot state for this block:
 *       clear cse entries that don't dominate this block.
 *       use snapshot state.
 *     move all instructions to a temporary list.
 *     for each instruction:
 *       optimizeWork - do CSE and simplify again
 *       if not simplified:
 *         append existing instruction and update state.
 *       else:
 *         if the instruction has a result, insert a mov from the
 *         simplified tmp to the original tmp and discard the instruction.
 *     if the last conditional branch was turned into a jump, remove the
 *     fall-through edge to the next block.
 */
void TraceBuilder::reoptimize() {
  FTRACE(5, "ReOptimize:vvvvvvvvvvvvvvvvvvvv\n");
  SCOPE_EXIT { FTRACE(5, "ReOptimize:^^^^^^^^^^^^^^^^^^^^\n"); };
  assert(m_curTrace->isMain());
  assert(m_savedTraces.empty());

  m_state.setEnableCse(RuntimeOption::EvalHHIRCse);
  m_enableSimplification = RuntimeOption::EvalHHIRSimplification;
  if (!m_state.enableCse() && !m_enableSimplification) return;
  always_assert(!m_inReoptimize);
  m_inReoptimize = true;

  BlockList sortedBlocks = rpoSortCfg(m_unit);
  auto const idoms = findDominators(m_unit, sortedBlocks);
  m_state.clear();

  auto& traceBlocks = m_curTrace->blocks();
  BlockList blocks(traceBlocks.begin(), traceBlocks.end());
  traceBlocks.clear();
  for (auto* block : blocks) {
    assert(block->trace() == m_curTrace);
    FTRACE(5, "Block: {}\n", block->id());

    assert(m_curTrace->isMain());
    m_state.startBlock(block);
    m_curTrace->push_back(block);

    auto instructions = std::move(block->instrs());
    assert(block->empty());
    while (!instructions.empty()) {
      auto *inst = &instructions.front();
      instructions.pop_front();
      m_state.setMarker(inst->marker());

      // merging state looks at the current marker, and optimizeWork
      // below may create new instructions. Use the marker from this
      // instruction.
      assert(inst->marker().valid());
      setMarker(inst->marker());

      auto const tmp = optimizeWork(inst, idoms); // Can generate new instrs!
      if (!tmp) {
        // Could not optimize; keep the old instruction
        appendInstruction(inst, block);
        m_state.update(inst);
        continue;
      }
      SSATmp* dst = inst->dst();
      if (dst->type() != Type::None && dst != tmp) {
        // The result of optimization has a different destination than the inst.
        // Generate a mov(tmp->dst) to get result into dst. If we get here then
        // assume the last instruction in the block isn't a guard. If it was,
        // we would have to insert the mov on the fall-through edge.
        assert(block->empty() || !block->back().isBlockEnd());
        IRInstruction* mov = m_unit.mov(dst, tmp, inst->marker());
        appendInstruction(mov, block);
        m_state.update(mov);
      }
      // Not re-adding inst; remove the inst->taken edge
      if (inst->taken()) inst->setTaken(nullptr);
    }

    if (block->empty()) {
      // If all the instructions in the block were optimized away, remove it
      // from the trace.
      auto it = traceBlocks.end();
      --it;
      assert(*it == block);
      m_curTrace->unlink(it);
    } else {
      if (block->back().isTerminal()) {
        // Could have converted a conditional branch to Jmp; clear next.
        block->setNext(nullptr);
      }
      m_state.finishBlock(block);
    }
  }
}
Example #6
0
bool checkTmpsSpanningCalls(const IRUnit& unit) {
  auto const blocks = rpoSortCfg(unit);
  auto const children = findDomChildren(unit, blocks);

  // CallBuiltin is ok because it is not a php-level call.  (It will
  // call a C++ helper and we can push/pop around it normally.)
  auto isCall = [&] (Opcode op) {
    return op == Call || op == CallArray;
  };

  typedef StateVector<SSATmp,bool> State;

  bool isValid = true;
  forPreorderDoms(
    blocks.front(), children, State(unit, false),
    [&] (Block* b, State& state) {
      for (auto& inst : *b) {
        for (auto& src : inst.srcs()) {
          /*
           * These SSATmp's are used only for stack analysis in the
           * simplifier and therefore may live across calls.  In particular
           * these instructions are used to bridge the logical stack of the
           * caller when a callee is inlined so that analysis does not scan
           * into the callee stack when searching for a type of value in the
           * caller.
           */
          if (inst.op() == ReDefSP && src->isA(Type::StkPtr)) continue;
          if (inst.op() == ReDefGeneratorSP && src->isA(Type::StkPtr)) {
            continue;
          }

          if (src->isA(Type::FramePtr)) continue;
          if (src->isConst()) continue;
          if (!state[src]) {
            auto msg = folly::format("checkTmpsSpanningCalls failed\n"
                                     "  instruction: {}\n"
                                     "  src:         {}\n",
                                     inst.toString(),
                                     src->toString()).str();
            std::cerr << msg;
            FTRACE(1, "{}", msg);
            isValid = false;
          }
        }

        /*
         * Php calls kill all live temporaries.  We can't keep them
         * alive across the call because we currently have no
         * callee-saved registers in our abi, and all translations
         * share the same spill slots.
         */
        if (isCall(inst.op())) state.reset();

        for (auto& d : inst.dsts()) {
          state[d] = true;
        }
      }
    }
  );

  return isValid;
}
TYPED_TEST(DictTests, access_with_wrong_vid) {
  TypeParam p;
  auto values = dict_values<typename TypeParam::value_type>();
  auto vid = p.addValue(values.front());
  EXPECT_THROW(p.getValueForValueId(vid+1), std::out_of_range);
}
/*Main------------------------------------------------------------------------*/
int main(int argc, char* argv[]) {
   
    //input validation
   if(argc != 3) {
       
      //usage message
      printf("Usage: %s [input file] [output_file]\n", argv[0]);
      exit(EXIT_FAILURE);
   }
   
   //the number of lines in the file
   int lineCount = 0;
   char line[MAX_SIZE];

   //open files for input read and output write
   FILE *in = fopen(argv[1], "r");
   FILE *out = fopen(argv[2], "w");

   //checking input write file pointer
   if(in == NULL ) {
      printf("Unable to open file %s for reading\n", argv[1]);
      exit(EXIT_FAILURE);
   }
   
   //checking output write file pointer
   if(out == NULL ) {
      printf("Unable to open file %s for writing\n", argv[2]);
      exit(EXIT_FAILURE);
   }

   //count the lines to determine how many words need to be stored
   while (fgets(line, MAX_SIZE, in) != NULL) {
      lineCount++;
   }
   
   //reopen so the file pointer is back at the beginning
   fclose(in);
   in = fopen(argv[1], "r");

   //now that lineCount is known, allocate storage for the words
   char words[lineCount][MAX_SIZE];

   //reset lineCount to walk through the file again and copy each line
   lineCount = 0;
   
   //copy each line into the words array
   while(fgets(line, MAX_SIZE, in) != NULL) {
      int iter;
      for (iter = 0; iter < strlen(line); iter++) {
         
         //chop the newline
         if (line[iter] == '\n') {
            words[lineCount][iter] = '\0';
            break;
         }
         words[lineCount][iter] = line[iter];
      }
      //make sure the copied word is null-terminated even if the line had no trailing newline
      words[lineCount][iter] = '\0';
      lineCount++;
   }

   //Declare a list object and initialize it with a 0
   List list = newList();
   append(list,0);

   //InsertSort walk through each item and place in correct spot
   int iter;
   for (iter = 1; iter < lineCount; iter++) {
      
      //words that are at the front and back of the list
      char *curr_word = words[iter];
      char *front_word = words[front(list)];
      char *back_word = words[back(list)];

      // if the word should be at the front of the list
      if (strcmp(curr_word, front_word) < 0) {
         prepend(list,iter);
      }
      
      //if the word should be at the end of the list
      else if (strcmp(curr_word, back_word) > 0) {
         append(list,iter);
      }
      
      //if the word belongs within the list
      else {
          
         //start at the beginning of the list and compare each word lexicographically
         moveTo(list,0);
         
         //compare each word until you find the correct spot and stop the loop
         while (strcmp(curr_word, words[getElement(list)]) > 0) {
            moveNext(list);
         }
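         //this loop always stops: curr_word compares <= the word at the back of the list (handled above)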
         
         //when stopped this is the correct index so insert here
         insertBefore(list, iter);
      }
   }

   //output the words to the file
   for (moveTo(list,0); getIndex(list)>= 0; moveNext(list)) {
      fprintf(out,"%s\n", words[getElement(list)]);
   }

   //free all nodes in the list
   freeList(&list);
   
   //close the input read and output write files
   fclose(in);
   fclose(out);

   return(EXIT_SUCCESS);
}
Example #9
0
File: run.c Project: 8l/cmm
int
waitup(int echildok, int *retstatus)
{
	Envy *e;
	int pid;
	int slot;
	Symtab *s;
	Word *w;
	Job *j;
	char buf[ERRLEN];
	Bufblock *bp;
	int uarg = 0;
	int done;
	Node *n;
	Process *p;
	extern int runerrs;

	/* first check against the process list */
	if(retstatus)
		for(p = phead; p; p = p->f)
			if(p->pid == *retstatus){
				*retstatus = p->status;
				pdelete(p);
				return(-1);
			}
again:		/* rogue processes */
	pid = waitfor(buf);
	if(pid == -1){
		if(echildok > 0)
			return(1);
		else {
			fprintf(stderr, "mk: (waitup %d) ", echildok);
			perror("mk wait");
			Exit();
		}
	}
	if(DEBUG(D_EXEC))
		printf("waitup got pid=%d, status='%s'\n", pid, buf);
	if(retstatus && pid == *retstatus){
		*retstatus = buf[0]? 1:0;
		return(-1);
	}
	slot = pidslot(pid);
	if(slot < 0){
		if(DEBUG(D_EXEC))
			fprintf(stderr, "mk: wait returned unexpected process %d\n", pid);
		pnew(pid, buf[0]? 1:0);
		goto again;
	}
	j = events[slot].job;
	usage();
	nrunning--;
	events[slot].pid = -1;
	if(buf[0]){
		e = buildenv(j, slot);
		bp = newbuf();
		shprint(j->r->recipe, e, bp);
		front(bp->start);
		fprintf(stderr, "mk: %s: exit status=%s", bp->start, buf);
		freebuf(bp);
		for(n = j->n, done = 0; n; n = n->next)
			if(n->flags&DELETE){
				if(done++ == 0)
					fprintf(stderr, ", deleting");
				fprintf(stderr, " '%s'", n->name);
				delete(n->name);
			}
		fprintf(stderr, "\n");
		if(kflag){
			runerrs++;
			uarg = 1;
		} else {
			jobs = 0;
			Exit();
		}
	}
	for(w = j->t; w; w = w->next){
		if((s = symlook(w->s, S_NODE, 0)) == 0)
			continue;	/* not interested in this node */
		update(uarg, (Node *)s->value);
	}
	if(nrunning < nproclimit)
		sched();
	return(0);
}
inline RouteLeg assembleLeg(const datafacade::BaseDataFacade &facade,
                            const std::vector<PathData> &route_data,
                            const LegGeometry &leg_geometry,
                            const PhantomNode &source_node,
                            const PhantomNode &target_node,
                            const bool target_traversed_in_reverse,
                            const bool needs_summary)
{
    const auto target_duration =
        (target_traversed_in_reverse ? target_node.reverse_weight : target_node.forward_weight) /
        10.;
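    // durations/weights appear to be stored in tenths of a second, hence the divisions by 10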

    auto distance = std::accumulate(
        leg_geometry.segment_distances.begin(), leg_geometry.segment_distances.end(), 0.);
    auto duration = std::accumulate(route_data.begin(),
                                    route_data.end(),
                                    0.,
                                    [](const double sum, const PathData &data) {
                                        return sum + data.duration_until_turn;
                                    }) /
                    10.;

    //                 s
    //                 |
    // Given a route a---b---c  where there is a right turn at c.
    //                       |
    //                       d
    //                       |--t
    //                       e
    // (a, b, c) gets compressed to (a,c)
    // (c, d, e) gets compressed to (c,e)
    // The duration of the turn (a,c) -> (c,e) will be the duration of (a,c) (i.e. the duration
    // of (a,b,c)).
    // The phantom node of s will contain:
    // `forward_weight`: duration of (a,s)
    // `forward_offset`: 0 (it's the first segment)
    // The phantom node of t will contain:
    // `forward_weight`: duration of (d,t)
    // `forward_offset`: duration of (c, d)
    // path_data will have entries for (s,b), (b, c), (c, d) but (d, t) is only
    // captured by the phantom node. So we need to add the target duration here.
    // On local segments, the target duration is already part of the duration, however.
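    // Illustrative numbers (not taken from the data): if the summed path_data durations come to
    // 42.0s and the target phantom contributes 3.5s, the leg duration below becomes 45.5s.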

    duration = duration + target_duration;
    if (route_data.empty())
    {
        duration -= (target_traversed_in_reverse ? source_node.reverse_weight
                                                 : source_node.forward_weight) /
                    10.0;
    }

    std::string summary;
    if (needs_summary)
    {
        auto summary_array = detail::summarizeRoute<detail::MAX_USED_SEGMENTS>(
            route_data, target_node, target_traversed_in_reverse);
        if (route_data.empty())
            summary_array[0] = source_node.name_id;

        BOOST_ASSERT(detail::MAX_USED_SEGMENTS > 0);
        BOOST_ASSERT(summary_array.begin() != summary_array.end());
        summary = std::accumulate(std::next(summary_array.begin()),
                                  summary_array.end(),
                                  facade.GetNameForID(summary_array.front()),
                                  [&facade](std::string previous, const std::uint32_t name_id) {
                                      if (name_id != 0)
                                      {
                                          previous += ", " + facade.GetNameForID(name_id);
                                      }
                                      return previous;
                                  });
    }

    return RouteLeg{duration, distance, summary, {}};
}
DialogSearchReplace::DialogSearchReplace(agi::Context* c, bool replace)
: wxDialog(c->parent, -1, replace ? _("Replace") : _("Find"))
, c(c)
, settings(new SearchReplaceSettings)
, has_replace(replace)
{
	auto recent_find(lagi_MRU_wxAS("Find"));
	auto recent_replace(lagi_MRU_wxAS("Replace"));

	settings->field = static_cast<SearchReplaceSettings::Field>(OPT_GET("Tool/Search Replace/Field")->GetInt());
	settings->limit_to = static_cast<SearchReplaceSettings::Limit>(OPT_GET("Tool/Search Replace/Affect")->GetInt());
	settings->find = recent_find.empty() ? std::string() : from_wx(recent_find.front());
	settings->replace_with = recent_replace.empty() ? std::string() : from_wx(recent_replace.front());
	settings->match_case = OPT_GET("Tool/Search Replace/Match Case")->GetBool();
	settings->use_regex = OPT_GET("Tool/Search Replace/RegExp")->GetBool();
	settings->ignore_comments = OPT_GET("Tool/Search Replace/Skip Comments")->GetBool();
	settings->skip_tags = OPT_GET("Tool/Search Replace/Skip Tags")->GetBool();
	settings->exact_match = false;

	auto find_sizer = new wxFlexGridSizer(2, 2, 5, 15);
	find_edit = new wxComboBox(this, -1, "", wxDefaultPosition, wxSize(300, -1), recent_find, wxCB_DROPDOWN, StringBinder(&settings->find));
	find_sizer->Add(new wxStaticText(this, -1, _("Find what:")), wxSizerFlags().Center().Left());
	find_sizer->Add(find_edit);

	if (has_replace) {
		replace_edit = new wxComboBox(this, -1, "", wxDefaultPosition, wxSize(300, -1), lagi_MRU_wxAS("Replace"), wxCB_DROPDOWN, StringBinder(&settings->replace_with));
		find_sizer->Add(new wxStaticText(this, -1, _("Replace with:")), wxSizerFlags().Center().Left());
		find_sizer->Add(replace_edit);
	}

	auto options_sizer = new wxBoxSizer(wxVERTICAL);
	options_sizer->Add(new wxCheckBox(this, -1, _("&Match case"), wxDefaultPosition, wxDefaultSize, 0, wxGenericValidator(&settings->match_case)), wxSizerFlags().Border(wxBOTTOM));
	options_sizer->Add(new wxCheckBox(this, -1, _("&Use regular expressions"), wxDefaultPosition, wxDefaultSize, 0, wxGenericValidator(&settings->use_regex)), wxSizerFlags().Border(wxBOTTOM));
	options_sizer->Add(new wxCheckBox(this, -1, _("&Skip Comments"), wxDefaultPosition, wxDefaultSize, 0, wxGenericValidator(&settings->ignore_comments)), wxSizerFlags().Border(wxBOTTOM));
	options_sizer->Add(new wxCheckBox(this, -1, _("S&kip Override Tags"), wxDefaultPosition, wxDefaultSize, 0, wxGenericValidator(&settings->skip_tags)));

	auto left_sizer = new wxBoxSizer(wxVERTICAL);
	left_sizer->Add(find_sizer, wxSizerFlags().DoubleBorder(wxBOTTOM));
	left_sizer->Add(options_sizer);

	wxString field[] = { _("Text"), _("Style"), _("Actor"), _("Effect") };
	wxString affect[] = { _("All rows"), _("Selected rows") };
	auto limit_sizer = new wxBoxSizer(wxHORIZONTAL);
	limit_sizer->Add(new wxRadioBox(this, -1, _("In Field"), wxDefaultPosition, wxDefaultSize, countof(field), field, 0, wxRA_SPECIFY_COLS, MakeEnumBinder(&settings->field)), wxSizerFlags().Border(wxRIGHT));
	limit_sizer->Add(new wxRadioBox(this, -1, _("Limit to"), wxDefaultPosition, wxDefaultSize, countof(affect), affect, 0, wxRA_SPECIFY_COLS, MakeEnumBinder(&settings->limit_to)));

	auto find_next = new wxButton(this, -1, _("&Find next"));
	auto replace_next = new wxButton(this, -1, _("Replace &next"));
	auto replace_all = new wxButton(this, -1, _("Replace &all"));
	find_next->SetDefault();

	auto button_sizer = new wxBoxSizer(wxVERTICAL);
	button_sizer->Add(find_next, wxSizerFlags().Border(wxBOTTOM));
	button_sizer->Add(replace_next, wxSizerFlags().Border(wxBOTTOM));
	button_sizer->Add(replace_all, wxSizerFlags().Border(wxBOTTOM));
	button_sizer->Add(new wxButton(this, wxID_CANCEL));

	if (!has_replace) {
		button_sizer->Hide(replace_next);
		button_sizer->Hide(replace_all);
	}

	auto top_sizer = new wxBoxSizer(wxHORIZONTAL);
	top_sizer->Add(left_sizer, wxSizerFlags().Border());
	top_sizer->Add(button_sizer, wxSizerFlags().Border());

	auto main_sizer = new wxBoxSizer(wxVERTICAL);
	main_sizer->Add(top_sizer);
	main_sizer->Add(limit_sizer, wxSizerFlags().Border());
	SetSizerAndFit(main_sizer);
	CenterOnParent();

	find_next->Bind(wxEVT_COMMAND_BUTTON_CLICKED, std::bind(&DialogSearchReplace::FindReplace, this, &SearchReplaceEngine::FindNext));
	replace_next->Bind(wxEVT_COMMAND_BUTTON_CLICKED, std::bind(&DialogSearchReplace::FindReplace, this, &SearchReplaceEngine::ReplaceNext));
	replace_all->Bind(wxEVT_COMMAND_BUTTON_CLICKED, std::bind(&DialogSearchReplace::FindReplace, this, &SearchReplaceEngine::ReplaceAll));
}
Example #12
0
/*
 * reoptimize() runs a trace through a second pass of TraceBuilder
 * optimizations, like this:
 *
 *   reset state.
 *   move all blocks to a temporary list.
 *   compute immediate dominators.
 *   for each block in trace order:
 *     if we have a snapshot state for this block:
 *       clear cse entries that don't dominate this block.
 *       use snapshot state.
 *     move all instructions to a temporary list.
 *     for each instruction:
 *       optimizeWork - do CSE and simplify again
 *       if not simplified:
 *         append existing instruction and update state.
 *       else:
 *         if the instruction has a result, insert a mov from the
 *         simplified tmp to the original tmp and discard the instruction.
 *     if the last conditional branch was turned into a jump, remove the
 *     fall-through edge to the next block.
 */
void TraceBuilder::reoptimize() {
  FTRACE(5, "ReOptimize:vvvvvvvvvvvvvvvvvvvv\n");
  SCOPE_EXIT { FTRACE(5, "ReOptimize:^^^^^^^^^^^^^^^^^^^^\n"); };
  assert(m_curTrace == m_mainTrace.get());
  assert(m_savedTraces.empty());
  assert(m_inlineSavedStates.empty());

  m_enableCse = RuntimeOption::EvalHHIRCse;
  m_enableSimplification = RuntimeOption::EvalHHIRSimplification;
  if (!m_enableCse && !m_enableSimplification) return;
  if (m_mainTrace->blocks().size() >
      RuntimeOption::EvalHHIRSimplificationMaxBlocks) {
    // TODO CSEHash::filter is very slow for large block sizes
    // t2135219 should address that
    return;
  }

  BlockList sortedBlocks = rpoSortCfg(m_mainTrace.get(), m_irFactory);
  auto const idoms = findDominators(sortedBlocks);
  clearTrackedState();

  auto blocks = std::move(m_mainTrace->blocks());
  assert(m_mainTrace->blocks().empty());
  while (!blocks.empty()) {
    Block* block = blocks.front();
    blocks.pop_front();
    assert(block->trace() == m_mainTrace.get());
    FTRACE(5, "Block: {}\n", block->id());

    m_mainTrace->push_back(block);
    if (m_snapshots[block]) {
      useState(block);
    }

    auto instructions = std::move(block->instrs());
    assert(block->empty());
    while (!instructions.empty()) {
      auto *inst = &instructions.front();
      instructions.pop_front();

      // last attempt to elide ActRecs, if we still need the InlineFPAnchor
      // it will be added back to the trace when we re-add instructions that
      // rely on it
      if (inst->op() == InlineFPAnchor) {
        continue;
      }

      // merging state looks at the current marker, and optimizeWork
      // below may create new instructions. Use the marker from this
      // instruction.
      assert(inst->marker().valid());
      setMarker(inst->marker());

      auto const tmp = optimizeWork(inst, idoms); // Can generate new instrs!
      if (!tmp) {
        // Could not optimize; keep the old instruction
        appendInstruction(inst, block);
        updateTrackedState(inst);
        continue;
      }
      SSATmp* dst = inst->dst();
      if (dst->type() != Type::None && dst != tmp) {
        // The result of optimization has a different destination than the inst.
        // Generate a mov(tmp->dst) to get result into dst. If we get here then
        // assume the last instruction in the block isn't a guard. If it was,
        // we would have to insert the mov on the fall-through edge.
        assert(block->empty() || !block->back()->isBlockEnd());
        IRInstruction* mov = m_irFactory.mov(dst, tmp, inst->marker());
        appendInstruction(mov, block);
        updateTrackedState(mov);
      }
      // Not re-adding inst; remove the inst->taken edge
      if (inst->taken()) inst->setTaken(nullptr);
    }
    if (block->back()->isTerminal()) {
      // Could have converted a conditional branch to Jmp; clear next.
      block->setNext(nullptr);
    } else {
      // if the last instruction was a branch, we already saved state
      // for the target in updateTrackedState().  Now save state for
      // the fall-through path.
      saveState(block->next());
    }
  }
}
Example #13
0
/* This main function does a little testing
   Like all good CS Majors you should test
   your code here. There is no substitute for testing
   and you should be sure to test for all edge cases
   e.g., calling remove_front on an empty list.
*/
int main(void)
{
	/* Now to make use of all of this stuff */
	list* llist = create_list();

  /* What does an empty list contain?  Lets use our handy traversal function */
  printf("TEST CASE 1\nAn Empty list should print nothing here:\n");
  traverse(llist, print_person);
	printf("\n");

 	/* Lets add a person from front and then print */
 	push_front(llist, create_person("Andrew", 24));
 	printf("TEST CASE 2\nA List with one person should print that person:\n");
 	traverse(llist, print_person);
 	printf("\n");

  
  /* Lets remove two persons from front and then print */
  remove_front(llist, free_person); // remove from a list with more than 1 element
 	remove_front(llist, free_person); // remove from a list with only 1 element
 	printf("TEST CASE 3\nAnother Empty list should print nothing here:\n");
 	traverse(llist, print_person);
 	printf("\n");

 	/* Lets add two people and then print */
 	push_front(llist, create_person("Nick", 22));
 	push_front(llist, create_person("Randal", 21));
 	printf("TEST CASE 4\nA List with two people should print those two people:\n");
 	traverse(llist, print_person);
 	printf("\n");

	/* Lets copy this list */
	list* llist2 = copy_list(llist, copy_person);
	printf("TEST CASE 5\nA copied list should print out the same two people:\n");
 	traverse(llist2, print_person);
 	printf("\n");

  /* Lets kill the list */
  empty_list(llist, free_person);
 	printf("TEST CASE 6\nAfter freeing all nodes the list should be empty:\n");
 	traverse(llist, print_person);
	printf("\n");

	/* Let's make a list of people, and remove certain ones! */
	/* Should remove anyone whose name is 8+ characters long */
	push_front(llist, create_person("Josephine", 27));
	push_front(llist, create_person("Dave", 34));
	push_front(llist, create_person("Benjamin", 23));
	push_front(llist, create_person("Lisa", 41));
	push_front(llist, create_person("Maximilian", 24));
	remove_if(llist, long_name, free_person);
	printf("TEST CASE 7\nShould only print 2 people with short names:\n");
	traverse(llist, print_person);
  printf("\n");
  
  /* Testing over clean up*/
  empty_list(llist, free_person);
  free(llist);
  empty_list(llist2, free_person);
  free(llist2);


  // MY TESTS!!!

  // Test case 8 -- create_list(); is_empty() and size() when the list is empty
  list* myList = create_list();
  printf("TEST CASE 8\nShould print 1 and then 0:\n");
  printf("%d\t", is_empty(myList));
  printf("%d\n", size(myList));
  printf("\n");

  // Test case 9 -- front() and back() when the list is empty
  printf("TEST CASE 9\nShould print nothing:\n");
  print_person(front(myList));
  print_person(back(myList));
  printf("\n");

  // Test case 10 -- push_front() and push_back() and traverse()
  push_front(myList, create_person("Dan", 24));
  push_back(myList, create_person("Sun", 24));
  push_front(myList, create_person("Someone", 100));
  push_back(myList, create_person("Somebody", 1));
  printf("TEST CASE 10\nShould print 4 people in the order of Someone-Dan-Sun-Somebody:\n");
  traverse(myList, print_person);
  printf("\n");

  // Test case 11 -- size() and is_empty() when the list is not empty
  printf("TEST CASE 11\nShould return 0 and then 4:\n");
  printf("%d\t", is_empty(myList));
  printf("%d\n", size(myList));
  printf("\n");

  // Test case 12 -- front() and back() when the list is not empty
  printf("TEST CASE 12\nShould print Someone then Somebody:\n");
  print_person(front(myList));
  print_person(back(myList));
  printf("\n");

  // Test case 13 -- copy_list()
  list* myListCopy = copy_list(myList, copy_person);
  printf("TEST CASE 13\nA copied list should print 4 people in the order of Someone-Dan-Sun-Somebody:\n");
  traverse(myListCopy, print_person);
  printf("\n");

  // Test case 14 -- remove_front() and remove_back() when the list is not empty
  remove_front(myList, free_person);
  remove_back(myList, free_person);
  printf("TEST CASE 13\nShould print 2 people in the order of Dan-Sun:\n");
  traverse(myList, print_person);
  printf("\n");

  // Test case 15 -- remove_if()
  push_front(myList, create_person("LLLLLLLLLLLLL", 1));
  push_front(myList, create_person("MMMMMMMMM", 1));
  push_front(myList, create_person("AAA", 3));
  push_back(myList, create_person("DDD", 5));
  push_back(myList, create_person("T", 10));
  push_back(myList, create_person("VVVVVVVVVV", 1));
  remove_if(myList, long_name, free_person);
  printf("TEST CASE 13\nShould print 5 people:\n");
  traverse(myList, print_person);
  printf("\n");

  // Test case 16 -- empty_list()
  empty_list(myList, free_person);
  printf("TEST CASE 16\nShould print nothing:\n");
  traverse(myList, print_person);
  printf("\n");

  // Test case 17 -- remove_front() and remove_back() when the list is empty
  remove_front(myList, free_person);
  remove_back(myList, free_person);
  printf("TEST CASE 17\nNo error should occur: \n");
  printf("\n");

  // Test case 18 -- push big data into the copied list to test push_front() and copy_list()
  for (int i = 0; i < 1000000; i++) {
    push_front(myListCopy, create_person("BIG", 1));
  }
  printf("TEST CASE 18\nShould print 1000004 (add a large number of data): \n");
  printf("%d\n", size(myListCopy));
  printf("\n");

  empty_list(myList, free_person);
  free(myList);

  empty_list(myListCopy, free_person);
  free(myListCopy);

  return 0;
}
Example #14
0
void updateChunkWriteStatsAndSplitIfNeeded(OperationContext* opCtx,
                                           ChunkManager* manager,
                                           Chunk* chunk,
                                           long dataWritten) {
    // Disable lastError tracking so that any errors which occur during auto-split do not get
    // bubbled up on the client connection doing a write
    LastError::Disabled disableLastError(&LastError::get(opCtx->getClient()));

    const auto balancerConfig = Grid::get(opCtx)->getBalancerConfiguration();

    const bool minIsInf =
        (0 == manager->getShardKeyPattern().getKeyPattern().globalMin().woCompare(chunk->getMin()));
    const bool maxIsInf =
        (0 == manager->getShardKeyPattern().getKeyPattern().globalMax().woCompare(chunk->getMax()));
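    // i.e. whether the chunk being written to is the very first or very last chunk of the
    // collection's shard key space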

    const uint64_t chunkBytesWritten = chunk->addBytesWritten(dataWritten);

    const uint64_t desiredChunkSize =
        calculateDesiredChunkSize(balancerConfig->getMaxChunkSizeBytes(), manager->numChunks());

    if (!chunk->shouldSplit(desiredChunkSize, minIsInf, maxIsInf)) {
        return;
    }

    const NamespaceString nss(manager->getns());

    if (!manager->_autoSplitThrottle._splitTickets.tryAcquire()) {
        LOG(1) << "won't auto split because not enough tickets: " << nss;
        return;
    }

    TicketHolderReleaser releaser(&(manager->_autoSplitThrottle._splitTickets));

    const ChunkRange chunkRange(chunk->getMin(), chunk->getMax());

    try {
        // Ensure we have the most up-to-date balancer configuration
        uassertStatusOK(balancerConfig->refreshAndCheck(opCtx));

        if (!balancerConfig->getShouldAutoSplit()) {
            return;
        }

        LOG(1) << "about to initiate autosplit: " << redact(chunk->toString())
               << " dataWritten: " << chunkBytesWritten
               << " desiredChunkSize: " << desiredChunkSize;

        const uint64_t chunkSizeToUse = [&]() {
            const uint64_t estNumSplitPoints = chunkBytesWritten / desiredChunkSize * 2;
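            // rough estimate of how many split points this many written bytes would yield; the
            // factor of 2 assumes splits land at roughly half the desired chunk size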

            if (estNumSplitPoints >= kTooManySplitPoints) {
                // The current desired chunk size would split the chunk into lots of small chunks,
                // and in the worst case this can result in thousands of chunks. So check whether a
                // bigger value can be used.
                return std::min(chunkBytesWritten, balancerConfig->getMaxChunkSizeBytes());
            } else {
                return desiredChunkSize;
            }
        }();

        auto splitPoints =
            uassertStatusOK(shardutil::selectChunkSplitPoints(opCtx,
                                                              chunk->getShardId(),
                                                              nss,
                                                              manager->getShardKeyPattern(),
                                                              chunkRange,
                                                              chunkSizeToUse,
                                                              boost::none));

        if (splitPoints.size() <= 1) {
            // No split points means there isn't enough data to split on; 1 split point means the
            // chunk is between half and the full chunk size, so there is no need to split yet
            chunk->clearBytesWritten();
            return;
        }

        if (minIsInf || maxIsInf) {
            // We don't want to reset _dataWritten since we want to check the other side right away
        } else {
            // We're splitting, so should wait a bit
            chunk->clearBytesWritten();
        }

        // We assume that if the chunk being split is the first (or last) one on the collection,
        // this chunk is likely to see more insertions. Instead of splitting mid-chunk, we use the
        // very first (or last) key as a split point.
        //
        // This heuristic is skipped for "special" shard key patterns that are not likely to produce
        // monotonically increasing or decreasing values (e.g. hashed shard keys).
        if (KeyPattern::isOrderedKeyPattern(manager->getShardKeyPattern().toBSON())) {
            if (minIsInf) {
                BSONObj key = findExtremeKeyForShard(
                    opCtx, nss, chunk->getShardId(), manager->getShardKeyPattern(), true);
                if (!key.isEmpty()) {
                    splitPoints.front() = key.getOwned();
                }
            } else if (maxIsInf) {
                BSONObj key = findExtremeKeyForShard(
                    opCtx, nss, chunk->getShardId(), manager->getShardKeyPattern(), false);
                if (!key.isEmpty()) {
                    splitPoints.back() = key.getOwned();
                }
            }
        }

        const auto suggestedMigrateChunk =
            uassertStatusOK(shardutil::splitChunkAtMultiplePoints(opCtx,
                                                                  chunk->getShardId(),
                                                                  nss,
                                                                  manager->getShardKeyPattern(),
                                                                  manager->getVersion(),
                                                                  chunkRange,
                                                                  splitPoints));

        // Balance the resulting chunks if the option is enabled and if the shard suggested a chunk
        // to balance
        const bool shouldBalance = [&]() {
            if (!balancerConfig->shouldBalanceForAutoSplit())
                return false;

            auto collStatus =
                Grid::get(opCtx)->catalogClient()->getCollection(opCtx, manager->getns());
            if (!collStatus.isOK()) {
                log() << "Auto-split for " << nss << " failed to load collection metadata"
                      << causedBy(redact(collStatus.getStatus()));
                return false;
            }

            return collStatus.getValue().value.getAllowBalance();
        }();

        log() << "autosplitted " << nss << " chunk: " << redact(chunk->toString()) << " into "
              << (splitPoints.size() + 1) << " parts (desiredChunkSize " << desiredChunkSize << ")"
              << (suggestedMigrateChunk ? "" : (std::string) " (migrate suggested" +
                          (shouldBalance ? ")" : ", but no migrations allowed)"));

        // Reload the chunk manager after the split
        auto routingInfo = uassertStatusOK(
            Grid::get(opCtx)->catalogCache()->getShardedCollectionRoutingInfoWithRefresh(opCtx,
                                                                                         nss));

        if (!shouldBalance || !suggestedMigrateChunk) {
            return;
        }

        // Top chunk optimization - try to move the top chunk out of this shard to prevent the hot
        // spot from staying on a single shard. This is based on the assumption that succeeding
        // inserts will fall on the top chunk.

        // We need to use the latest chunk manager (after the split) in order to have the most
        // up-to-date view of the chunk we are about to move
        auto suggestedChunk = routingInfo.cm()->findIntersectingChunkWithSimpleCollation(
            suggestedMigrateChunk->getMin());

        ChunkType chunkToMove;
        chunkToMove.setNS(nss.ns());
        chunkToMove.setShard(suggestedChunk->getShardId());
        chunkToMove.setMin(suggestedChunk->getMin());
        chunkToMove.setMax(suggestedChunk->getMax());
        chunkToMove.setVersion(suggestedChunk->getLastmod());

        uassertStatusOK(configsvr_client::rebalanceChunk(opCtx, chunkToMove));

        // Ensure the collection gets reloaded because of the move
        Grid::get(opCtx)->catalogCache()->invalidateShardedCollection(nss);
    } catch (const DBException& ex) {
        chunk->clearBytesWritten();

        if (ErrorCodes::isStaleShardingError(ErrorCodes::Error(ex.getCode()))) {
            log() << "Unable to auto-split chunk " << redact(chunkRange.toString()) << causedBy(ex)
                  << ", going to invalidate routing table entry for " << nss;
            Grid::get(opCtx)->catalogCache()->invalidateShardedCollection(nss);
        }
    }
}
Example #15
0
/*
 * reoptimize() runs a trace through a second pass of IRBuilder
 * optimizations, like this:
 *
 *   reset state.
 *   move all blocks to a temporary list.
 *   compute immediate dominators.
 *   for each block in trace order:
 *     if we have a snapshot state for this block:
 *       clear cse entries that don't dominate this block.
 *       use snapshot state.
 *     move all instructions to a temporary list.
 *     for each instruction:
 *       optimizeWork - do CSE and simplify again
 *       if not simplified:
 *         append existing instruction and update state.
 *       else:
 *         if the instruction has a result, insert a mov from the
 *         simplified tmp to the original tmp and discard the instruction.
 *     if the last conditional branch was turned into a jump, remove the
 *     fall-through edge to the next block.
 */
void IRBuilder::reoptimize() {
  Timer _t("optimize_reoptimize");
  FTRACE(5, "ReOptimize:vvvvvvvvvvvvvvvvvvvv\n");
  SCOPE_EXIT { FTRACE(5, "ReOptimize:^^^^^^^^^^^^^^^^^^^^\n"); };
  always_assert(m_savedBlocks.empty());
  always_assert(!m_curWhere);
  always_assert(m_state.inlineDepth() == 0);

  m_state.setEnableCse(RuntimeOption::EvalHHIRCse);
  m_enableSimplification = RuntimeOption::EvalHHIRSimplification;
  if (!m_state.enableCse() && !m_enableSimplification) return;
  setConstrainGuards(false);

  auto blocksIds = rpoSortCfgWithIds(m_unit);
  auto const idoms = findDominators(m_unit, blocksIds);
  m_state.clear();

  for (auto* block : blocksIds.blocks) {
    FTRACE(5, "Block: {}\n", block->id());

    m_state.startBlock(block);
    m_curBlock = block;

    auto nextBlock = block->next();
    auto backMarker = block->back().marker();
    auto instructions = block->moveInstrs();
    assert(block->empty());
    while (!instructions.empty()) {
      auto* inst = &instructions.front();
      instructions.pop_front();

      // merging state looks at the current marker, and optimizeWork
      // below may create new instructions. Use the marker from this
      // instruction.
      assert(inst->marker().valid());
      setMarker(inst->marker());

      auto const tmp = optimizeWork(inst, idoms); // Can generate new instrs!
      if (!tmp) {
        // Could not optimize; keep the old instruction
        appendInstruction(inst);
        continue;
      }

      SSATmp* dst = inst->dst();
      if (dst != tmp) {
        // The result of optimization has a different destination than the inst.
        // Generate a mov(tmp->dst) to get result into dst. If we get here then
        // assume the last instruction in the block isn't a guard. If it was,
        // we would have to insert the mov on the fall-through edge.
        assert(block->empty() || !block->back().isBlockEnd());
        appendInstruction(m_unit.mov(dst, tmp, inst->marker()));
      }

      if (inst->isBlockEnd()) {
        // We're not re-adding the block-end instruction. Unset its edges.
        inst->setTaken(nullptr);
        inst->setNext(nullptr);
      }
    }

    if (block->empty() || !block->back().isBlockEnd()) {
      // Our block-end instruction was eliminated (most likely a Jmp* converted
      // to a nop). Replace it with a jump to the next block.
      appendInstruction(m_unit.gen(Jmp, backMarker, nextBlock));
    }

    m_state.finishBlock(block);
  }
}
void QmitkExampleView::ProcessSelectedImage()
{
  // Before we even think about processing something, we need to make sure
  // that we have valid input. Don't be sloppy; neglecting this is a major
  // cause of application crashes.

  auto selectedDataNodes = this->GetDataManagerSelection();

  if (selectedDataNodes.empty())
    return;

  auto firstSelectedDataNode = selectedDataNodes.front();

  if (firstSelectedDataNode.IsNull())
  {
    QMessageBox::information(nullptr, "Example View", "Please load and select an image before starting image processing.");
    return;
  }

  auto data = firstSelectedDataNode->GetData();

  // Something is selected, but does it contain data?
  if (data != nullptr)
  {
    // We don't use the auto keyword here, which would evaluate to a native
    // image pointer. Instead, we want a smart pointer in order to ensure that
    // the image isn't deleted somewhere else while we're using it.
    mitk::Image::Pointer image = dynamic_cast<mitk::Image*>(data);

    // Something is selected and it contains data, but is it an image?
    if (image.IsNotNull())
    {
      auto imageName = firstSelectedDataNode->GetName();
      auto offset = m_Controls.offsetSpinBox->value();

      MITK_INFO << "Process image \"" << imageName << "\" ...";

      // We're finally using the ExampleImageFilter from ExtExampleModule.
      auto filter = ExampleImageFilter::New();
      filter->SetInput(image);
      filter->SetOffset(offset);

      filter->Update();

      mitk::Image::Pointer processedImage = filter->GetOutput();

      if (processedImage.IsNull() || !processedImage->IsInitialized())
        return;

      MITK_INFO << "  done";

      // Stuff the resulting image into a data node, set some properties,
      // and add it to the data storage, which will eventually display the
      // image in the application.
      auto processedImageDataNode = mitk::DataNode::New();
      processedImageDataNode->SetData(processedImage);

      QString name = QString("%1 (Offset: %2)").arg(imageName.c_str()).arg(offset);
      processedImageDataNode->SetName(name.toStdString());

      // We don't really need to copy the level window, but if we didn't
      // do it, the new level window would be initialized to display the image
      // with optimal contrast in order to capture the whole range of pixel
      // values. This is also true for the input image as long as one didn't
      // modify its level window manually. Thus, the images would appear
      // identical unless you compare the level window widget for both images.
      mitk::LevelWindow levelWindow;

      if (firstSelectedDataNode->GetLevelWindow(levelWindow))
        processedImageDataNode->SetLevelWindow(levelWindow);

      // We also attach our ExampleImageInteractor, which allows us to paint
      // on the resulting images by using the mouse as long as the CTRL key
      // is pressed.
      auto interactor = CreateExampleImageInteractor();

      if (interactor.IsNotNull())
        interactor->SetDataNode(processedImageDataNode);

      this->GetDataStorage()->Add(processedImageDataNode);
    }
  }

  // Now it's your turn. This class/method has lots of room for improvements,
  // for example:
  //
  // - What happens when multiple items are selected but the first one isn't
  //   an image? - There isn't any feedback for the user at all.
  // - What's the front item of a selection? Does it depend on the order
  //   of selection or the position in the Data Manager? - Isn't it
  //   better to process all selected images? Don't forget to adjust the
  //   titles of the UI widgets.
  // - In addition to the displayed label, it's probably a good idea to
  //   enable or disable the button depending on the selection.
}
const QImage& ImageTransformer::
SobelTransform()
{
    QImage NewImage(_DataHandled.size(),_DataHandled.format());

    int width = NewImage.width();
    int height = NewImage.height();
    int depth = NewImage.depth();

    if(depth == 32)
    {
        for(int i=1;i<width-1;++i)
        {
            for(int j=1;j<height-1;++j)
            {
                /*O O O
                 *O X O
                 *O O O
                 */
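                // 3x3 neighborhood around X: "front"/"back" sample the previous/next row (j-1 / j+1),
                // "left"/"right" the previous/next column (i-1 / i+1)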
                QColor front_left(_DataHandled.pixel(i-1,j-1));
                QColor front(_DataHandled.pixel(i,j-1));
                QColor front_right(_DataHandled.pixel(i+1,j-1));

                QColor left(_DataHandled.pixel(i-1,j));
                QColor right(_DataHandled.pixel(i+1,j));

                QColor back_left(_DataHandled.pixel(i-1,j+1));
                QColor back(_DataHandled.pixel(i,j+1));
                QColor back_right(_DataHandled.pixel(i+1,j+1));

                int red_x = (front_left.red()+2*front.red()+front_right.red()) -
                        (back_left.red() +2*back.red() + back_right.red());
                int green_x = (front_left.green()+2*front.green()+front_right.green()) -
                        (back_left.green() +2*back.green() + back_right.green());
                int blue_x = (front_left.blue()+2*front.blue()+front_right.blue()) -
                        (back_left.blue() +2*back.blue() + back_right.blue());

                int red_y = (front_left.red()+2*left.red()+back_left.red())-
                        (front_right.red() +2*right.red()+back_right.red());
                int green_y = (front_left.green()+2*left.green()+back_left.green())-
                        (front_right.green() +2*right.green()+back_right.green());
                int blue_y = (front_left.blue()+2*left.blue()+back_left.blue())-
                        (front_right.blue() +2*right.blue()+back_right.blue());

                int red = static_cast<int>(round(sqrt(pow(red_x,2)+pow(red_y,2))));
                int green = static_cast<int>(round(sqrt(pow(green_x,2)+pow(green_y,2))));
                int blue = static_cast<int>(round(sqrt(pow(blue_x,2)+pow(blue_y,2))));

                red = (red > 255)? 255:red;
                green = (green > 255)? 255:green;
                blue = (blue > 255)? 255:blue;

                red = (red < 0)? 0:red;
                green = (green < 0)? 0:green;
                blue = (blue < 0)? 0:blue;

                NewImage.setPixel(i,j,QColor(red,green,blue).rgb());
            }
        }
        _DataHandled = NewImage;
    }
    return _DataHandled;
}
bool SegmentTulipDepth::run(Communicator *comm, const Options &options, ShapeGraph &map, bool simple_version) {

    AttributeTable &attributes = map.getAttributeTable();

    std::string stepdepth_col_text = "Angular Step Depth";
    int stepdepth_col = attributes.insertOrResetColumn(stepdepth_col_text.c_str());

    // The original code set tulip_bins to 1024; it is halved and incremented
    // by one (1024 / 2 + 1 = 513) to reproduce the earlier behaviour, which
    // used a semicircle of tulip bins.
    size_t tulip_bins = 513;

    std::vector<bool> covered(map.getConnections().size(), false);
    std::vector<std::vector<SegmentData> > bins(tulip_bins);

    int opencount = 0;
    for (auto& sel: map.getSelSet()) {
       int row = depthmapX::getMapAtIndex(map.getAllShapes(), sel)->first;
       if (row != -1) {
          bins[0].push_back(SegmentData(0,row,SegmentRef(),0,0.0,0));
          opencount++;
       }
    }
    int depthlevel = 0;
    auto binIter = bins.begin();
    int currentbin = 0;
    while (opencount) {
       while (binIter->empty()) {
          depthlevel++;
          binIter++;
          currentbin++;
          if (binIter == bins.end()) {
             binIter = bins.begin();
          }
       }
       SegmentData lineindex;
       if (binIter->size() > 1) {
          // it is slightly slower to delete from an arbitrary place in the bin,
          // but it is necessary to use random paths to even out the number of times through equal paths
          int curr = pafrand() % binIter->size();
          auto currIter = binIter->begin() + curr;
          lineindex = *currIter;
          binIter->erase(currIter);
          // note: do not clear choice values here!
       }
       else {
          lineindex = binIter->front();
          binIter->pop_back();
       }
       opencount--;
       if (!covered[lineindex.ref]) {
          covered[lineindex.ref] = true;
          Connector& line = map.getConnections()[lineindex.ref];
          // convert depth from tulip_bins normalised to standard angle
          // (note the -1)
          double depth_to_line = depthlevel / ((tulip_bins - 1) * 0.5);
          map.getAttributeRowFromShapeIndex(lineindex.ref).setValue(stepdepth_col,depth_to_line);
          int extradepth;
          if (lineindex.dir != -1) {
             for (auto& segconn: line.m_forward_segconns) {
                if (!covered[segconn.first.ref]) {
                   extradepth = (int) floor(segconn.second * tulip_bins * 0.5);
                   bins[(currentbin + tulip_bins + extradepth) % tulip_bins].push_back(
                       SegmentData(segconn.first,lineindex.ref,lineindex.segdepth+1,0.0,0));
                   opencount++;
                }
             }
          }
          if (lineindex.dir != 1) {
             for (auto& segconn: line.m_back_segconns) {
                if (!covered[segconn.first.ref]) {
                   extradepth = (int) floor(segconn.second * tulip_bins * 0.5);
                   bins[(currentbin + tulip_bins + extradepth) % tulip_bins].push_back(
                       SegmentData(segconn.first,lineindex.ref,lineindex.segdepth+1,0.0,0));
                   opencount++;
                 }
             }
          }
       }
    }

    map.setDisplayedAttribute(-2); // <- override if it's already showing
    map.setDisplayedAttribute(stepdepth_col);

    return true;
}
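
The traversal above is essentially Dial's algorithm (a bucketed Dijkstra): angular edge weights are quantized into tulip_bins buckets, nodes are settled bucket by bucket around a circular array, and the final bucket level is divided by (tulip_bins - 1) * 0.5 to recover an angular depth. A self-contained sketch of the same pattern on a plain adjacency list; Edge and binnedDepths are illustrative names rather than depthmapX types, and the quantized edge weights are assumed to be smaller than the bucket count.

#include <cstddef>
#include <cstdio>
#include <vector>

// Each edge stores the target node and a weight already quantized into
// buckets (it must be < binCount for the circular indexing to be unambiguous).
struct Edge { std::size_t to; std::size_t weightBins; };

std::vector<std::size_t> binnedDepths(const std::vector<std::vector<Edge>>& graph,
                                      std::size_t source, std::size_t binCount)
{
    const std::size_t unreached = static_cast<std::size_t>(-1);
    std::vector<std::size_t> depth(graph.size(), unreached);
    std::vector<std::vector<std::size_t>> bins(binCount);

    bins[0].push_back(source);
    std::size_t open = 1, level = 0, current = 0;

    while (open > 0)
    {
        while (bins[current].empty())          // advance to the next non-empty bucket
        {
            ++level;
            current = (current + 1) % binCount;
        }
        const std::size_t node = bins[current].back();
        bins[current].pop_back();
        --open;

        if (depth[node] != unreached)
            continue;                          // already settled at a lower level
        depth[node] = level;

        for (const Edge& e : graph[node])
            if (depth[e.to] == unreached)
            {
                bins[(current + e.weightBins) % binCount].push_back(e.to);
                ++open;
            }
    }
    return depth;
}

int main()
{
    // 0 --2--> 1 --1--> 2, plus a direct 0 --4--> 2; the indirect route wins.
    std::vector<std::vector<Edge>> graph(3);
    graph[0] = { {1, 2}, {2, 4} };
    graph[1] = { {2, 1} };

    for (std::size_t d : binnedDepths(graph, 0, 8))
        std::printf("%zu ", d);                // prints: 0 2 3
    std::printf("\n");
}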
int main(void)
{
	int capacity;
	scanf("%d", &capacity);
	struct queue * q;
	q = create (capacity);
	
	printf("1.enqueue\n2.dequeue\n3.front\n4.rear\n5.isempty\n6.isfull\n7.end\n");
	int end;
	int test;
	int num;
	
	end = 1;
		
	while (end) {
		scanf("%d", &test);
		switch (test){
			case 1:
				scanf("%d", &num);
				enqueue(q, num);
				break;
			case 2:
				num = dequeue(q);
				if (num == -1) {
					printf("Underflow\n");
				} else {
					printf("%d\n",num);
				}
				break;
			case 3:
				num = front(q);
				if (num != -1) {
					printf("%d\n", num);
				}
				break;
			case 4:
				num = rear(q);
				if (num != -1) {
					printf("%d\n", num);
				}
				break;
			case 5:
				num = isempty(q);
				if (num == 0) {
					printf("False\n");
				} else {
					printf("True\n");
				}
				break;
			case 6:
				num = isfull(q);
				if (num == 0) {			
					printf("False\n");
				} else {
					printf("True\n");
				}
				break;
			case 7:
				end = 0;
				break;
		}
	}
	return 0;
}
Example #20
0
void CmdPartDesignMigrate::activated(int iMsg)
{
    Q_UNUSED(iMsg);
    App::Document *doc = getDocument();

    std::set<PartDesign::Feature*> migrateFeatures;


    // Retrieve all PartDesign Feature objects and filter out features that already belong to some body
    for ( const auto & feat: doc->getObjects(  ) ) {
         if( feat->isDerivedFrom( PartDesign::Feature::getClassTypeId() ) &&
                 !PartDesign::Body::findBodyOf( feat ) && PartDesign::Body::isSolidFeature ( feat ) ) {
             migrateFeatures.insert ( static_cast <PartDesign::Feature *>( feat ) );
         }
    }

    if ( migrateFeatures.empty() ) {
        if ( !PartDesignGui::isModernWorkflow ( doc ) ) {
            // If there is nothing to migrate and workflow is still old just set it to modern
            PartDesignGui::WorkflowManager::instance()->forceWorkflow (
                    doc, PartDesignGui::Workflow::Modern );
        } else {
            // Huh? nothing to migrate?
            QMessageBox::warning ( 0, QObject::tr ( "Nothing to migrate" ),
                    QObject::tr ( "No PartDesign features that don't belong to a body were found."
                        " Nothing to migrate." ) );
        }
        return;
    }

    // Note: this action is undoable, should it be?
    PartDesignGui::WorkflowManager::instance()->forceWorkflow ( doc, PartDesignGui::Workflow::Modern );

    // Put features into chains. Each chain should become a separate body.
    std::list< std::list<PartDesign::Feature *> > featureChains;
    std::list<PartDesign::Feature *> chain; //< the current chain we are working on

    for (auto featIt = migrateFeatures.begin(); !migrateFeatures.empty(); ) {
        Part::Feature *base = (*featIt)->getBaseObject( /*silent =*/ true );

        chain.push_front ( *featIt );

        if ( !base || !base->isDerivedFrom (PartDesign::Feature::getClassTypeId () ) ||
                PartDesignGui::isAnyNonPartDesignLinksTo ( static_cast <PartDesign::Feature *>(base),
                                                           /*respectGroups=*/ true ) ) {
            // a feature based on nothing as well as on non-partdesign solid starts a new chain
            auto newChainIt = featureChains.emplace (featureChains.end());
            newChainIt->splice (newChainIt->end(), chain);
        } else {
            // This feature is based on some PartDesign feature, which is supposed to belong to some body
            PartDesign::Feature *baseFeat = static_cast <PartDesign::Feature *>( base );

            auto baseFeatSetIt = find ( migrateFeatures.begin (), migrateFeatures.end (), baseFeat );

            if ( baseFeatSetIt != migrateFeatures.end() ) {
                // base feature is pending for migration, switch to it and continue over
                migrateFeatures.erase(featIt);
                featIt = baseFeatSetIt;
                continue;
            } else {
                // The base feature seems to be assigned to some chain already;
                // find out which one.
                std::list<PartDesign::Feature *>::iterator baseFeatIt;
                auto chainIt = std::find_if( featureChains.begin(), featureChains.end(),
                        [baseFeat, &baseFeatIt] ( std::list<PartDesign::Feature *>&chain ) mutable -> bool {
                            baseFeatIt = std::find( chain.begin(), chain.end(), baseFeat );
                            return baseFeatIt !=  chain.end();
                        } );

                if ( chainIt != featureChains.end() ) {
                    assert (baseFeatIt != chainIt->end());
                    if ( std::next ( baseFeatIt ) == chainIt->end() ) {
                        // just append our chain to already found
                        chainIt->splice ( chainIt->end(), chain );
                        // TODO If we hit a third part, everything will be messed up again.
                        //      It will probably require yet another find_if. (2015-08-10, Fat-Zer)
                    } else {
                        // We have a fork of a partDesign feature here
                        // add a chain for current body
                        auto newChainIt = featureChains.emplace (featureChains.end());
                        newChainIt->splice (newChainIt->end(), chain);
                        // add a chain for forked one
                        newChainIt = featureChains.emplace (featureChains.end());
                        newChainIt->splice (newChainIt->end(), *chainIt,
                                std::next ( baseFeatIt ), chainIt->end());
                    }
                } else {
                    // The feature is not present in the list pending migration.
                    // This generally shouldn't happen, but it may if we run into a broken file.
                    // Try to find out the body we should insert into.
                    // TODO Some error/warning is needed here (2015-08-10, Fat-Zer)
                    auto newChainIt = featureChains.emplace (featureChains.end());
                    newChainIt->splice (newChainIt->end(), chain);
                }
            }
        }
        migrateFeatures.erase ( featIt );
        featIt = migrateFeatures.begin ();
        // TODO Align visibility (2015-08-17, Fat-Zer)
    } /* for */

    // TODO make it work without parts (2015-09-04, Fat-Zer)
    // add a part if there is no active yet
    App::Part *actPart = PartDesignGui::assertActivePart ();

    if (!actPart) {
        return;
    }

    // do the actual migration
    Gui::Command::openCommand("Migrate legacy part design features to Bodies");

    for ( auto chainIt = featureChains.begin(); !featureChains.empty();
            featureChains.erase (chainIt), chainIt = featureChains.begin () ) {
#ifndef FC_DEBUG
        if ( chainIt->empty () ) { // prevent crash in release in case of errors
            continue;
        }
#else
        assert ( !chainIt->empty () );
#endif
        Part::Feature *base = chainIt->front()->getBaseObject ( /*silent =*/ true );

        // Find a suitable chain to work with
        for( ; chainIt != featureChains.end(); chainIt ++) {
            base = chainIt->front()->getBaseObject ( /*silent =*/ true );
            if (!base || !base->isDerivedFrom ( PartDesign::Feature::getClassTypeId () ) ) {
                break;   // no base is ok
            } else {
                // The base feature is a PartDesign, it's a fork, try to reassign it to a body...
                base = PartDesign::Body::findBodyOf ( base );
                if ( base ) {
                    break;
                }
            }
        }

        if ( chainIt == featureChains.end() ) {
            // Shouldn't happen; maybe only in case of some circular dependency?
            // TODO Some error message (2015-08-11, Fat-Zer)
            chainIt = featureChains.begin();
            base = chainIt->front()->getBaseObject ( /*silent =*/ true );
        }

        // Construct a Pretty Body name based on the Tip
        std::string bodyName = getUniqueObjectName (
                std::string ( chainIt->back()->getNameInDocument() ).append ( "Body" ).c_str () ) ;

        // Create a body for the chain
        doCommand ( Doc,"App.activeDocument().addObject('PartDesign::Body','%s')", bodyName.c_str () );
        doCommand ( Doc,"App.activeDocument().%s.addObject(App.ActiveDocument.%s)",
                actPart->getNameInDocument (), bodyName.c_str () );
        if (base) {
            doCommand ( Doc,"App.activeDocument().%s.BaseFeature = App.activeDocument().%s",
                bodyName.c_str (), base->getNameInDocument () );
        }

        // Fill the body with features
        for ( auto feature: *chainIt ) {
            if ( feature->isDerivedFrom ( PartDesign::ProfileBased::getClassTypeId() ) ) {
                // add the sketch and also reroute it if needed
                PartDesign::ProfileBased *sketchBased = static_cast<PartDesign::ProfileBased *> ( feature );
                Part::Part2DObject *sketch = sketchBased->getVerifiedSketch( /*silent =*/ true);
                if ( sketch ) {
                    doCommand ( Doc,"App.activeDocument().%s.addObject(App.activeDocument().%s)",
                            bodyName.c_str (), sketch->getNameInDocument() );

                    if ( sketch->isDerivedFrom ( Sketcher::SketchObject::getClassTypeId() ) ) {
                        try {
                            PartDesignGui::fixSketchSupport ( static_cast<Sketcher::SketchObject *> ( sketch ) );
                        } catch (Base::Exception &) {
                            QMessageBox::critical(Gui::getMainWindow(),
                                    QObject::tr("Sketch plane cannot be migrated"),
                                    QObject::tr("Please edit '%1' and redefine it to use a Base or "
                                        "Datum plane as the sketch plane.").
                                    arg(QString::fromUtf8(sketch->Label.getValue()) ) );
                        }
                    } else {
                        // TODO Message that sketchbased is based not on a sketch (2015-08-11, Fat-Zer)
                    }
                }
            }
            doCommand ( Doc,"App.activeDocument().%s.addObject(App.activeDocument().%s)",
                    bodyName.c_str (), feature->getNameInDocument() );

            PartDesignGui::relinkToBody ( feature );
        }

    }

    updateActive();
}
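
The chain bookkeeping above leans entirely on std::list::splice, which moves elements between lists without copying them. A minimal stand-alone illustration of the same splice patterns (start a new chain, append to an existing one, split off a fork) using integer feature IDs instead of FreeCAD objects:

#include <iostream>
#include <iterator>
#include <list>

int main()
{
    std::list<std::list<int>> featureChains;
    std::list<int> chain { 3, 4, 5 };                  // the chain being built

    // Start a new chain at the end of the container and move 'chain' into it.
    auto newChainIt = featureChains.emplace(featureChains.end());
    newChainIt->splice(newChainIt->end(), chain);      // 'chain' is now empty

    // Append a freshly built continuation (the "base is the tail" case).
    std::list<int> continuation { 6, 7 };
    newChainIt->splice(newChainIt->end(), continuation);

    // Split a fork: move everything after the element 4 into its own chain.
    auto forkAt = std::next(newChainIt->begin(), 2);   // iterator to 5
    auto forkChainIt = featureChains.emplace(featureChains.end());
    forkChainIt->splice(forkChainIt->end(), *newChainIt, forkAt, newChainIt->end());

    for (const auto& c : featureChains)
    {
        for (int id : c)
            std::cout << id << ' ';
        std::cout << '\n';                             // prints "3 4" then "5 6 7"
    }
}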
Example #21
0
void Mesher::check_feature()
{
    auto contour = get_contour();
    const auto normals = get_normals(contour);

    // Find the pair of normals n0, n1 that enclose the largest angle
    // (i.e. the widest cone spanned by the contour's normals).
    float theta = 1;
    Vec3f n0, n1;
    for (auto ni : normals)
    {
        for (auto nj : normals)
        {
            float dot = ni.dot(nj);
            if (dot < theta)
            {
                theta = dot;
                n0 = ni;
                n1 = nj;
            }
        }
    }

    // If there isn't a feature in this fan, then return immediately.
    if (theta > 0.9)
        return;

    // Decide whether this is a corner or edge feature.
    const Vec3f nstar = n0.cross(n1);
    float phi = 0;
    for (auto n : normals)
        phi = fmax(phi, fabs(nstar.dot(n)));
    bool edge = phi < 0.7;

    // Find the center of the contour.
    Vec3f center(0, 0, 0);
    for (auto c : contour)
        center += c;
    center /= contour.size();

    // Construct the matrices for use in our least-square fit.
    Eigen::MatrixX3d A(normals.size(), 3);
    {
        int i=0;
        for (auto n : normals)
            A.row(i++) << n.transpose();
    }

    // When building the second matrix, shift position values to be centered
    // about the origin (because that's what the least-squares fit will
    // minimize).
    Eigen::VectorXd B(normals.size());
    {
        auto n = normals.begin();
        auto c = contour.begin();
        int i=0;
        while (n != normals.end())
            B.row(i++) << (n++)->dot(*(c++) - center);
    }

    // Use singular value decomposition to solve the least-squares fit.
    Eigen::JacobiSVD<Eigen::MatrixX3d> svd(A, Eigen::ComputeFullU |
                                              Eigen::ComputeFullV);

    // Raise the solve threshold so the smallest singular value is treated as
    // zero, which keeps the fit well behaved for edge features.
    if (edge)
    {
        auto singular = svd.singularValues();
        svd.setThreshold(singular.minCoeff() / singular.maxCoeff() * 1.01);
    }

    // Solve for the new point's position.
    const Vec3f new_pt = svd.solve(B) + center;

    // Erase this triangle fan, as we'll be inserting a vertex in the center.
    triangles.erase(fan_start, voxel_start);

    // Construct a new triangle fan.
    contour.push_back(contour.front());
    {
        auto p0 = contour.begin();
        auto p1 = contour.begin();
        p1++;
        while (p1 != contour.end())
            push_swappable_triangle(Triangle(*(p0++), *(p1++), new_pt));
    }
}
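
The corner/edge placement above is an ordinary least-squares solve of n_i . x = d_i via SVD. A minimal stand-alone Eigen example of the same pattern on synthetic data (four planes meeting at a known corner), unrelated to the mesher's own types:

#include <Eigen/Dense>
#include <iostream>

int main()
{
    // Four planes n . x = d meeting at the corner (1, 2, 3).
    Eigen::MatrixX3d A(4, 3);
    A << 1, 0, 0,
         0, 1, 0,
         0, 0, 1,
         1, 1, 1;
    Eigen::VectorXd b(4);
    b << 1, 2, 3, 6;

    // Full U/V, as in check_feature(): thin unitaries are not available for a
    // matrix type with a fixed number of columns such as MatrixX3d.
    Eigen::JacobiSVD<Eigen::MatrixX3d> svd(A, Eigen::ComputeFullU | Eigen::ComputeFullV);
    const Eigen::Vector3d p = svd.solve(b);

    std::cout << "least-squares corner: " << p.transpose() << std::endl;  // 1 2 3
}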
ResolveCorrespondence::ResolveCorrespondence(Structure::Graph *shapeA,
                                             Structure::Graph *shapeB,
                                             QVector<QPair<QString, QString> > orig_pairs,
                                             GraphCorresponder *gcorr)
{
    auto groupsA = shapeA->groups;
    auto groupsB = shapeB->groups;

    auto isInGroup = [&](QString nodeID, QVector< QVector<QString> > groups){
        for(auto group : groups){
            if(group.contains(nodeID))
                return true;
        }
        return false;
    };

    // State
    QMap<QString, bool> matchedA, matchedB;
    for(auto n : shapeA->nodes) matchedA[n->id] = false;
    for(auto n : shapeB->nodes) matchedB[n->id] = false;

    // Matching as a map
    QMap<QString, QString> pairsA, pairsB;
    for(auto pair : orig_pairs)
    {
        pairsA[pair.first] = pair.second;
        pairsB[pair.second] = pair.first;
    }

    // Go over source nodes
    for(auto n : shapeA->nodes)
    {
        if(matchedA[n->id]) continue;

        // Check if it is matched
        if( pairsA.contains(n->id) )
        {
            QString match = pairsA[n->id];
            matchedA[n->id] = true;
            matchedB[match] = true;

            if(isInGroup(n->id, groupsA))
            // The source node is grouped: this is a many-to-something match
            {
                // All source many need to be matched
                QVector<QString> grpA;
                auto allGrpA = shapeA->groupsOf(n->id);
                for (auto j : allGrpA.front())
                    if(pairsA.contains(j))
                        grpA << j;

                if( isInGroup(match, groupsB) )
                // Many-to-many
                {
                    // Now we have a many-to-many that were matched
                    // Resolve all as a one-to-one (scheduler should fix timing..)
                    for(auto j : grpA)
                    {
                        gcorr->addLandmarks( QVector<QString>() << j, QVector<QString>() << pairsA[j] );
                        matchedB[pairsA[j]] = true;
                    }
                }
                else
                // Many-to-one
                {
                    gcorr->addLandmarks( grpA, QVector<QString>() << match );
                }

                for(auto j : grpA) matchedA[j] = true;
            }
            else
            // The source node is not grouped: this is a one-to-something match
            {
                if( isInGroup(match, groupsB) )
                // One-to-many
                {
                    // All target many need to be matched
                    QVector<QString> grpB;
                    auto allGrpB = shapeB->groupsOf(match);
                    for (auto j : allGrpB.front())
                        if(pairsB.contains(j))
                            grpB << j;

                    gcorr->addLandmarks( QVector<QString>() << n->id, grpB );
                    for(auto j : grpB) matchedB[j] = true;
                }
                else
                // One-to-one
                {
                    gcorr->addLandmarks( QVector<QString>() << n->id, QVector<QString>() << match );
                    matchedB[match] = true;
                }
            }
        }
    }

    // one-to-nothing
    for(auto nid : matchedA.keys()) if(!matchedA[nid]) gcorr->setNonCorresSource(nid);
    for(auto nid : matchedB.keys()) if(!matchedB[nid]) gcorr->setNonCorresTarget(nid);
}
void ribi::cmap::QtConceptMap::keyPressEvent(QKeyEvent *event)
{
  CheckInvariants();
  UpdateConceptMap();
  CheckInvariants();

  switch (event->key())
  {
    case Qt::Key_F1:
    case Qt::Key_F2:
    {
      const auto items = scene()->selectedItems();
      if (items.size() != 1) {
        break;
      }
      if (QtNode * const qtnode = dynamic_cast<QtNode*>(items.front()))
      {
        OnNodeKeyDownPressed(qtnode, event->key());
      }
    }
    break;
    #ifndef NDEBUG
    case Qt::Key_F9:
    {
      throw std::runtime_error("Exception forced by user");
    }
    #endif
    case Qt::Key_Delete:
    {
      UpdateConceptMap();
      if (GetVerbosity()) { TRACE("Pressing delete"); }
      try
      {
        DoCommand(new CommandDeleteSelected(m_conceptmap, scene(), m_tools));
      }
      catch (std::logic_error& e)
      {
        if (GetVerbosity()) { TRACE(e.what()); }
      }
    }
    return;
    case Qt::Key_Escape:
    {
      if (GetVerbosity()) { TRACE("Pressing Escape"); }
      //Only remove the 'new arrow' if present
      if (m_arrow->isVisible())
      {
        if (GetVerbosity()) { TRACE("Remove the new arrow"); }
        m_arrow->hide();
        assert(!m_arrow->isVisible());
        return;
      }
    }
    break;
    case Qt::Key_Equal:
      if (GetVerbosity()) { TRACE("Pressing Qt::Key_Equal"); }
      this->scale(1.1,1.1);
      break;
    case Qt::Key_Minus:
      if (GetVerbosity()) { TRACE("Pressing Qt::Key_Minus"); }
      this->scale(0.9,0.9);
      break;
    case Qt::Key_E:
      if (event->modifiers() & Qt::ControlModifier)
      {
        if (GetVerbosity()) { TRACE("Pressing CTRL-E"); }
        try { this->DoCommand(new CommandCreateNewEdgeBetweenTwoSelectedNodes(GetConceptMap(),m_mode,scene(),m_tools)); }
        catch (std::logic_error& ) {}
      }
      return;
    case Qt::Key_T:
      if (event->modifiers() & Qt::ControlModifier)
      {
        if (GetVerbosity()) { TRACE("Pressing CTRL-T"); }
        try
        {
          const auto cmd = new CommandToggleArrowTail(GetConceptMap(), scene());
          this->DoCommand(cmd);
        }
        catch (std::logic_error&) {}
      }
      return;
    case Qt::Key_H:
      if (event->modifiers() & Qt::ControlModifier)
      {
        if (GetVerbosity()) { TRACE("Pressing CTRL-H"); }
        try
        {
          const auto cmd = new CommandToggleArrowHead(GetConceptMap(), scene());
          this->DoCommand(cmd);
        }
        catch (std::logic_error&) {}
      }
      return;
    case Qt::Key_N:
      if (event->modifiers() & Qt::ControlModifier)
      {
        if (GetVerbosity()) { TRACE("Pressing CTRL-N"); }
        try { this->DoCommand(new CommandCreateNewNode(m_conceptmap,m_mode,scene(),m_tools,0.0,0.0)); }
        catch (std::logic_error& ) {}
      }
      return;
    case Qt::Key_Z:
      if (event->modifiers() & Qt::ControlModifier)
      {
        if (event->modifiers() & Qt::ShiftModifier)
        {
          this->m_undo.redo();
        }
        else
        {
          this->m_undo.undo();
        }
      }
      return;
    case Qt::Key_Question:
      if (GetVerbosity()) { TRACE("Pressing Qt::Key_Question"); }
      UpdateConceptMap();
      break;

  }

  for (auto qtedge: GetSelectedQtEdges(*GetScene())) {
    qtedge->keyPressEvent(event);
    qtedge->update();
  }
  QtKeyboardFriendlyGraphicsView::keyPressEvent(event);
  UpdateConceptMap();

  CheckInvariants();
}
nsresult
MediaPipelineFactory::GetOrCreateAudioConduit(
    const JsepTrackPair& aTrackPair,
    const JsepTrack& aTrack,
    RefPtr<MediaSessionConduit>* aConduitp)
{

  if (!aTrack.GetNegotiatedDetails()) {
    MOZ_ASSERT(false, "Track is missing negotiated details");
    return NS_ERROR_INVALID_ARG;
  }

  bool receiving = aTrack.GetDirection() == sdp::kRecv;

  RefPtr<AudioSessionConduit> conduit =
    mPCMedia->GetAudioConduit(aTrackPair.mLevel);

  if (!conduit) {
    conduit = AudioSessionConduit::Create();
    if (!conduit) {
      MOZ_MTLOG(ML_ERROR, "Could not create audio conduit");
      return NS_ERROR_FAILURE;
    }

    mPCMedia->AddAudioConduit(aTrackPair.mLevel, conduit);
  }

  if (!GetBestCodec(*aTrack.GetNegotiatedDetails())) {
    MOZ_MTLOG(ML_ERROR, "Can't set up a conduit with 0 codecs");
    return NS_ERROR_FAILURE;
  }

  size_t numCodecs = aTrack.GetNegotiatedDetails()->GetCodecCount();
  if (receiving) {
    PtrVector<AudioCodecConfig> configs;

    for (size_t i = 0; i < numCodecs; i++) {
      const JsepCodecDescription* cdesc =
        aTrack.GetNegotiatedDetails()->GetCodec(i);

      AudioCodecConfig* configRaw;
      nsresult rv = JsepCodecDescToCodecConfig(*cdesc, &configRaw);
      if (NS_FAILED(rv))
        return rv;

      configs.values.push_back(configRaw);
    }

    auto error = conduit->ConfigureRecvMediaCodecs(configs.values);

    if (error) {
      MOZ_MTLOG(ML_ERROR, "ConfigureRecvMediaCodecs failed: " << error);
      return NS_ERROR_FAILURE;
    }

    if (!aTrackPair.mSending) {
      // No send track, but we still need to configure an SSRC for receiver
      // reports.
      if (!conduit->SetLocalSSRC(aTrackPair.mRecvonlySsrc)) {
        MOZ_MTLOG(ML_ERROR, "SetLocalSSRC failed");
        return NS_ERROR_FAILURE;
      }
    }
  } else {
    // For now we only expect to have one ssrc per local track.
    auto ssrcs = aTrack.GetSsrcs();
    if (!ssrcs.empty()) {
      if (!conduit->SetLocalSSRC(ssrcs.front())) {
        MOZ_MTLOG(ML_ERROR, "SetLocalSSRC failed");
        return NS_ERROR_FAILURE;
      }
    }

    conduit->SetLocalCNAME(aTrack.GetCNAME().c_str());

    const JsepCodecDescription* cdesc =
      GetBestCodec(*aTrack.GetNegotiatedDetails());

    AudioCodecConfig* configRaw;
    nsresult rv = JsepCodecDescToCodecConfig(*cdesc, &configRaw);
    if (NS_FAILED(rv))
      return rv;

    ScopedDeletePtr<AudioCodecConfig> config(configRaw);
    auto error = conduit->ConfigureSendMediaCodec(config.get());
    if (error) {
      MOZ_MTLOG(ML_ERROR, "ConfigureSendMediaCodec failed: " << error);
      return NS_ERROR_FAILURE;
    }

    const SdpExtmapAttributeList::Extmap* audioLevelExt =
        aTrack.GetNegotiatedDetails()->GetExt(
            "urn:ietf:params:rtp-hdrext:ssrc-audio-level");

    if (audioLevelExt) {
      MOZ_MTLOG(ML_DEBUG, "Calling EnableAudioLevelExtension");
      error = conduit->EnableAudioLevelExtension(true, audioLevelExt->entry);

      if (error) {
        MOZ_MTLOG(ML_ERROR, "EnableAudioLevelExtension failed: " << error);
        return NS_ERROR_FAILURE;
      }
    }
  }

  *aConduitp = conduit;

  return NS_OK;
}
 inline const value_type& top() const { return front(); }
nsresult
MediaPipelineFactory::GetOrCreateVideoConduit(
    const JsepTrackPair& aTrackPair,
    const JsepTrack& aTrack,
    RefPtr<MediaSessionConduit>* aConduitp)
{

  if (!aTrack.GetNegotiatedDetails()) {
    MOZ_ASSERT(false, "Track is missing negotiated details");
    return NS_ERROR_INVALID_ARG;
  }

  bool receiving = aTrack.GetDirection() == sdp::kRecv;

  RefPtr<VideoSessionConduit> conduit =
    mPCMedia->GetVideoConduit(aTrackPair.mLevel);

  if (!conduit) {
    conduit = VideoSessionConduit::Create();
    if (!conduit) {
      MOZ_MTLOG(ML_ERROR, "Could not create video conduit");
      return NS_ERROR_FAILURE;
    }

    mPCMedia->AddVideoConduit(aTrackPair.mLevel, conduit);
  }

  if (!GetBestCodec(*aTrack.GetNegotiatedDetails())) {
    MOZ_MTLOG(ML_ERROR, "Can't set up a conduit with 0 codecs");
    return NS_ERROR_FAILURE;
  }

  size_t numCodecs = aTrack.GetNegotiatedDetails()->GetCodecCount();

  bool configuredH264 = false;
  if (receiving) {
    PtrVector<VideoCodecConfig> configs;

    for (size_t i = 0; i < numCodecs; i++) {
      const JsepCodecDescription* cdesc =
        aTrack.GetNegotiatedDetails()->GetCodec(i);

      // We can only handle configuring one recv H264 codec
      if (configuredH264 && (cdesc->mName == "H264")) {
        continue;
      }

      VideoCodecConfig* configRaw;
      nsresult rv = JsepCodecDescToCodecConfig(*cdesc, &configRaw);
      if (NS_FAILED(rv))
        return rv;

      UniquePtr<VideoCodecConfig> config(configRaw);
      if (EnsureExternalCodec(*conduit, config.get(), false)) {
        continue;
      }

      if (cdesc->mName == "H264") {
        configuredH264 = true;
      }
      configs.values.push_back(config.release());
    }

    auto error = conduit->ConfigureRecvMediaCodecs(configs.values);

    if (error) {
      MOZ_MTLOG(ML_ERROR, "ConfigureRecvMediaCodecs failed: " << error);
      return NS_ERROR_FAILURE;
    }

    if (!aTrackPair.mSending) {
      // No send track, but we still need to configure an SSRC for receiver
      // reports.
      if (!conduit->SetLocalSSRC(aTrackPair.mRecvonlySsrc)) {
        MOZ_MTLOG(ML_ERROR, "SetLocalSSRC failed");
        return NS_ERROR_FAILURE;
      }
    }
  } else {
    // For now we only expect to have one ssrc per local track.
    auto ssrcs = aTrack.GetSsrcs();
    if (!ssrcs.empty()) {
      if (!conduit->SetLocalSSRC(ssrcs.front())) {
        MOZ_MTLOG(ML_ERROR, "SetLocalSSRC failed");
        return NS_ERROR_FAILURE;
      }
    }

    conduit->SetLocalCNAME(aTrack.GetCNAME().c_str());

    const JsepCodecDescription* cdesc =
      GetBestCodec(*aTrack.GetNegotiatedDetails());

    VideoCodecConfig* configRaw;
    nsresult rv = JsepCodecDescToCodecConfig(*cdesc, &configRaw);
    if (NS_FAILED(rv))
      return rv;

    rv = ConfigureVideoCodecMode(aTrack,*conduit);
    if (NS_FAILED(rv)) {
      return rv;
    }

    // Take possession of this pointer
    ScopedDeletePtr<VideoCodecConfig> config(configRaw);

    if (EnsureExternalCodec(*conduit, config, true)) {
      MOZ_MTLOG(ML_ERROR, "External codec not available");
      return NS_ERROR_FAILURE;
    }

    auto error = conduit->ConfigureSendMediaCodec(config);

    if (error) {
      MOZ_MTLOG(ML_ERROR, "ConfigureSendMediaCodec failed: " << error);
      return NS_ERROR_FAILURE;
    }
  }

  *aConduitp = conduit;

  return NS_OK;
}
Example #27
0
/**
 * Computes the normalization for the input workspace. Results are stored in
 * m_normWS.
 * @param otherValues Values of the non-HKL dimensions to copy into each point
 * @param affineTrans Affine transformation from the intersection coordinates
 * to the coordinates of m_normWS
 */
void MDNormDirectSC::calculateNormalization(
    const std::vector<coord_t> &otherValues,
    const Kernel::Matrix<coord_t> &affineTrans) {
  constexpr double energyToK = 8.0 * M_PI * M_PI *
                               PhysicalConstants::NeutronMass *
                               PhysicalConstants::meV * 1e-20 /
                               (PhysicalConstants::h * PhysicalConstants::h);
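  // energyToK converts an energy in meV into k^2 in inverse Angstroms squared:
  // E = hbar^2 k^2 / (2 m_n)  =>  k^2 = (8 pi^2 m_n / h^2) E, with the meV and
  // 1e-20 factors converting from Joules and m^-2 to meV and Angstrom^-2.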
  const auto &exptInfoZero = *(m_inputWS->getExperimentInfo(0));
  typedef Kernel::PropertyWithValue<std::vector<double>> VectorDoubleProperty;
  auto *rubwLog =
      dynamic_cast<VectorDoubleProperty *>(exptInfoZero.getLog("RUBW_MATRIX"));
  if (!rubwLog) {
    throw std::runtime_error(
        "Workspace does not contain a log entry for the RUBW matrix. "
        "Cannot continue.");
  } else {
    Kernel::DblMatrix rubwValue(
        (*rubwLog)()); // includes the 2*pi factor but not goniometer for now :)
    m_rubw = exptInfoZero.run().getGoniometerMatrix() * rubwValue;
    m_rubw.Invert();
  }
  const double protonCharge = exptInfoZero.run().getProtonCharge();

  auto instrument = exptInfoZero.getInstrument();
  std::vector<detid_t> detIDs = instrument->getDetectorIDs(true);
  // Prune out those that are part of a group and simply leave the head of the
  // group
  detIDs = removeGroupedIDs(exptInfoZero, detIDs);

  // Mapping
  const int64_t ndets = static_cast<int64_t>(detIDs.size());
  bool haveSA = false;
  API::MatrixWorkspace_const_sptr solidAngleWS =
      getProperty("SolidAngleWorkspace");
  detid2index_map solidAngDetToIdx;
  if (solidAngleWS != nullptr) {
    haveSA = true;
    solidAngDetToIdx = solidAngleWS->getDetectorIDToWorkspaceIndexMap();
  }

  auto prog = make_unique<API::Progress>(this, 0.3, 1.0, ndets);
  PARALLEL_FOR_NO_WSP_CHECK()
  for (int64_t i = 0; i < ndets; i++) {
    PARALLEL_START_INTERUPT_REGION

    const auto detID = detIDs[i];
    double theta(0.0), phi(0.0);
    bool skip(false);
    try {
      auto spectrum = getThetaPhi(detID, exptInfoZero, theta, phi);
      if (spectrum->isMonitor() || spectrum->isMasked())
        continue;
    } catch (
        std::exception &) // detector might not exist or has not been included
                          // in grouping
    {
      skip = true; // Intel compiler has a problem with continue inside a catch
                   // inside openmp...
    }
    if (skip)
      continue;

    // Intersections
    auto intersections = calculateIntersections(theta, phi);
    if (intersections.empty())
      continue;

    // Get solid angle for this contribution
    double solid = protonCharge;
    if (haveSA) {
      solid = solidAngleWS->readY(solidAngDetToIdx.find(detID)->second)[0] *
              protonCharge;
    }
    // Compute final position in HKL
    const size_t vmdDims = intersections.front().size();
    // pre-allocate for efficiency and copy non-hkl dim values into place
    std::vector<coord_t> pos(vmdDims + otherValues.size() + 1);
    std::copy(otherValues.begin(), otherValues.end(), pos.begin() + vmdDims);
    pos.back() = 1.; // homogeneous coordinate; the vector is already pre-sized above
    auto intersectionsBegin = intersections.begin();
    for (auto it = intersectionsBegin + 1; it != intersections.end(); ++it) {
      const auto &curIntSec = *it;
      const auto &prevIntSec = *(it - 1);
      // the full vector isn't used so compute only what is necessary
      double delta =
          (curIntSec[3] * curIntSec[3] - prevIntSec[3] * prevIntSec[3]) /
          energyToK;
      if (delta < 1e-10)
        continue; // Assume zero contribution if difference is small

      // Average between two intersections for final position
      std::transform(curIntSec.getBareArray(),
                     curIntSec.getBareArray() + vmdDims,
                     prevIntSec.getBareArray(), pos.begin(),
                     VectorHelper::SimpleAverage<coord_t>());

      // transform kf to energy transfer
      pos[3] = static_cast<coord_t>(m_Ei - pos[3] * pos[3] / energyToK);
      std::vector<coord_t> posNew = affineTrans * pos;
      size_t linIndex = m_normWS->getLinearIndexAtCoord(posNew.data());
      if (linIndex == size_t(-1))
        continue;

      // signal = integral between two consecutive intersections *solid angle
      // *PC
      double signal = solid * delta;

      PARALLEL_CRITICAL(updateMD) {
        signal += m_normWS->getSignalAt(linIndex);
        m_normWS->setSignalAt(linIndex, signal);
      }
    }
    prog->report();

    PARALLEL_END_INTERUPT_REGION
  }
  PARALLEL_CHECK_INTERUPT_REGION
}
status_t BufferQueue::queueBuffer(int buf,
        const QueueBufferInput& input, QueueBufferOutput* output) {
    ATRACE_CALL();
    ATRACE_BUFFER_INDEX(buf);

    Rect crop;
    uint32_t transform;
    int scalingMode;
    int64_t timestamp;
    sp<Fence> fence;

    input.deflate(&timestamp, &crop, &scalingMode, &transform, &fence);

    if (fence == NULL) {
        ST_LOGE("queueBuffer: fence is NULL");
        return BAD_VALUE;
    }

    ST_LOGV("queueBuffer: slot=%d time=%#llx crop=[%d,%d,%d,%d] tr=%#x "
            "scale=%s",
            buf, timestamp, crop.left, crop.top, crop.right, crop.bottom,
            transform, scalingModeName(scalingMode));

    sp<ConsumerListener> listener;

    { // scope for the lock
        Mutex::Autolock lock(mMutex);
        if (mAbandoned) {
            ST_LOGE("queueBuffer: BufferQueue has been abandoned!");
            return NO_INIT;
        }
        int maxBufferCount = getMaxBufferCountLocked();
        if (buf < 0 || buf >= maxBufferCount) {
            ST_LOGE("queueBuffer: slot index out of range [0, %d]: %d",
                    maxBufferCount, buf);
            return -EINVAL;
        } else if (mSlots[buf].mBufferState != BufferSlot::DEQUEUED) {
            ST_LOGE("queueBuffer: slot %d is not owned by the client "
                    "(state=%d)", buf, mSlots[buf].mBufferState);
            return -EINVAL;
        } else if (!mSlots[buf].mRequestBufferCalled) {
            ST_LOGE("queueBuffer: slot %d was enqueued without requesting a "
                    "buffer", buf);
            return -EINVAL;
        }

        const sp<GraphicBuffer>& graphicBuffer(mSlots[buf].mGraphicBuffer);
        Rect bufferRect(graphicBuffer->getWidth(), graphicBuffer->getHeight());
        Rect croppedCrop;
        crop.intersect(bufferRect, &croppedCrop);
        if (croppedCrop != crop) {
            ST_LOGE("queueBuffer: crop rect is not contained within the "
                    "buffer in slot %d", buf);
            return -EINVAL;
        }

        if (mSynchronousMode) {
            // In synchronous mode we queue all buffers in a FIFO.
            mQueue.push_back(buf);

            // Synchronous mode always signals that an additional frame should
            // be consumed.
            listener = mConsumerListener;
        } else {
            // In asynchronous mode we only keep the most recent buffer.
            if (mQueue.empty()) {
                mQueue.push_back(buf);

                // Asynchronous mode only signals that a frame should be
                // consumed if no previous frame was pending. If a frame were
                // pending then the consumer would have already been notified.
                listener = mConsumerListener;
            } else {
                Fifo::iterator front(mQueue.begin());
                // buffer currently queued is freed
                mSlots[*front].mBufferState = BufferSlot::FREE;
                // and we record the new buffer index in the queued list
                *front = buf;
            }
        }

        mSlots[buf].mTimestamp = timestamp;
        mSlots[buf].mCrop = crop;
        mSlots[buf].mTransform = transform;
        mSlots[buf].mFence = fence;

        switch (scalingMode) {
            case NATIVE_WINDOW_SCALING_MODE_FREEZE:
            case NATIVE_WINDOW_SCALING_MODE_SCALE_TO_WINDOW:
            case NATIVE_WINDOW_SCALING_MODE_SCALE_CROP:
                break;
            default:
                ST_LOGE("unknown scaling mode: %d (ignoring)", scalingMode);
                scalingMode = mSlots[buf].mScalingMode;
                break;
        }

        mSlots[buf].mBufferState = BufferSlot::QUEUED;
        mSlots[buf].mScalingMode = scalingMode;
        mFrameCounter++;
        mSlots[buf].mFrameNumber = mFrameCounter;

        mBufferHasBeenQueued = true;
        mDequeueCondition.broadcast();

        output->inflate(mDefaultWidth, mDefaultHeight, mTransformHint,
                mQueue.size());

        ATRACE_INT(mConsumerName.string(), mQueue.size());
    } // scope for the lock

    // call back without lock held
    if (listener != 0) {
        listener->onFrameAvailable();
    }
    return NO_ERROR;
}
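
The synchronous/asynchronous branch above boils down to two queueing policies: append every frame in FIFO order, or keep only the newest pending frame and release the one it replaces. A small stand-alone illustration with plain ints standing in for buffer slots (not the BufferQueue API):

#include <deque>
#include <iostream>

// Queue a buffer slot under the two policies used above: synchronous mode
// keeps every frame in FIFO order, asynchronous mode keeps only the newest
// pending frame by overwriting the one that was still waiting.
void queueSlot(std::deque<int>& fifo, int slot, bool synchronous)
{
    if (synchronous || fifo.empty())
        fifo.push_back(slot);
    else
        fifo.front() = slot;   // the previously pending frame is dropped
}

int main()
{
    std::deque<int> syncQueue, asyncQueue;
    for (int slot : {0, 1, 2})
    {
        queueSlot(syncQueue, slot, true);
        queueSlot(asyncQueue, slot, false);
    }
    std::cout << "sync pending: "  << syncQueue.size()  << '\n';   // 3
    std::cout << "async pending: " << asyncQueue.size() << '\n';   // 1
}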
Example #29
0
/*
	Function implements scheduling algorithm.
	This can be overloaded.
*/
void scheduling (Scheduler* sched)
{
	char log_data[100];
	PCB *prev_PCB,*temp_PCB;
	unsigned old_id,new_id;
	int state;

	//printf ("Scheduling about to start. mutex1:%x\n",mutex1);
	pthread_mutex_lock(&mutex1);
		printf ("SCHEDULING ");
		//printf ("SCHEDULING PCB_list length:%d ",size(sched->PCB_list));
		//display_queue (sched->PCB_list);
		fflush (stdout);
		//This if is for experimental purposes
		if (sched->PCB_list->size <= 1) //There is only one process in the system.
		{
			printf ("Nothing to schedule as only one process in the system.\n");
			pthread_mutex_unlock(&mutex1);
			return;
		}
		else
		{
			old_id = sched->cur_PCB->p->internal_id;
			
			//Simple round robin policy.
			prev_PCB = front (&sched->PCB_list);
//			printf ("SCHEDULE OLD->mutex:%x\n",prev_PCB->p->mutex);
			pthread_mutex_lock (&prev_PCB->p->state_mutex);
				if ( prev_PCB->p->state == RUNNING )
					prev_PCB->p->state = READY;
			pthread_mutex_unlock (&prev_PCB->p->state_mutex);
			
			push (&sched->PCB_list,prev_PCB);
			//printf ("WELL OLD IS GONE!!!!\n");
			//FIND NEW Scheduled process
			unsigned counter=0, PCB_list_size = size(sched->PCB_list);
			while (1)
			{
				counter++;
				sched->cur_PCB = pick_front (sched->PCB_list);
				
				new_id = sched->cur_PCB->p->internal_id;
				pthread_mutex_lock (&sched->cur_PCB->p->state_mutex);
					state = sched->cur_PCB->p->state;
				pthread_mutex_unlock (&sched->cur_PCB->p->state_mutex);
//				printf (" next p:%d state %d ",sched->cur_PCB->p->internal_id,sched->cur_PCB->p->state);
				if ( state == READY) //FOUND THE NEXT READY PROCESS
					break;
				temp_PCB = front (&sched->PCB_list);
				push (&sched->PCB_list,temp_PCB);
				if (counter >= PCB_list_size)
				{
					printf ("No process is in READY state. Nothing to schedule.\n");
					pthread_mutex_unlock(&mutex1);
					return;
				}
//				printf ("Looking for the next READY process. counter:%d\n",counter);
			}
			printf ("OLD:%u NEW:%u\n",old_id,new_id);

			pthread_mutex_lock (&sched->cur_PCB->p->mutex);
			if (sched->cur_PCB->p->waiting == 1)
						pthread_cond_signal (& sched->cur_PCB->p->cond);
//				printf ("SIGNALED\n");
				pthread_mutex_lock (&sched->cur_PCB->p->state_mutex);
					sched->cur_PCB->p->state = RUNNING;
				pthread_mutex_unlock (&sched->cur_PCB->p->state_mutex);
			pthread_mutex_unlock (&sched->cur_PCB->p->mutex);
		}
	pthread_mutex_unlock(&mutex1);
}
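
The scheduler above is a round-robin pick: demote the currently running process, rotate it to the back, then keep rotating until the first READY entry is found or a full pass has been made. The same control flow, stripped of the pthread locking and written with illustrative C++ types rather than this code's PCB/queue API:

#include <cstddef>
#include <cstdio>
#include <deque>

enum State { READY, RUNNING, BLOCKED };
struct PCB { unsigned id; State state; };

// Returns true if a READY process was found and promoted to RUNNING.
bool scheduleRoundRobin(std::deque<PCB>& pcbs)
{
    if (pcbs.size() <= 1)
        return false;                      // nothing to schedule

    // Demote the current head (the previously running process) and rotate it
    // to the back, like the front()/push() pair above.
    if (pcbs.front().state == RUNNING)
        pcbs.front().state = READY;
    pcbs.push_back(pcbs.front());
    pcbs.pop_front();

    // Look for the next READY process, at most one full pass.
    for (std::size_t tries = 0; tries < pcbs.size(); ++tries)
    {
        if (pcbs.front().state == READY)
        {
            pcbs.front().state = RUNNING;
            return true;
        }
        pcbs.push_back(pcbs.front());      // skip and rotate
        pcbs.pop_front();
    }
    return false;                          // no process is READY
}

int main()
{
    std::deque<PCB> pcbs { {1, RUNNING}, {2, BLOCKED}, {3, READY} };
    if (scheduleRoundRobin(pcbs))
        std::printf("now running: %u\n", pcbs.front().id);   // prints 3
}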
Example #30
0
int main(int argc, char* argv[]){
   int i, s, max, min, d, n=35;
   //int n=35;
   List  C = newList(); // central vertices
   List  P = newList(); // peripheral vertices
   List  E = newList(); // eccentricities
   Graph G = newGraph(n);


   // Build graph G

   for(i=1; i<n; i++){
      if( i%7!=0 )
         addEdge(G, i, i+1);
      if( i<=28 )
         addEdge(G, i, i+7);
   }
   addEdge(G, 9, 31);
   addEdge(G, 17, 13);
   addEdge(G, 14, 33);

   // Print adjacency list representation of G
   printGraph(stdout, G);

   // Calculate the eccentricity of each vertex
   for(s=1; s<=n; s++){
      BFS(G, s);
      max = getDist(G, 1);
      for(i=2; i<=n; i++){
         d = getDist(G, i);
         max = ( max<d ? d : max );
      }
      append(E, max);
   }
   printf("Source of G %d Size of G %d Order of G %d\n", getSource(G), getSize(G), getOrder(G));

   // Determine the Radius and Diameter of G, as well as the Central and
   // Peripheral vertices.
   append(C, 1);
   append(P, 1);
   min = max = front(E);
   moveTo(E,0);
   moveNext(E);
   for(i=2; i<=n; i++){
      d = getElement(E);
      if( d==min ){
         append(C, i);
      }else if( d<min ){
         min = d;
         clear(C);
         append(C, i);
      }
      if( d==max ){
         append(P, i);
      }else if( d>max ){
         max = d;
         clear(P);
         append(P, i);
      }
      moveNext(E);
   }

   // Print results
   printf("\n");
   printf("Radius = %d\n", min);
   printf("Central vert%s: ", length(C)==1?"ex":"ices");
   printList(stdout, C);
   printf("\n");
   printf("Diameter = %d\n", max);
   printf("Peripheral vert%s: ", length(P)==1?"ex":"ices");
   printList(stdout, P);
   printf("\n");

   freeList(&C);
   freeList(&P);
   freeList(&E);
   freeGraph(&G);
   // Testing undirected graph G2
   printf("testing undirected graph G2\n");
   Graph G2 = newGraph(6);
   addEdge(G2, 1, 2);
   addEdge(G2, 1, 3);
   addEdge(G2, 2, 4);
   addEdge(G2, 2, 5);
   addEdge(G2, 2, 6);
   addEdge(G2, 3, 4);
   addEdge(G2, 4, 5);
   addEdge(G2, 5, 6);

   //test access functions
    printGraph(stdout, G2);
    BFS(G2, 3);

   printf("Source of G2:%d Size of G2:%d Order of G2:%d \n", getSource(G2), getSize(G2), getOrder(G2));
   printf("getParent(G2,6)%d\n", getParent(G2, 6));
   List Pa = newList();
   printf("getDist 3 to 6:%d\n", getDist(G2, 6));
   printf("getPath 3 to 6:\n");
   getPath(Pa, G2, 6);
   printList(stdout, Pa);

   BFS(G2, 1);
   printf("getDist 1 to 5:%d\n", getDist(G2, 5));
   clear(Pa);
   getPath(Pa, G2, 5);
   printf("getPath 1 to 5:\n");
   printList(stdout, Pa);

   printf("makeNull(G2)\n");
   makeNull(G2);
   printGraph(stdout, G2);
   printf("Source of G2:%d Size of G2:%d Order of G2:%d \n", getSource(G2), getSize(G2), getOrder(G2));
   printf("getParent(G2,5)%d\n", getParent(G2, 5));
   printf("getDist(G2,6)%d\n", getDist(G2, 6));

   freeList(&Pa);
   freeGraph(&G2);


   // Testing directed graph G3
   printf("\ntesting directed graph G3\n");
   Graph G3 = newGraph(4);

   addArc(G3, 1, 2);
   addArc(G3, 1, 3);
   addArc(G3, 3, 2);
   addArc(G3, 3, 4);
   addArc(G3, 4, 1);

   // Test access functions
   printGraph(stdout, G3);
   BFS(G3, 3);

   printf("Source of G3:%d Size of G3:%d Order of G3:%d \n", getSource(G3), getSize(G3), getOrder(G3));
   printf("getDist 3 to 1:%d\n", getDist(G3, 1));
   printf("getDist 3 to 2:%d\n", getDist(G3, 2));
   printf("getParent(3):%d\n", getParent(G3, 3));


   List Pa2 = newList();
   printf("getPath 3 to 1:\n");
   getPath(Pa2, G3, 1);
   printList(stdout, Pa2);
   printf("getParent(1):%d\n", getParent(G3, 1));


   // Free objects


   freeList(&Pa2);
   freeGraph(&G3);

   return(0);
}