// TODO (translated from Dutch): make 3 hardcoded fixed parameters holding the values passed in with the function.
    // Builds the Raspberry Pi status payload from the given MAC address, CPU
    // temperature and free-memory strings, writing the finished result into
    // the caller-supplied piData buffer.
    //
    // NOTE(review): data1..data5 are uninitialized char* passed BY VALUE to
    // buildData()/appendData(). Unless those functions take char*& (reference)
    // parameters, the intermediate results can never be returned through these
    // variables, and any write through them is undefined behavior — verify the
    // declarations of buildData/appendData/eindeBuildData.
    void JsonClass::piData(char macaddress[],char cpu_temp[],char free_memory[],char piData[]){

        // intermediate fragments for each key/value pair and their concatenations
        char * data1,*data2,*data3,*data4,*data5;

        // one fragment per field: name + value
        buildData("macaddress",macaddress,data1);
        buildData("cpu_temp",cpu_temp,data2);
        buildData("freememory",free_memory,data3);

        // chain the fragments together: (data1 + data2) -> data4, (data4 + data3) -> data5
        appendData(data1,data2,data4);
        appendData(data4,data3,data5);

        // finalize ("einde" = Dutch for "end") the payload into piData
        eindeBuildData(data5,piData);
    }
Ejemplo n.º 2
0
/*
  function: addItem
  Input: line: line from transaction file.
  Return: 0 on complete. (value not used)

  Adds an item to the btree if there is not
  already an item with the same code in the tree.

  Uses buildData() from btree.c to make the item;
  line[11] is used to skip past the transaction name
  in line and get to the new item data.

  Also uses search(), insertSearch(), and insert() from btree.c.
 */
int addItem(char *line){

 int i;
 struct Node root;
 struct Node found;
 struct Data item;

 getNode(0, &root);

 /* parse the item fields from the part of the line after the transaction name */
 buildData(&item, &line[11]);

 i = search(&root, item.code, &found);

 /* reject duplicates: search() returns -1 when the code is absent */
 if(i != -1){
   printf("ERROR: Cannot add item, ");
   printf("there is already an item with code: %s.\n", item.code);
   return 0;
 }

 /* upper-case the 11-char category field; character literals replace the
    previous magic ASCII numbers (97/122/32) without changing behavior */
 for(i = 0; i < 11; i++){
   if ((item.cate[i] >= 'a') && (item.cate[i] <= 'z')){
     item.cate[i] = item.cate[i] - ('a' - 'A');
   }
 }

 /* locate the insertion node, then insert the new item there */
 insertSearch(&root, &item, &found);

 insert(&found, &item);

 return 0;
}
Ejemplo n.º 3
0
// Loads the previous and the current debug log files (logging the outcome of
// each load) and returns both combined into a single DataChunk.
DataChunk
debugLogLoad()
{
    DataChunk previousLog;
    fileLoad(previousLog, debugLogOldPath()).log();

    DataChunk currentLog;
    fileLoad(currentLog, debugLogPath()).log();

    return buildData({previousLog, currentLog});
}
Ejemplo n.º 4
0
/*
 * Python wrapper around ppk_setEntry().
 *
 * Parses keyword arguments describing an entry (network, application or item
 * key) plus its data (string or blob), validates that exactly the arguments
 * required by the chosen entry/data type were supplied, and stores the entry.
 * Returns Python True/False depending on the result of ppk_setEntry(), or
 * raises (PyErr_BadArgument) on inconsistent arguments.
 */
PyObject *wrap_setEntry(PyObject *o, PyObject * args, PyObject * kwargs)
{
	const char *module_id;
	ppk_entry entry;
	ppk_data data;
	unsigned int type, data_type;
	const char *host = 0, *login = 0, *app_name = 0, *username = 0, *item = 0, *string = 0, *blob = 0;
	int size = 0;
	unsigned int port = 0;
	unsigned int flags = 0;
	static char *kwlist[] = { "module_id", "type", "data_type", "host", "login", "port", "app_name", "username", "item", "string", "blob", "flags", NULL };
	int ok = PyArg_ParseTupleAndKeywords(args, kwargs, "sII|ssHsssst#I", kwlist, &module_id,
			&type, &data_type, &host, &login, &port, &app_name, &username, &item, &string, &blob, &size, &flags);

	/* each entry type requires exactly its own identifying fields and none of
	   the others */
	switch (type)
	{
	case ppk_network:
		ok = ok && host && login && port && ! app_name && ! username && ! item;
		break;
	case ppk_application:
		ok = ok && ! host && ! login && ! port && app_name && username && ! item;
		break;
	case ppk_item:
		ok = ok && ! host && ! login && ! port && ! app_name && ! username && item;
		break;
	default:
		ok = 0;
	};

	/* a string entry must not carry a blob and vice versa */
	switch (data_type)
	{
	case ppk_string:
		ok = ok && string && ! blob;
		break;
	case ppk_blob:
		ok = ok && blob && size && ! string;
		break;
	default:
		ok = 0;
	}

	if (! ok)
	{
	    PyErr_BadArgument();
	    return 0;
	}

	buildEntry(&entry, type, host, login, port, app_name, username, item);
	buildData(&data, data_type, string, blob, size);

	/* Fix: the previous code returned Py_True/Py_False directly without taking
	   a reference, which corrupts the interpreter's refcounts over time.
	   Py_RETURN_TRUE / Py_RETURN_FALSE INCREF the singleton before returning. */
	if (ppk_setEntry(module_id, entry, data, flags))
		Py_RETURN_TRUE;
	Py_RETURN_FALSE;
}
// Prepares the scene-graph node tree for the next rendered frame: (re)builds
// the custom nodes if needed, rebuilds the shader program when it is dirty,
// and refreshes per-frame particle data. Returns the root node, or 0 when no
// nodes could be built.
QQuickShaderEffectNode *QQuickCustomParticle::prepareNextFrame(QQuickShaderEffectNode *rootNode)
{
    if (!rootNode)
        rootNode = buildCustomNodes();

    if (!rootNode)
        return 0;

    // Recompile/reassemble shader sources only when the program changed.
    if (m_dirtyProgram) {
        const bool isES = QOpenGLContext::currentContext()->isOpenGLES();

        QQuickShaderEffectMaterial *material = static_cast<QQuickShaderEffectMaterial *>(rootNode->material());
        Q_ASSERT(material);

        Key s = m_common.source;
        QSGShaderSourceBuilder builder;
        // No user-supplied fragment shader: fall back to the built-in one.
        // removeVersion() strips the #version directive, which GLES rejects.
        if (s.sourceCode[Key::FragmentShader].isEmpty()) {
            builder.appendSourceFile(QStringLiteral(":/particles/shaders/customparticle.frag"));
            if (isES)
                builder.removeVersion();
            s.sourceCode[Key::FragmentShader] = builder.source();
            builder.clear();
        }

        // The vertex template is always prepended; the user's vertex source
        // (or the default one, if none was given) is appended after it.
        builder.appendSourceFile(QStringLiteral(":/particles/shaders/customparticletemplate.vert"));
        if (isES)
            builder.removeVersion();

        if (s.sourceCode[Key::VertexShader].isEmpty())
            builder.appendSourceFile(QStringLiteral(":/particles/shaders/customparticle.vert"));
        s.sourceCode[Key::VertexShader] = builder.source() + s.sourceCode[Key::VertexShader];

        s.className = metaObject()->className();

        material->setProgramSource(s);
        material->attributes = m_common.attributes;
        // Every node shares the material, so all of them must be re-rendered.
        foreach (QQuickShaderEffectNode* node, m_nodes)
            node->markDirty(QSGNode::DirtyMaterial);

        m_dirtyProgram = false;
        m_dirtyUniforms = true;
    }

    // Sync with the particle system and convert its time (ms) to seconds.
    m_lastTime = m_system->systemSync(this) / 1000.;
    if (true) //Currently this is how we update timestamp... potentially over expensive.
        buildData(rootNode);
    return rootNode;
}
Ejemplo n.º 6
0
	/**
	* Builds the frustum directly from its eight corner points.
	* @param ntl Near top left point
	* @param ntr Near top right point
	* @param nbl Near bottom left point
	* @param nbr Near bottom right point
	* @param ftl Far top left point
	* @param ftr Far top right point
	* @param fbl Far bottom left point
	* @param fbr Far bottom right point
	*/
	template<class T> void Frustum<T>::buildFrustum(const Point3<T> &ntl, const Point3<T> &ntr, const Point3<T> &nbl, const Point3<T> &nbr,
		const Point3<T> &ftl, const Point3<T> &ftr, const Point3<T> &fbl, const Point3<T> &fbr)
	{
		//store the eight corners in near/far, top/bottom, left/right order
		const Point3<T> corners[8] = {ntl, ntr, nbl, nbr, ftl, ftr, fbl, fbr};
		for(unsigned int i=0; i<8; ++i)
		{
			frustumPoints[i] = corners[i];
		}

		//derive the remaining frustum data from the corner points
		buildData();
	}
// Rebuilds the overview model from the project's on-disk folder structure:
// ensures the structure exists, then populates the assets and data sections.
void ObjectOverviewModel::build() {
	cleanUp();

	auto io = model_.ioManager();
	io->createProjectStructure();
	std::string workingDir = io->getWorkingDirectory();

	auto assetsFolder = io->findFolder(workingDir, "assets");
	if(!assetsFolder.empty())
		buildAssets(io, assetsFolder);
	else {
		std::cout << "assets not found? this wasn't supposed to happen" << std::endl;
	}

	auto dataFolder = io->findFolder(workingDir, "data");
	if(!dataFolder.empty())
		buildData(io, dataFolder);
	else {
		std::cout << "data not found? this wasn't supposed to happen" << std::endl;
	}
}
Ejemplo n.º 8
0
	/**
	* Frustum builder from angle, ratio and near/far distances.
	* Default frustum position: x=0, y=0, z=0
	* Default frustum view direction: z axis
	* Default frustum up vector: y axis
	*/
	template<class T> void Frustum<T>::buildFrustum(T angle, T ratio, T nearDistance, T farDistance)
	{
		//convert the full vertical field of view (degrees) to a half-angle in radians
		T halfFovy = (angle/360.0)*PI_VALUE;
		auto tanHalfFovy = (T)std::tan(halfFovy);

		//half extents of the near and far planes
		T nearHalfHeight = nearDistance * tanHalfFovy;
		T farHalfHeight = farDistance * tanHalfFovy;
		T nearHalfWidth = nearHalfHeight * ratio;
		T farHalfWidth = farHalfHeight * ratio;

		//corner points (camera looks down -z): near plane, then far plane
		frustumPoints[0] = Point3<T>(-nearHalfWidth, nearHalfHeight, -nearDistance); //ntl
		frustumPoints[1] = Point3<T>(nearHalfWidth, nearHalfHeight, -nearDistance); //ntr
		frustumPoints[2] = Point3<T>(-nearHalfWidth, -nearHalfHeight, -nearDistance); //nbl
		frustumPoints[3] = Point3<T>(nearHalfWidth, -nearHalfHeight, -nearDistance); //nbr
		frustumPoints[4] = Point3<T>(-farHalfWidth, farHalfHeight, -farDistance); //ftl
		frustumPoints[5] = Point3<T>(farHalfWidth, farHalfHeight, -farDistance); //ftr
		frustumPoints[6] = Point3<T>(-farHalfWidth, -farHalfHeight, -farDistance); //fbl
		frustumPoints[7] = Point3<T>(farHalfWidth, -farHalfHeight, -farDistance); //fbr

		//derive the remaining frustum data from the corner points
		buildData();
	}
    // TODO (translated from Dutch): make hardcoded fixed parameters; the values are passed in with the function.
    // Builds the T'REX motor-controller status payload (battery, motor
    // currents, encoder counts, accelerometer, impact) into trexData.
    //
    // NOTE(review): dataBat..dataIm and data1 are uninitialized char* passed BY
    // VALUE to buildData()/appendData(); unless those functions take char*&
    // parameters, results cannot be returned through them — verify their
    // declarations (same concern as piData above).
    // NOTE(review): appendData(data1, ..., data1) passes the same pointer as
    // both source and destination — confirm appendData supports aliasing.
    // NOTE(review): "Batttery_voltage" has a triple 't'; if downstream
    // consumers already parse this key, coordinate before correcting it.
    void JsonClass::trexData(char battery_voltage[], char motor_current_left[], char motor_current_right[], char encoder_count_left[], char encoder_count_right[], char accelero[], char impact[], char trexData[]){

        // one fragment per field, plus data1 as the running concatenation
        char * dataBat, *dataML, *dataMR, *dataEL, *dataER, *dataAc, *dataIm, *data1; 

        buildData("Batttery_voltage",battery_voltage,dataBat);
        buildData("Motor_current_left",motor_current_left,dataML);
        buildData("Motor_current_right",motor_current_right,dataMR);
        buildData("Encoder_count_left",encoder_count_left,dataEL);
        buildData("Encoder_count_right",encoder_count_right,dataER);
        buildData("Accelero",accelero,dataAc);
        buildData("Impact",impact,dataIm);

        // fold all fragments into data1, left to right
        appendData(dataBat,dataML,data1);
        appendData(data1,dataMR,data1);
        appendData(data1,dataEL,data1);
        appendData(data1,dataER,data1);
        appendData(data1,dataAc,data1);
        appendData(data1,dataIm,data1);

        // finalize ("einde" = Dutch for "end") the payload into trexData
        eindeBuildData(data1,trexData);

    }
Ejemplo n.º 10
0
/**
 * This function implements a strategy similar to the one used in the 
 * centralized case in NamdCentLB.
 *
 * Runs one group-level load-balancing step: collects per-compute load data,
 * reports compute-splitting statistics to PE 0 on step 1, runs the strategy
 * selected by simParams->ldbStrategy, and returns a migration message
 * describing which computes move where.
 *
 * NOTE(review): the declared return type is CLBMigrateMsg* while the local
 * msg is an LBMigrateMsg* — presumably one aliases the other; confirm
 * against the Charm++ load-balancer headers.
 */
CLBMigrateMsg* NamdHybridLB::GrpLevelStrategy(LDStats* stats) {
  int numProcessors = stats->nprocs();	// number of processors at group level
  int numPatches = PatchMap::Object()->numPatches();
  ComputeMap *computeMap = ComputeMap::Object();
  const int numComputes = computeMap->numComputes();
  const int numGroupComputes = stats->n_migrateobjs;
  const SimParameters* simParams = Node::Object()->simParameters;

  // lazily allocate the working arrays (freed at the end of this call)
  if ( ! processorArray ) processorArray = new processorInfo[numProcessors];
  // these data structures are global and need to be distributed
  if ( ! patchArray ) patchArray = new patchInfo[numPatches];
  if ( ! computeArray ) computeArray = new computeInfo[numGroupComputes];
  if ( ! from_procs ) from_procs = new int[numGroupComputes];

  // buildData() fills the arrays from stats and returns the number of
  // computes that are allowed to migrate
  int nMoveableComputes = buildData(stats);
  CmiAssert(nMoveableComputes <= numGroupComputes);


#if LDB_DEBUG
#define DUMP_LDBDATA 1
#define LOAD_LDBDATA 1
#endif

#if DUMP_LDBDATA 
  dumpDataASCII("ldbd_before", numProcessors, numPatches, nMoveableComputes);
#elif LOAD_LDBDATA
  loadDataASCII("ldbd_before.5", numProcessors, numPatches, nMoveableComputes);
  // CkExit();
#endif

  // --- gather load statistics: average/max compute load, and the average
  // --- total load over the PEs that are available for balancing
  double averageLoad = 0.;
  double avgCompute;
  double maxCompute;
  int maxComputeId;
  int numPesAvailable;
  {
   int i;
   double total = 0.;
   maxCompute = 0.;
   int maxi = 0;
   for (i=0; i<nMoveableComputes; i++) {
      double load = computeArray[i].load;
      total += load;
      if ( load > maxCompute ) { maxCompute = load;  maxi = i; }
   }
   avgCompute = total / nMoveableComputes;
   maxComputeId = computeArray[maxi].handle.id.id[0];

    int P = stats->nprocs();
   numPesAvailable = 0;
   for (i=0; i<P; i++) {
      if (processorArray[i].available) {
        ++numPesAvailable;
        // background load on available PEs counts toward the average
        total += processorArray[i].backgroundLoad;
      }
   }
   if (numPesAvailable == 0)
     NAMD_die("No processors available for load balancing!\n");

   averageLoad = total/numPesAvailable;
  }

  // --- on step 1, count the computes that have partitions (i_split) and
  // --- track the heaviest unsplit compute load (maxUnsplit)
  int i_split = 0;
  double maxUnsplit = 0.;

  if ( step() == 1 ) {
    for (int i=0; i<nMoveableComputes; i++) {
      const int cid = computeArray[i].handle.id.id[0];
      if ( computeMap->numPartitions(cid) == 0 ) {
        const double load = computeArray[i].load;
        if ( load > maxUnsplit ) maxUnsplit = load;
        continue;
      }
      ++i_split;
    }
  }

  // --- send splitting statistics (and, on step 1, the per-compute loads of
  // --- splittable computes) to PE 0
  {
    SplitComputesMsg *msg = new(i_split,i_split) SplitComputesMsg;
    msg->maxUnsplit = maxUnsplit;
    msg->averageLoad = averageLoad;
    msg->avgCompute = avgCompute;
    msg->maxCompute = maxCompute;
    msg->maxComputeId = maxComputeId;
    msg->nMoveableComputes = nMoveableComputes;
    msg->numPesAvailable = numPesAvailable;
    msg->n = i_split;

    if ( step() == 1 ) {
      i_split = 0;
      for (int i=0; i<nMoveableComputes; i++) {
        // step 1 does no migration: keep each compute on its old processor
        computeArray[i].processor = computeArray[i].oldProcessor;
        const int cid = computeArray[i].handle.id.id[0];
        if ( computeMap->numPartitions(cid) == 0 ) {
          continue;
        }
        msg->cid[i_split] = cid;
        msg->load[i_split] = computeArray[i].load;
        ++i_split;
      }
    }

    thisProxy[0].splitComputes(msg);
  }

  // --- run the balancing algorithm selected by the simulation parameters;
  // --- each strategy fills computeArray[i].processor with the target PE
  if ( step() == 1 ) {
    // compute splitting only
  } else if (simParams->ldbStrategy == LDBSTRAT_DEFAULT) { // default
    if (step() < 4)
      TorusLB(computeArray, patchArray, processorArray,
                  nMoveableComputes, numPatches, numProcessors);
    else
      RefineTorusLB(computeArray, patchArray, processorArray,
                  nMoveableComputes, numPatches, numProcessors, 1);
  } else if (simParams->ldbStrategy == LDBSTRAT_COMPREHENSIVE) {
    TorusLB(computeArray, patchArray, processorArray,
                  nMoveableComputes, numPatches, numProcessors);
  } else if (simParams->ldbStrategy == LDBSTRAT_REFINEONLY) {
    RefineTorusLB(computeArray, patchArray, processorArray,
                  nMoveableComputes, numPatches, numProcessors, 1);
  } else if (simParams->ldbStrategy == LDBSTRAT_OLD) {
    NAMD_die("Old load balancer strategy is not compatible with hybrid balancer.");
    if (step() < 4)
      Alg7(computeArray, patchArray, processorArray,
                  nMoveableComputes, numPatches, numProcessors);
    else
      RefineOnly(computeArray, patchArray, processorArray,
                  nMoveableComputes, numPatches, numProcessors);
  }

  // --- debug-only accounting of network hops between patches and proxies
#if LDB_DEBUG && USE_TOPOMAP
  TopoManager tmgr;
  int pe1, pe2, pe3, hops=0;
  /* This is double counting the hops
  for(int i=0; i<nMoveableComputes; i++)
  {
    pe1 = computeArray[i].processor;
    pe2 = patchArray[computeArray[i].patch1].processor;
    pe3 = patchArray[computeArray[i].patch2].processor;
    hops += tmgr.getHopsBetweenRanks(pe1, pe2);
    if(computeArray[i].patch1 != computeArray[i].patch2)
      hops += tmgr.getHopsBetweenRanks(pe1, pe3);  
  }*/
  for (int i=0; i<numPatches; i++)  {
    //int num = patchArray[i].proxiesOn.numElements();
    pe1 = patchArray[i].processor;
    Iterator nextProc;
    processorInfo *p = (processorInfo *)patchArray[i].proxiesOn.iterator((Iterator *)&nextProc);
    while (p) {
      pe2 = p->Id;
      hops += tmgr.getHopsBetweenRanks(pe1, pe2);
      p = (processorInfo *)patchArray[i].proxiesOn.next((Iterator*)&nextProc);
    }
  }
  CkPrintf("Load Balancing: Number of Hops: %d\n", hops);
#endif

#if DUMP_LDBDATA
  dumpDataASCII("ldbd_after", numProcessors, numPatches, nMoveableComputes);
#elif LOAD_LDBDATA
  dumpDataASCII("ldbd_after.5", numProcessors, numPatches, nMoveableComputes);
  // loadDataASCII("ldbd_after", numProcessors, numPatches, nMoveableComputes);
  // CkExit();
#endif

  // For error checking:
  // Count up computes, to see if somebody doesn't have any computes
  int i;
#if 0
  int* computeCount = new int[numProcessors];
  for(i=0; i<numProcessors; i++)
    computeCount[i]=0;
  for(i=0; i<nMoveableComputes; i++)
    computeCount[computeArray[i].processor]++;
  for(i=0; i<numProcessors; i++) {
    if (computeCount[i]==0)
      iout << iINFO <<"Warning: Processor " << i 
	   << " has NO moveable computes.\n" << endi;
  }
  delete [] computeCount;
#endif
  
  // --- build the migration list: one MigrateInfo per compute whose new
  // --- processor differs from its current (group-relative) location
  CkVec<MigrateInfo *> migrateInfo;
  for(i=0;i<nMoveableComputes;i++) {
    if (computeArray[i].processor != from_procs[i]+stats->procs[0].pe) {
      /* CkPrintf("[%d] Obj %d migrating from %d (%d) to %d\n",
                     CkMyPe(),computeArray[i].handle.id.id[0],
			 from_procs[i], computeArray[i].oldProcessor, computeArray[i].processor); */
      MigrateInfo *migrateMe = new MigrateInfo;
      migrateMe->obj = computeArray[i].handle;
      //migrateMe->from_pe = computeArray[i].oldProcessor;
      // from_procs entries are group-relative; numProcessors marks "unknown"
      int frompe = from_procs[i];
      if (frompe == numProcessors)
        frompe = -1;
      else
        frompe = frompe + stats->procs[0].pe;
      migrateMe->from_pe = frompe;
      migrateMe->to_pe = computeArray[i].processor;
      if (frompe == -1) {
          // don't know yet which processor this compute belongs to, but
	  // inform receiver
        LDObjData obj;
        obj.handle = computeArray[i].handle;
        thisProxy[computeArray[i].processor].ObjMigrated(obj, NULL, 0, currentLevel-1);
      } 
      migrateInfo.insertAtEnd(migrateMe);

      // sneak in updates to ComputeMap
      //ERASE CkPrintf("%d setting %d to processor %d\n",CkMyPe(),computeArray[i].handle.id.id[0],computeArray[i].processor);
      computeMap->setNewNode(computeArray[i].handle.id.id[0],
				computeArray[i].processor);
    }
  }
  // CkPrintf("LOAD BALANCING READY %d\n",CkMyPe()); 

  LBMigrateMsg* msg;
  msg = createMigrateMsg(migrateInfo, numProcessors);

  // record per-PE loads and the PE range covered by this group
  peLoads = new double [numProcessors]; 
  startPE = processorArray[0].Id;
  endPE = processorArray[numProcessors-1].Id;
  // CkPrintf("[%d] numProcessors=%d, %d to %d\n",CkMyPe(),numProcessors,processorArray[0].Id,processorArray[numProcessors-1].Id);
  for (i=0; i<numProcessors; i++) {
	peLoads[i] = processorArray[i].load;
  }


  // release the working arrays; they are reallocated on the next call
  delete [] from_procs;
  delete [] processorArray;
  delete [] patchArray;
  delete [] computeArray;

  from_procs = NULL;
  processorArray = NULL;
  patchArray = NULL;
  computeArray = NULL;
  
  return msg;

}
Ejemplo n.º 11
0
/*
 * Allocates a new zero-initialized Node and fills its data field using the
 * caller-supplied builder callback.
 *
 * fin:       open input stream, forwarded to the builder
 * buildData: callback that reads from fin and returns the node's payload
 *
 * Returns the new node, or NULL on allocation failure (the previous version
 * dereferenced an unchecked calloc() result).
 */
Node * buildNode(FILE * fin, void *(*buildData)(FILE * in) ){

	Node * nn = (Node *)calloc(1, sizeof(Node));
	if (nn == NULL)
		return NULL;
	nn->data = buildData(fin);
	return nn;
}
Ejemplo n.º 12
0
// Initializes the network manager: seeds the ID generator from the server
// configuration, then builds and loads the runtime data. Returns false as
// soon as either step fails (loadData is skipped when buildData fails).
bool NetMgr::init()
{
    _genID.initConfig(ServerConfig::getRef().getPlatID(), ServerConfig::getRef().getAreaID());
    if (!buildData())
        return false;
    return loadData();
}
// Trains a tanh-hidden / softmax-output neural network on digit data loaded
// from "train.txt", reports train/test accuracy and timing, and saves the
// trained weights. Fixes: removed the dead `error` variable (its value was
// never read) and added an explicit return value.
int main()
{

  // init variables
  int truecnt = 0;
  int times,timed;
  
  // print useful info for reference
  std::cout << "\n" << "hidden neurons: " << "\t \t" << HIDDEN << std::endl;
  // init random number generator
  srand((int)time(NULL));  

  // create network
  std::cout << "initializing network..." << "\t \t";
  NeuralNet DigitNet;

  NeuralLayer * pHiddenLayer1 = new NeuralTanhLayer(INPUT,HIDDEN);
  DigitNet.addLayer( pHiddenLayer1 );
  NeuralLayer * pOutputLayer = new NeuralSoftmaxLayer(HIDDEN,OUTPUT);
  DigitNet.addLayer( pOutputLayer );

  // set output type:
  // SCALAR = tanh or sigmoid output layer (use one output neuron)
  // PROB = softmax output layer, 1-of-N output encoding (use two output neurons)
  const unsigned int outType = PROB;

  // set learning rate, momentum, decay rate
  const double learningRate = 0.15;
  const double momentum =     0.0;
  const double decayRate =    0.0;
  DigitNet.setParams(learningRate,momentum,decayRate,outType);

  std::cout << "done" << std::endl;
  
  // load training and test data
  std::cout << "loading data..." << "\t \t \t";
  std::vector< std::vector<double> > bigData( DATA_SIZE,std::vector<double>(INPUT+1,0.0) );
  loadFromFile(bigData,"train.txt");

  std::vector< std::vector<double> > trainData( TRAIN_SIZE,std::vector<double>(INPUT+1,0.0) );
  std::vector< std::vector<double> > testData( TEST_SIZE,std::vector<double>(INPUT+1,0.0) );
  
  // split the full data set into train and test partitions
  buildData(bigData,trainData,TRAIN_SIZE,testData,TEST_SIZE);
  std::cout << "done" << std::endl;
  
  // loop over training data points and train net
  // slice off first column of each row (example)
  times=(int)time(NULL);   // init time counter
  std::cout << "\n" << "training examples: " << "\t \t" << TRAIN_SIZE << std::endl;
  std::cout << "learning rate: " << "\t \t \t" << learningRate << std::endl;
  std::cout << "momentum: " << "\t \t \t" << momentum << std::endl;
  std::cout << "weight decay: " << "\t \t \t" << decayRate << std::endl;
  std::cout << "training network..." << "\t \t";
  for(int i=0;i<TRAIN_SIZE;++i)
  {
    std::vector<double> data = trainData[i];            // extract data point
    double label = data[0];                             // extract point label
    data.erase(data.begin());
    std::vector<double> nLabel = encode((int)label);    // encode to 1-of-N   
    
    std::vector<double> outputs = DigitNet.runNet(data);
    DigitNet.trainNet(data,nLabel,outType);    // train net (returned MSE unused)

    // decode output and compare to correct output 
    if( decode(outputs) == (int)label )
        truecnt++;    
  }

  // stop timer and print out useful info
  timed=(int)time(NULL);
  times=timed-times;
  std::cout << "done" << std::endl;
  std::cout << "training time: " << "\t \t \t" << times << " seconds " << std::endl;
  std::cout << "training accuracy: " << "\t \t" << truecnt*100./TRAIN_SIZE << "%" << std::endl;
  
  // test net on test data
  times=(int)time(NULL);   // init time counter
  std::cout << "\n" << "test points: " << "\t \t \t" << TEST_SIZE << std::endl;
  std::cout << "testing network..." << "\t \t";
  truecnt = 0;
  for(int i=0;i<TEST_SIZE;++i)
  {
    
    std::vector<double> data = testData[i];     // extract data point 
    double label = data[0];                     // extract label
    data.erase(data.begin());
   
    std::vector<double> outputs = DigitNet.runNet(data);    // run net

    // decode output and compare to correct output 
    if( decode(outputs) == (int)label )
        truecnt++;    
    
  }

  // stop timer and print out useful info
  timed=(int)time(NULL);
  times=timed-times;
  std::cout << "done" << std::endl;
  std::cout << "testing time: " << "\t \t \t" << times << " seconds " << std::endl;
  std::cout << "test accuracy: " << "\t \t \t" << truecnt*100./TEST_SIZE << "% " << std::endl;
  
  // save weights to reuse net in the future
  DigitNet.saveNet();

  return 0;
}