TreeModel::TreeModel(Teuchos::RCP<Teuchos::ParameterList> validParameters, QString saveFileName, QObject *parent):
	QAbstractItemModel(parent),
	dependencies(false),
	validParameters(validParameters)
{
	basicSetup(saveFileName);
}
TreeModel::TreeModel(Teuchos::RCP<Teuchos::ParameterList> validParameters, Teuchos::RCP<Optika::DependencySheet> dependencySheet,
     QString saveFileName, QObject *parent):
	 QAbstractItemModel(parent),
	 dependencies(true),
	 validParameters(validParameters),
	 dependencySheet(dependencySheet)
{
	basicSetup(saveFileName);
	connect(this, SIGNAL(dataChanged(const QModelIndex&, const QModelIndex&)), 
		this, SLOT(dataChangedListener(const QModelIndex&, const QModelIndex&)));
}
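The two constructors above differ only in whether a DependencySheet is supplied. Below is a minimal usage sketch, assuming the TreeModel declaration is in scope; the parameter names and save-file name are illustrative, not taken from the original.
#include <QString>
#include <Teuchos_ParameterList.hpp>
#include <Teuchos_RCP.hpp>

void buildTreeModel()
{
	// A small validated parameter list to back the model (entries are illustrative).
	Teuchos::RCP<Teuchos::ParameterList> validParameters =
		Teuchos::rcp(new Teuchos::ParameterList("Example Parameters"));
	validParameters->set("Max Iterations", 10);

	// First constructor: no dependency sheet, so the model's "dependencies"
	// flag is initialized to false and no dataChanged listener is connected.
	TreeModel *model = new TreeModel(validParameters, QString("saved_settings.xml"), 0);
	// ... hand the model to a view, etc. ...
	delete model;
}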
// on "init" you need to initialize your instance
bool TutorialZapMunch::init()
{
    //////////////////////////////
    // 1. super init first
    if ( !CCLayer::init() )
    {
        return false;
    }
    basicSetup();
    addPlayers();

    return true;
}
// on "init" you need to initialize your instance
bool CCLayerParent::init()
{
    //////////////////////////////
    // 1. super init first
    if ( !CCLayer::init() )
    {
        return false;
    }
    basicSetup();
    addPlayers();

    return true;
}
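Both init() overrides above follow the usual cocos2d-x two-phase construction (allocate, then init). A hedged sketch of how such a layer is typically instantiated, assuming the class declares the stock CREATE_FUNC(TutorialZapMunch) factory in its header; the function name below is illustrative.
// Sketch only: CREATE_FUNC generates a static create() that news the object,
// calls init() (the override shown above), and autoreleases it on success.
CCScene* makeTutorialScene()
{
	CCScene *scene = CCScene::create();
	TutorialZapMunch *layer = TutorialZapMunch::create(); // runs init() above
	scene->addChild(layer);
	return scene;
}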
Example #5
bool ParallaxObject::init()
{
	// If the base class's init() isn't successful, return false
	if(!Object::init())
	{
		return false;
	}
	
	// Set variables to default value
	basicSetup();

	return true;
}
Example #6
bool ParallaxObject::initWithFileName(const char* fileName)
{
	// If the base class's init() isn't successful, return false
	if(!Object::init())
	{
		return false;
	}
	
	// Set variables to default value
	basicSetup();

	// Initialize the sprites with the proper filename
	this->setSprite(fileName);

	return true;
}
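initWithFileName() pairs naturally with a create-style factory. The following is only a sketch, under the assumption that ParallaxObject's base ultimately derives from CCObject so autorelease() is available; the factory name is illustrative, not from the original.
ParallaxObject* createParallaxObject(const char* fileName)
{
	ParallaxObject *obj = new ParallaxObject();
	if (obj && obj->initWithFileName(fileName))
	{
		obj->autorelease(); // assumes CCObject-style reference counting
		return obj;
	}
	delete obj;
	return NULL;
}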
void CCRTPPacketInit2(CCRTPPacket* crPacket, char cData, int nPort) {
  basicSetup(crPacket);
  setPort(crPacket, nPort);

  setData(crPacket, &cData, 1);
}
void CCRTPPacketInit3(CCRTPPacket* crPacket, char *cData, int nDataLength, int nPort) {
  basicSetup(crPacket);
  setPort(crPacket, nPort);

  setData(crPacket, cData, nDataLength);
}
void CCRTPPacketInit1(CCRTPPacket* crPacket, int nPort) {
  basicSetup(crPacket);
  setPort(crPacket, nPort);
}
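For reference, a usage sketch of the three initializers above; the CCRTPPacket definition is not shown here, so the stack declarations and the port/payload values are assumptions.
void exampleUsage()
{
	CCRTPPacket header, single, multi;
	char payload[3] = { 0x01, 0x02, 0x03 };

	CCRTPPacketInit1(&header, 0);            // port only, no payload
	CCRTPPacketInit2(&single, 0x7f, 1);      // single data byte on port 1
	CCRTPPacketInit3(&multi, payload, 3, 2); // 3-byte buffer on port 2
}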
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <mpi.h>

// WallTime(), basicSetup() and the sum* variants are declared elsewhere in the project.
int main(int argc, char** argv){
	// first argument is the task number (5 = sumSlow); defaults to 4
	// second argument sets how many summation lengths to generate; defaults to 13
	int Task;
	if(argc > 1)
	{
		if ((atoi(argv[1])>0) && (atoi(argv[1])<6))
			Task = atoi(argv[1]);
		else
			Task=4;
	}
	else
		Task = 4;

	int iterations;
	if(argc > 2)
	{
		if (atoi(argv[2])>0) 
			iterations = atoi(argv[2]);
		else
			iterations=13;
	}	
	else
		iterations = 13;

	//printf("%d\n",iterations);
	int rank = 0;
	int i; // Generic loop variable.
	double startTime; // Storing the start time while measuring.
	double S = (M_PI*M_PI)/6; // The limit of the series.
	MPI_Init(&argc,&argv);

	// Setting up the summation lengths; the number of lengths (iterations)
	// comes from the second command-line argument above.
	int N[iterations]; // Vector with the summation lengths.
	double* Sn     = (double*)malloc(iterations*sizeof(double));
	double* SnSlow = (double*)malloc(iterations*sizeof(double)); // Vectors of the partial sums.
	basicSetup(iterations, N, Sn, SnSlow);

	
	if (Task==1)
	{
		printf("running the non-parallelized program (Task 1)\n");
		printf("n \terror \t\ttime\n");
		// Only the largest summation length is timed; swap the line below for
		// the commented-out loop to sweep all sizes.
		//for(i=0; i<iterations; ++i)
		i=iterations-1;
		{
			startTime = WallTime();
			Sn[i] = sum(N[i]);
			printf("%d \t%e \t%e\n", N[i], S-Sn[i], WallTime()-startTime);
		}
	}
	if (Task==2)
	{
		printf("running the openMP-parallelized program (Task 2)\n");
		printf("n \terror \t\ttime\n");
		//for(i=0; i<iterations; ++i)
		i=iterations-1;
		{
			startTime = WallTime();
			Sn[i] = sumShared(N[i]);
			printf("%d \t%e \t%e\n", N[i], S-Sn[i], WallTime()-startTime);
		}
	}
	if (Task==3)
	{
		MPI_Comm_rank(MPI_COMM_WORLD,&rank);
		if(rank==0)
		{
			printf("running the MPI-parallelized program (Task 3)\n");
			printf("n \terror \t\ttime\n");
		}
		//for(i=0; i<iterations; ++i)
		i=iterations-1;
		{
			if(rank==0)
				startTime = WallTime();
			Sn[i] = sumDist(N[i],&rank);
			if(rank==0)
				printf("%d \t%e \t%e\n", N[i], S-Sn[i], WallTime()-startTime);
		}
	}
	if (Task==4)
	{
		MPI_Comm_rank(MPI_COMM_WORLD,&rank);
		if(rank==0)
		{
			printf("running the openMP- and MPI-parallelized program (Task 4)\n");
			printf("n \terror \t\ttime\n");
		}
		//for(i=0; i<iterations; ++i)
		i=iterations-1;
		{
			if(rank==0)
				startTime = WallTime();
			Sn[i] = sumHybrid(N[i],&rank);
			if(rank==0)
				printf("%d \t%e \t%e\n", N[i], S-Sn[i], WallTime()-startTime);
		}
	}
	if (Task==5)
	{
		printf("running the non-parallelized program with better summation order (Task 5)\n");
		printf("n \terror \t\ttime\n");
		//for(i=0; i<iterations; ++i)
		i=iterations-1;
		{
			startTime = WallTime();
			Sn[i] = sumSlow(N[i]);
			printf("%d \t%e \t%e\n", N[i], S-Sn[i], WallTime()-startTime);
		}
	}
	
	free(Sn); free(SnSlow);
	
	MPI_Finalize();
	return 0;
}
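The program above estimates the limit S = pi^2/6 of the series of 1/k^2 by partial sums. The actual sum()/sumSlow() implementations are not shown, so the following is only a sketch of the idea behind the "better summation order" of Task 5: adding the terms from k = n down to k = 1 accumulates the smallest values first, which reduces rounding error.
#include <math.h>

// Sketch only (not the original sum()/sumSlow()): partial sum of 1/k^2.
// Summing from the largest k downward adds the smallest terms first, so they
// are not absorbed by an already-large accumulator.
double partialSumSmallestFirst(int n)
{
	double s = 0.0;
	for (int k = n; k >= 1; --k)
		s += 1.0 / ((double)k * (double)k);
	return s;
}
// Error of the partial sum: (M_PI*M_PI)/6.0 - partialSumSmallestFirst(n).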