Example #1
static int __init module_start(void)
{
	int i, err = 0;
	dev_t devno = 0;

	PDEBUG("Module is loaded\n");
	
	/*
	 * alloc device number
	 */
	err = alloc_chrdev_region(&devno, echo_minor, ECHO_NR_DEVS,
		"echo");
	if (err < 0) {
		printk(KERN_WARNING "Can't alloc device number!\n");
		return err;
	}
	echo_major = MAJOR(devno);

	echo_devs = kmalloc(ECHO_NR_DEVS * sizeof(*echo_devs), GFP_KERNEL);
	if (!echo_devs) {
		unregister_chrdev_region(devno, ECHO_NR_DEVS);
		return -ENOMEM;
	}

	for (i = 0; i < ECHO_NR_DEVS; ++i)
		init_it(echo_devs + i, i);

	return 0;
}
Example #2
int main(int argc,char *argv[]){
	int *myray,*send_ray=NULL,*back_ray=NULL;
#ifndef _CIVL
	int count;
#endif
	int size,mysize,i,k,j,total;
	
	init_it(&argc,&argv);
/* each processor will get count elements from the root */
#ifndef _CIVL
	count=4;
#endif
	myray=(int*)malloc(count*sizeof(int));
/* create the data to be sent on the root */
	if(myid == mpi_root){
		size=count*numnodes;
		send_ray=(int*)malloc(size*sizeof(int));
		back_ray=(int*)malloc(numnodes*sizeof(int));
		for(i=0;i<size;i++)
			send_ray[i]=i;
	}
/* send different data to each processor */
	mpi_err = MPI_Scatter(send_ray, count, MPI_INT,
	                      myray, count, MPI_INT,
	                      mpi_root, MPI_COMM_WORLD);

/* each processor does a local sum */
	total=0;
	for(i=0;i<count;i++)
	    total=total+myray[i];
	printf("myid= %d total= %d\n",myid,total);
#ifdef _CIVL
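	/* sanity check on each rank's partial sum: rank myid receives the values
	   count*myid .. count*myid+count-1, whose sum is count*count*myid +
	   count*(count-1)/2; the assertion below presumes the CIVL configuration
	   supplies count == 5, giving 25*myid + 10 */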
	$assert(total == myid*25 + 10);
#endif
/* send the local sums back to the root */
	mpi_err = MPI_Gather(&total, 1, MPI_INT,
	                     back_ray, 1, MPI_INT,
	                     mpi_root, MPI_COMM_WORLD);
/* the root prints the global sum */
	if(myid == mpi_root){
	  total=0;
	  for(i=0;i<numnodes;i++)
	    total=total+back_ray[i];
	  printf("results from all processors= %d \n",total);
#ifdef _CIVL
	  $assert(total == 25*numnodes*(numnodes-1)/2+10*numnodes);
#endif
	}
#ifdef _CIVL
	free(myray);
	free(send_ray);
	free(back_ray);
#endif
	mpi_err = MPI_Finalize();
	return 0;
}
Example #3
int main(int argc,char *argv[]){
/* poe a.out -procs 3 -rmpool 1 */
	int *will_use;
	int *myray,*displacements=NULL,*counts=NULL,*allray=NULL;
	int size,mysize,i;
	
	init_it(&argc,&argv);
	mysize=myid+1;
	myray=(int*)malloc(mysize*sizeof(int));
	for(i=0;i<mysize;i++)
		myray[i]=myid+1;
/* counts and displacement arrays are only required on the root */
	if(myid == mpi_root){
		counts=(int*)malloc(numnodes*sizeof(int));
		displacements=(int*)malloc(numnodes*sizeof(int));
	}
/* we gather the counts to the root */
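/* note: every element of myray equals myid+1 == mysize, so gathering one
   int from myray is equivalent to gathering each rank's count */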
	mpi_err = MPI_Gather((void*)myray, 1, MPI_INT,
	                     (void*)counts, 1, MPI_INT,
	                     mpi_root, MPI_COMM_WORLD);
/* calculate displacements and the size of the recv array */
	if(myid == mpi_root){
		displacements[0]=0;
		for( i=1;i<numnodes;i++){
			displacements[i]=counts[i-1]+displacements[i-1];
		}
		size=0;
		for(i=0;i< numnodes;i++)
			size=size+counts[i];
		allray=(int*)malloc(size*sizeof(int));
	}
/* different amounts of data from each processor  */
/* is gathered to the root */
	mpi_err = MPI_Gatherv(myray, mysize, MPI_INT,
	                      allray, counts, displacements, MPI_INT,
	                      mpi_root, MPI_COMM_WORLD);

	if(myid == mpi_root){
		for(i=0;i<size;i++)
			printf("%d ",allray[i]);
		printf("\n");
	}
	mpi_err = MPI_Finalize();
	return 0;
}
Example #4
int
main (int argc, char *argv[])
{
    int *sray, *rray;
    int *sdisp, *scounts, *rdisp, *rcounts;
    int ssize, rsize, i, k, j;
    float z;

    init_it (&argc, &argv);
    scounts = (int *) shmem_malloc (sizeof (int) * numnodes);
    rcounts = (int *) shmem_malloc (sizeof (int) * numnodes);
    sdisp = (int *) shmem_malloc (sizeof (int) * numnodes);
    rdisp = (int *) shmem_malloc (sizeof (int) * numnodes);
    /* seed the random number generator with a different number on each
       processor */
    seed_random (myid);
    /* find data to send */
    for (i = 0; i < numnodes; i++) {
        random_number (&z);
        scounts[i] = (int) (10.0 * z) + 1;
    }
    printf ("-------myid= %d scounts=", myid);
    for (i = 0; i < numnodes; i++) {
        printf ("%d ", scounts[i]);
    }
    printf ("\n");

    /* send the data */
    // mpi_err = MPI_Alltoall(scounts,1,MPI_INT, rcounts,1,MPI_INT,
    // MPI_COMM_WORLD);
    shmem_barrier_all ();
    int other, j1;
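    /* put-based all-to-all of the counts: each PE writes the count it will
       send to PE j1 into slot myid of rcounts on PE j1, so after the barrier
       rcounts[i] on this PE holds the amount coming from PE i */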
    for (j1 = 0; j1 < numnodes; j1++) {
        shmem_int_put (&rcounts[myid], &scounts[j1], 1, j1);
    }
    shmem_barrier_all ();
    printf ("myid= %d rcounts=", myid);
    for (i = 0; i < numnodes; i++) {
        printf ("%d ", rcounts[i]);
    }
    printf ("\n");
    shmem_finalize ();
    return 0;
}
Example #5
static int	button_press(int button, int y, int x, t_fol *f)
{
	if (button == 2)
		f->j_mod = (f->j_mod == 0 ? 1 : 0);
	if ((button == 4 && f->zoom < 8000000000000000) || button == 5)
	{
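		/*
		** Adjust the view centre relative to the cursor before rescaling,
		** presumably so the zoom is anchored at the mouse position rather
		** than at the middle of the window.
		*/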
		if (button == 5)
			f->zoom /= 1.1;
		f->mid.x += f->curs.x * f->zoom * (button == 5 ? 0.1 : -0.1);
		f->mid.y += f->curs.y * f->zoom * (button == 5 ? 0.1 : -0.1);
		if (button == 4)
			f->zoom *= 1.1;
		f->i_max = f->type == 'b' || f->type == 'B' ? 50 : 100;
		init_it(f);
		init_ib(f);
	}
	return (x + y);
}
Example #6
int main(int argc,char *argv[]){
	int *sray,*rray;
	int *sdisp,*scounts,*rdisp,*rcounts;
	int ssize,rsize,i,k,j;
	float z;

	init_it(&argc,&argv);
	scounts=(int*)malloc(sizeof(int)*numnodes);
	rcounts=(int*)malloc(sizeof(int)*numnodes);
	sdisp=(int*)malloc(sizeof(int)*numnodes);
	rdisp=(int*)malloc(sizeof(int)*numnodes);
/*
 * seed the random number generator with a
 * different number on each processor
 */
	seed_random(myid);
/* find  data to send */
	for(i=0;i<numnodes;i++){
		random_number(&z);
		scounts[i]=(int)(10.0*z)+1;
	}
	printf("myid= %d scounts=",myid);
	for(i=0;i<numnodes;i++)
		printf("%d ",scounts[i]);
	printf("\n");
/* send the data */
	mpi_err = MPI_Alltoall(scounts, 1, MPI_INT,
	                       rcounts, 1, MPI_INT,
	                       MPI_COMM_WORLD);
	printf("myid= %d rcounts=",myid);
	for(i=0;i<numnodes;i++)
		printf("%d ",rcounts[i]);
	printf("\n");
#ifdef _CIVL
	free(scounts);
	free(rcounts);
	free(sdisp);
	free(rdisp);
#endif
	mpi_err = MPI_Finalize();
	return 0;
}
Example #7
int main(int argc,char *argv[]){
	int *myray,*send_ray=NULL;
	int count;
	int size,mysize,i,k,j,total,gtotal;
	
	init_it(&argc,&argv);
/* each processor will get count elements from the root */
	count=4;
	myray=(int*)malloc(count*sizeof(int));
/* create the data to be sent on the root */
	if(myid == mpi_root){
		size=count*numnodes;
		send_ray=(int*)malloc(size*sizeof(int));
		for(i=0;i<size;i++)
			send_ray[i]=i;
	}
/* send different data to each processor */
	mpi_err = MPI_Scatter(send_ray, count, MPI_INT,
	                      myray, count, MPI_INT,
	                      mpi_root, MPI_COMM_WORLD);
/* each processor does a local sum */
	total=0;
	for(i=0;i<count;i++)
	    total=total+myray[i];
	printf("myid= %d total= %d\n ",myid,total);
/* send the local sums back to the root */
	mpi_err = MPI_Reduce(&total, &gtotal, 1, MPI_INT,
	                     MPI_SUM, mpi_root, MPI_COMM_WORLD);
/* the root prints the global sum */
	if(myid == mpi_root){
	  printf("results from all processors= %d \n ",gtotal);
	}
	mpi_err = MPI_Finalize();
	return 0;
}
Example #8
int			init_fractol(t_fol *f)
{
	f->zoom = WIN_H / 3;
	f->mid.x = f->type == 'j' ? WIN_H / 2 : 2 * WIN_H / 3;
	f->mid.y = WIN_W / 2;
	f->ud = 0;
	f->lr = 0;
	f->zoom_io = 0;
	f->color = 0;
	f->j_param.x = 0;
	f->j_param.y = 0;
	f->i_max = f->type == 'b' || f->type == 'B' ? 50 : 100;
	f->j_mod = 0;
	f->i_mod = 0;
	f->b_max = 0;
	f->b_max1 = 0;
	f->b_max2 = 0;
	f->i_min = 0;
	f->power = 2;
	init_it(f);
	init_ib(f);
	return (1);
}
Example #9
int
main (int argc, char *argv[])
{
    int *sray, *rray;
    int *sdisp, *scounts, *rdisp, *rcounts;
    int ssize, rsize, i, k, j;
    float z;

    init_it (&argc, &argv);
    scounts = (int *) shmem_malloc (sizeof (int) * numnodes);
    rcounts = (int *) shmem_malloc (sizeof (int) * numnodes);
    sdisp = (int *) shmem_malloc (sizeof (int) * numnodes);
    rdisp = (int *) shmem_malloc (sizeof (int) * numnodes);
    /* seed the random number generator with a different number on each
       processor */
    seed_random (myid);
    /* find out how much data to send */
    for (i = 0; i < numnodes; i++) {
        random_number (&z);
        scounts[i] = (int) (5.0 * z) + 1;
    }
    printf ("myid= %d scounts=%d %d %d %d\n", myid, scounts[0], scounts[1],
            scounts[2], scounts[3]);
    /* tell the other processors how much data is coming */
    // mpi_err = MPI_Alltoall(scounts,1,MPI_INT, rcounts,1,MPI_INT,
    // MPI_COMM_WORLD);
    shmem_barrier_all ();
    int other, j1;
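    /* put-based all-to-all of the counts: each PE writes the count it will
       send to PE j1 into slot myid of rcounts on PE j1, so after the barrier
       rcounts[i] on this PE holds the amount coming from PE i */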
    for (j1 = 0; j1 < numnodes; j1++) {
        shmem_int_put (&rcounts[myid], &scounts[j1], 1, j1);
    }
    shmem_barrier_all ();
    printf ("-----myid= %d rcounts=", myid);
    for (i = 0; i < numnodes; i++) {
        printf ("%d ", rcounts[i]);
    }
    printf ("\n");


    /* write(*,*)"myid= ",myid," rcounts= ",rcounts */
    /* calculate displacements and the size of the arrays */
    sdisp[0] = 0;
    for (i = 1; i < numnodes; i++) {
        sdisp[i] = scounts[i - 1] + sdisp[i - 1];
    }
    rdisp[0] = 0;
    for (i = 1; i < numnodes; i++) {
        rdisp[i] = rcounts[i - 1] + rdisp[i - 1];
    }
    ssize = 0;
    rsize = 0;
    for (i = 0; i < numnodes; i++) {
        ssize = ssize + scounts[i];
        rsize = rsize + rcounts[i];
    }

    /* allocate send and rec arrays */
    /* symmetric allocation sized for the worst case (each count above is at most 5) */
    sray = (int *) shmem_malloc (sizeof (int) * 5 * numnodes);
    rray = (int *) shmem_malloc (sizeof (int) * 5 * numnodes);
    for (i = 0; i < ssize; i++) {
        sray[i] = myid;
    }
    /* send/rec different amounts of data to/from each processor */
    // mpi_err = MPI_Alltoallv(sray,scounts,sdisp,MPI_INT,
    // rray,rcounts,rdisp,MPI_INT, MPI_COMM_WORLD);
    shmem_barrier_all ();
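    /* emulate MPI_Alltoallv with one-sided operations: fetch, from PE j1,
       the displacement reserved for this PE's data (rdisp[myid] on j1), then
       put scounts[j1] ints from our send buffer at that remote offset */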
    for (j1 = 0; j1 < numnodes; j1++) {
        int k1 = sdisp[j1];
        static int k2;
        shmem_int_get (&k2, &rdisp[myid], 1, j1);
        shmem_int_put (rray + k2, sray + k1, scounts[j1], j1);
    }
    shmem_barrier_all ();

    printf ("myid= %d rray=", myid);
    for (i = 0; i < rsize; i++) {
        printf ("%d ", rray[i]);
    }
    printf ("\n");
    // mpi_err = MPI_Finalize();
    shmem_finalize ();
    return 0;
}
Example #10
int
main (int argc, char *argv[])
{
    int *sray, *rray;
    int *sdisp, *scounts, *rdisp, *rcounts, *rcounts_full;
    int ssize, rsize, i, k, j;
    float z;

    init_it (&argc, &argv);
    scounts = (int *) shmem_malloc (sizeof (int) * numnodes);
    rcounts = (int *) shmem_malloc (sizeof (int) * numnodes);
    rcounts_full = (int *) shmem_malloc (sizeof (int) * numnodes * numnodes);
    sdisp = (int *) shmem_malloc (sizeof (int) * numnodes);
    rdisp = (int *) shmem_malloc (sizeof (int) * numnodes);
    /* seed the random number generator with a different number on each
       processor */
    seed_random (myid);
    /* find out how much data to send */
    for (i = 0; i < numnodes; i++) {
        random_number (&z);
        scounts[i] = (int) (5.0 * z) + 1;
    }
    printf ("myid= %d scounts=%d %d %d %d\n", myid, scounts[0], scounts[1],
            scounts[2], scounts[3]);
    printf ("\n");
    /* tell the other processors how much data is coming */
    // mpi_err = MPI_Alltoall(scounts,1,MPI_INT, rcounts,1,MPI_INT,
    // MPI_COMM_WORLD);
    static long psync[_SHMEM_COLLECT_SYNC_SIZE];
    for (i = 0; i < _SHMEM_COLLECT_SYNC_SIZE; i++)
        psync[i] = _SHMEM_SYNC_VALUE;
    shmem_barrier_all ();
    int other, j1;
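    /* fcollect concatenates every PE's scounts array into rcounts_full in PE
       order, so rcounts_full[i*numnodes + j] is the count PE i will send to
       PE j; column myid is therefore what this PE will receive */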
    shmem_fcollect32 (rcounts_full, scounts, numnodes, 0, 0, numnodes, psync);
    for (i = 0; i < numnodes; i++) {
        rcounts[i] = rcounts_full[i * numnodes + myid];
    }
    printf ("-----myid= %d rcounts=", myid);
    for (i = 0; i < numnodes; i++)
        printf ("%d ", rcounts[i]);
    printf ("\n");


    /* write(*,*)"myid= ",myid," rcounts= ",rcounts */
    /* calculate displacements and the size of the arrays */
    sdisp[0] = 0;
    for (i = 1; i < numnodes; i++) {
        sdisp[i] = scounts[i - 1] + sdisp[i - 1];
    }
    rdisp[0] = 0;
    for (i = 1; i < numnodes; i++) {
        rdisp[i] = rcounts[i - 1] + rdisp[i - 1];
    }
    ssize = 0;
    rsize = 0;
    for (i = 0; i < numnodes; i++) {
        ssize = ssize + scounts[i];
        rsize = rsize + rcounts[i];
    }

    /* allocate send and rec arrays */
    /* symmetric allocation sized for the worst case (each count above is at most 5) */
    sray = (int *) shmem_malloc (sizeof (int) * 5 * numnodes);
    rray = (int *) shmem_malloc (sizeof (int) * 5 * numnodes);
    for (i = 0; i < ssize; i++) {
        sray[i] = myid;
    }
    /* send/rec different amounts of data to/from each processor */
    // mpi_err = MPI_Alltoallv(sray,scounts,sdisp,MPI_INT,
    // rray,rcounts,rdisp,MPI_INT, MPI_COMM_WORLD);
    shmem_barrier_all ();
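    /* emulate MPI_Alltoallv with one-sided operations: fetch, from PE j1,
       the displacement reserved for this PE's data (rdisp[myid] on j1), then
       put scounts[j1] ints from our send buffer at that remote offset */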
    for (j1 = 0; j1 < numnodes; j1++) {
        int k1 = sdisp[j1];
        static int k2;
        shmem_int_get (&k2, &rdisp[myid], 1, j1);
        shmem_int_put (rray + k2, sray + k1, scounts[j1], j1);
    }
    shmem_barrier_all ();

    // A plain shmem_collect32 cannot replace the Alltoallv here: even though
    // rcounts[myid] differs from PE to PE, collect delivers the same
    // concatenation of every PE's contribution to all PEs, whereas each PE
    // must receive a different slice from every other PE.
    // shmem_collect32(rray_full,sray,rcounts[myid],0,0,numnodes,psync);

    printf ("myid= %d rray=", myid);
    for (i = 0; i < rsize; i++)
        printf ("%d ", rray[i]);
    printf ("\n");
    // mpi_err = MPI_Finalize();
    shmem_finalize ();
    return 0;
}