Example #1
int 
conpar(integer nov, integer na, integer nra, integer nca, doublereal ***a, integer ncb, doublereal ***b, integer nrc, doublereal ***c, doublereal **d, integer *irf, integer *icf)
{
  integer nex;

  nex = nca - (nov << 1);
  if (nex == 0) {
    return 0;
  }

  switch(global_conpar_type) {
#ifdef PTHREADS
  case CONPAR_PTHREADS:
    conpar_threads_wrapper(nov, na, nra, nca, a, ncb, b, nrc, c, d, irf, icf);
    break;
#endif
#ifdef MPI
  case CONPAR_MPI:
    if(global_verbose_flag)
      printf("MPI conpar start\n");
    conpar_mpi_wrapper(nov, na, nra, nca, a, ncb, b, nrc, c, d, irf, icf);
    if(global_verbose_flag)
      printf("MPI conpar end\n");
    break;
#endif
  default:
    conpar_process(nov, na, nra, nca, a, ncb, b, nrc, c, d, irf, icf);
    break;
  }
  return 0;
}
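The dispatcher above selects a backend at run time through global_conpar_type, while the pthreads and MPI cases are only compiled in when the corresponding macros are defined. Below is a minimal, self-contained sketch of that same pattern; the names (solver_backend, solve_serial, solve_threads, USE_THREADS) are hypothetical illustrations of the technique, not part of the AUTO/conpar API.

/* Sketch of a run-time backend switch whose parallel case only exists
   when the build flag is defined.  All names here are hypothetical. */
#include <stdio.h>

enum backend { BACKEND_DEFAULT, BACKEND_THREADS };
static enum backend solver_backend = BACKEND_DEFAULT;

static void solve_serial(int n)  { printf("serial solve, n=%d\n", n); }
#ifdef USE_THREADS
static void solve_threads(int n) { printf("threaded solve, n=%d\n", n); }
#endif

static void solve(int n)
{
  switch (solver_backend) {
#ifdef USE_THREADS
  case BACKEND_THREADS:
    solve_threads(n);
    break;
#endif
  default:
    solve_serial(n);   /* fall back when no parallel backend is built in */
    break;
  }
}

int main(void)
{
  solve(8);
  return 0;
}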
Example #2
static void *conpar_threads_process(void * arg)
{
  conpar_parallel_arglist *p = arg;
  integer i,j;
  /* The d matrix is built up as a sum of
     contributions from all of the workers. */
  p->d = dmatrix(p->nrc, p->ncb);
  /* In the shared-memory case each worker
     accumulates its contribution into this local
     copy, which is summed into shared memory at
     the end. */
  for (i = 0; i < p->nrc; i++)
    for (j = 0; j < p->ncb; j++)
      p->d[i][j] = 0.0;
  conpar_process(p->nov, p->na, p->nra, p->nca, p->a, p->ncb, p->b,
                 p->nrc, p->c, p->d, p->irf, p->icf);
  return NULL;
}
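The comments in this example describe a local-accumulation reduction: each worker zeroes a private d matrix, sums its own contribution into it, and the per-worker results are added into the shared matrix afterwards. The following self-contained sketch shows that accumulate-locally-then-merge-under-a-lock pattern using plain double arrays and a pthread mutex; every name in it is illustrative and not taken from conpar.c.

/* Sketch of the "sum into a private copy, then merge into shared memory"
   reduction described above.  Illustrative names only. */
#include <pthread.h>
#include <stdio.h>

#define NROWS 2
#define NCOLS 3
#define NWORKERS 4

static double shared_d[NROWS][NCOLS];        /* result all workers add into */
static pthread_mutex_t d_lock = PTHREAD_MUTEX_INITIALIZER;

static void *worker(void *arg)
{
  int id = *(int *)arg;
  double local_d[NROWS][NCOLS] = {{0.0}};    /* private accumulator, starts at zero */
  int i, j;

  /* Stand-in for the real work: each worker contributes id+1 to every entry. */
  for (i = 0; i < NROWS; i++)
    for (j = 0; j < NCOLS; j++)
      local_d[i][j] += (double)(id + 1);

  /* Merge the private contribution into shared memory once, under a lock. */
  pthread_mutex_lock(&d_lock);
  for (i = 0; i < NROWS; i++)
    for (j = 0; j < NCOLS; j++)
      shared_d[i][j] += local_d[i][j];
  pthread_mutex_unlock(&d_lock);
  return NULL;
}

int main(void)
{
  pthread_t threads[NWORKERS];
  int ids[NWORKERS];
  int t;

  for (t = 0; t < NWORKERS; t++) {
    ids[t] = t;
    pthread_create(&threads[t], NULL, worker, &ids[t]);
  }
  for (t = 0; t < NWORKERS; t++)
    pthread_join(threads[t], NULL);

  printf("shared_d[0][0] = %g\n", shared_d[0][0]);  /* 1+2+3+4 = 10 */
  return 0;
}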
Example #3
File: conpar.c Project: F-A/pydstool
int 
conpar_default_wrapper(integer *nov, integer *na, integer *nra, integer *nca, doublereal ***a, integer *ncb, doublereal ***b, integer *nbc, integer *nrc, doublereal ***c, doublereal **d, integer *irf, integer *icf)
{
    conpar_parallel_arglist data;
    data.nov = nov;
    data.nra = nra;
    data.nca = nca;
    data.a = a;
    data.ncb = ncb;
    data.b = b;
    data.nbc = nbc;
    data.nrc = nrc;
    data.c = c;
    data.d = d;
    data.irf = irf;
    data.icf = icf;
    data.loop_start = 0;
    data.loop_end = *na;
    conpar_process(&data);
    return 0;
}
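This wrapper packs every argument into a single conpar_parallel_arglist so that conpar_process can receive one pointer, which is also the shape a pthreads start routine expects (compare conpar_threads_process in Example #2, which unpacks such a struct). The struct below is only a plausible reconstruction inferred from the assignments above; the project's actual definition, and its f2c-style integer/doublereal typedefs, may differ in field types and order.

/* Hypothetical reconstruction of conpar_parallel_arglist, inferred from the
   assignments in conpar_default_wrapper above; the real definition in the
   project may differ. */
typedef long int integer;      /* f2c-style typedefs, shown only to make  */
typedef double doublereal;     /* this sketch self-contained              */

typedef struct {
    integer *nov, *nra, *nca;
    doublereal ***a;
    integer *ncb;
    doublereal ***b;
    integer *nbc, *nrc;
    doublereal ***c;
    doublereal **d;
    integer *irf, *icf;
    integer loop_start, loop_end;  /* slice [loop_start, loop_end) of the *na blocks handled by this call */
} conpar_parallel_arglist;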