Example #1
void Correlator::Run() {

  complex<double> ycorr;

  Matrix< complex<double> > yvec(1,indim);

  // Fetch the sampled input vector
  yvec = vin1.GetDataObj();

  // Correlate the input against the reference vector wvec
  ycorr = (Hermitian(yvec)*wvec)[0][0];

  // cout << ycorr << endl;

  //
  // Decision process
  //

  unsigned int Ns = (1 << Nb());
  unsigned int symbol_id = Ns, outbit;
  double mindist = VERY_BIG_FLOAT;

  // Pick the constellation symbol closest to the correlator output
  for (unsigned int sid=0; sid<Ns; sid++) {
    double d = distance(sid,ycorr);
    if (d < mindist) {
      symbol_id = sid;
      mindist = d;
    }
  }

  // Emit the Gray-decoded bits of the chosen symbol, MSB first
  for (int i=0; i<Nb(); i++) {
    outbit = ( gray_decoding[symbol_id] >> (Nb()-i-1) ) & 0x1;
    out1.DeliverDataObj( outbit );
  }
}
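
Below is a minimal, self-contained sketch of the same decision step, for readers without the surrounding framework: pick the constellation point closest to the correlator output, then emit its Gray-decoded bits MSB first. The 4-point constellation, the gray_decoding table and the hard-coded ycorr are illustrative stand-ins, not the ones used by Correlator.

#include <complex>
#include <cstdio>

int main()
{
    const unsigned int Nb = 2, Ns = 1u << Nb;            // bits per symbol, symbol count
    const std::complex<double> constellation[4] = {
        {1.0, 1.0}, {-1.0, 1.0}, {-1.0, -1.0}, {1.0, -1.0}
    };
    const unsigned int gray_decoding[4] = {0, 1, 3, 2};  // Gray label -> bit pattern

    const std::complex<double> ycorr(0.8, -1.1);         // pretend correlator output

    // Minimum-distance decision, computing each distance only once
    unsigned int symbol_id = 0;
    double mindist = std::abs(ycorr - constellation[0]);
    for (unsigned int sid = 1; sid < Ns; ++sid) {
        const double d = std::abs(ycorr - constellation[sid]);
        if (d < mindist) { symbol_id = sid; mindist = d; }
    }

    // Gray-decode and print the bits, MSB first
    for (unsigned int i = 0; i < Nb; ++i) {
        std::printf("%u", (gray_decoding[symbol_id] >> (Nb - i - 1)) & 0x1u);
    }
    std::printf("\n");
    return 0;
}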
Example #2
void MainWindow::_update_scene_matrix(const QMatrix4x4& mat)
{
    // View matrix; it stays at identity while the lookAt() call below is disabled
    QMatrix4x4 moo;

    QVector3D eye(sqrt(2.0)*_axis_len/2.0,
                  0,
                  sqrt(2.0)*_axis_len/2.0);
    QVector3D center(0,0,0);
    QVector3D up(0,1,0);

    //moo.lookAt(eye,center,up);

    // Homogeneous endpoints of the three coordinate axes (w = 1)
    QVector4D xvec(_axis_len,0,0,1);
    QVector4D yvec(0,_axis_len,0,1);
    QVector4D zvec(0,0,_axis_len,1);

    // Apply the model matrix _mat first, then the view matrix
    xvec = moo*_mat*xvec;
    yvec = moo*_mat*yvec;
    zvec = moo*_mat*zvec;

    // Update the 2D axis lines; y is negated because scene y grows downwards
    _xitem->setLine(0,0,xvec.x(),-xvec.y());
    _yitem->setLine(0,0,yvec.x(),-yvec.y());
    _zitem->setLine(0,0,zvec.x(),-zvec.y());
}
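
As a stand-alone illustration of the projection above, here is a minimal sketch that enables the lookAt() view matrix (commented out in the original) and projects one axis endpoint. It assumes Qt's gui module; axisLen and the printed output are illustrative.

#include <QMatrix4x4>
#include <QVector3D>
#include <QVector4D>
#include <QDebug>

int main()
{
    const float axisLen = 100.0f;                       // stand-in for _axis_len

    // View matrix looking at the origin from a point on the x-z diagonal
    QMatrix4x4 view;
    view.lookAt(QVector3D(axisLen, 0.0f, axisLen),      // eye
                QVector3D(0.0f, 0.0f, 0.0f),            // center
                QVector3D(0.0f, 1.0f, 0.0f));           // up

    // Homogeneous x-axis endpoint (w = 1 so translations apply)
    QVector4D xAxis(axisLen, 0.0f, 0.0f, 1.0f);

    // Transform into view space; x()/y() give the 2D line endpoint,
    // with y negated for scene coordinates that grow downwards
    QVector4D projected = view * xAxis;
    qDebug() << projected.x() << -projected.y();
    return 0;
}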
Example #3
CString CGenethonDoc::runTest3(PyObject *pModule, CString& function) {

    CString output = CString();
    PyObject *pDict, *pFunc;
    PyObject *pArgTuple, *pValue, *pXVec, *pYVec;
    PyObject *ptype, *pvalue, *ptraceback;

    int i;

    // Set the path to include the current directory in case the module is located there. Found from
    // http://stackoverflow.com/questions/7624529/python-c-api-doesnt-load-module
    // and http://stackoverflow.com/questions/7283964/embedding-python-into-c-importing-modules


    if (pModule != NULL) {
        pFunc = PyObject_GetAttrString(pModule, function);   //Get the function by its name
        // pFunc is a new reference

        if (pFunc && PyCallable_Check(pFunc)) {

            //Set up a tuple that will contain the function arguments. In this case, the
            //function requires two tuples, so we set up a tuple of size 2.
            pArgTuple = PyTuple_New(2);

            //Make some vectors containing the data
            static const double xarr[] = { 1,2,3,4,5,6,7,8,9,10,11,12,13,14 };
            std::vector<double> xvec(xarr, xarr + sizeof(xarr) / sizeof(xarr[0]));
            static const double yarr[] = { 0,0,1,1,0,0,2,2,0,0,1,1,0,0 };
            std::vector<double> yvec(yarr, yarr + sizeof(yarr) / sizeof(yarr[0]));

            //Transfer the C++ vector to a python tuple
            pXVec = PyTuple_New(xvec.size());
            for (i = 0; i < xvec.size(); ++i) {
                pValue = PyFloat_FromDouble(xvec[i]);
                if (!pValue) {
                    Py_DECREF(pXVec);
                    Py_DECREF(pModule);
                    return "Cannot convert array value\n";
                }
                PyTuple_SetItem(pXVec, i, pValue);
            }

            //Transfer the other C++ vector to a python tuple
            pYVec = PyTuple_New(yvec.size());
            for (i = 0; i < yvec.size(); ++i) {
                pValue = PyFloat_FromDouble(yvec[i]);
                if (!pValue) {
                    Py_DECREF(pYVec);
                    Py_DECREF(pModule);
                    return "Cannot convert array value\n";
                }
                PyTuple_SetItem(pYVec, i, pValue); //
            }

            //Set the argument tuple to contain the two input tuples
            PyTuple_SetItem(pArgTuple, 0, pXVec);
            PyTuple_SetItem(pArgTuple, 1, pYVec);

            //Call the python function
            pValue = PyObject_CallObject(pFunc, pArgTuple);

            Py_DECREF(pArgTuple);
            // pXVec and pYVec are not released here: PyTuple_SetItem stole their
            // references, so pArgTuple owned (and just released) them
            //			Py_DECREF(pXVec);
            //			Py_DECREF(pYVec);

            if (pValue != NULL) {
                // Fetched but not used yet; clear any lookup error and release the
                // reference so it does not leak
                PyObject* v = PyObject_GetAttrString(pModule, "richTextOutput");
                if (!v) PyErr_Clear();
                output.Format("Result of call: %ld", PyLong_AsLong(pValue));
                Py_XDECREF(v);
                Py_DECREF(pValue);
            }

            //Some error catching
            else {
                Py_DECREF(pFunc);
                Py_DECREF(pModule);
                return handlePyError();
            }
        }
        else {
            if (PyErr_Occurred()) {
                return handlePyError();
            }
        }
        Py_XDECREF(pFunc);
        Py_DECREF(pModule);
    }
    else {
        return handlePyError();
    }
    return output;
}
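
The two conversion loops above are identical apart from the source vector; a helper along these lines (vector_to_tuple is a hypothetical name, not part of the original code) factors them out. PyTuple_SetItem steals the reference returned by PyFloat_FromDouble, which is why only the tuple itself needs to be released on failure.

#include <Python.h>
#include <vector>

// Convert a std::vector<double> to a new Python tuple.
// Returns a new reference, or NULL with a Python error set.
static PyObject* vector_to_tuple(const std::vector<double>& v)
{
    PyObject* tuple = PyTuple_New(static_cast<Py_ssize_t>(v.size()));
    if (!tuple) return NULL;
    for (Py_ssize_t i = 0; i < static_cast<Py_ssize_t>(v.size()); ++i) {
        PyObject* value = PyFloat_FromDouble(v[static_cast<std::size_t>(i)]);
        if (!value) {
            Py_DECREF(tuple);
            return NULL;                       // conversion failed, error already set
        }
        PyTuple_SetItem(tuple, i, value);      // steals the reference to value
    }
    return tuple;
}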
Example #4
FoMo::RenderCube CGAL2D(FoMo::GoftCube goftcube, const double l, const int x_pixel, const int y_pixel, const int lambda_pixel, const double lambda_width)
{
//
// results is an array of at least dimension (x2-x1+1)*(y2-y1+1)*lambda_pixel and must be initialized to zero
//
// determine contributions per pixel

    typedef K::FT                                         Coord_type;
    typedef K::Point_2                                    Point;
    std::map<Point, Coord_type, K::Less_xy_2> peakmap, fwhmmap, losvelmap;
//        typedef CGAL::Data_access< std::map<Point, Coord_type, K::Less_xyz_3 > >  Value_access;

    int commrank;
#ifdef HAVEMPI
    MPI_Comm_rank(MPI_COMM_WORLD,&commrank);
#else
    commrank = 0;
#endif
    //FoMo::DataCube goftcube=object.datacube;
    FoMo::tgrid grid = goftcube.readgrid();
    int ng=goftcube.readngrid();

    // We will calculate the maximum image coordinates by projecting the grid onto the image plane
    // Rotate the grid over an angle -l (around the z-axis)
    // Take the min and max of the resulting coordinates, those are coordinates in the image plane
    if (commrank==0) std::cout << "Rotating coordinates to POS reference... " << std::flush;
    std::vector<double> xacc, yacc, zacc;
    xacc.resize(ng);
    yacc.resize(ng);
    zacc.resize(ng);
    std::vector<double> gridpoint;
    gridpoint.resize(3);
    Point temporarygridpoint;
    // Define the unit vector along the line-of-sight: the LOS is along y
    std::vector<double> unit = {cos(l), sin(l)};
    // Read the physical variables
    FoMo::tphysvar peakvec=goftcube.readvar(0);//Peak intensity
    FoMo::tphysvar fwhmvec=goftcube.readvar(1);// line width, =1 for AIA imaging
    FoMo::tphysvar vx=goftcube.readvar(2);
    FoMo::tphysvar vy=goftcube.readvar(3);
    double losvelval;

// No openmp possible here
// Because of the insertions at the end of the loop, we get segfaults :(
    /*#ifdef _OPENMP
    #pragma omp parallel for
    #endif*/
    for (int i=0; i<ng; i++)
    {
        for (int j=0; j<2; j++)	gridpoint[j]=grid[j][i];
        xacc[i]=gridpoint[0]*cos(l)-gridpoint[1]*sin(l);// rotated grid
        yacc[i]=gridpoint[0]*sin(l)+gridpoint[1]*cos(l);
        temporarygridpoint=Point(grid[0][i],grid[1][i]); //position vector
        // also create the map function_values here
        std::vector<double> velvec = {vx[i], vy[i]};// velocity vector
        losvelval = inner_product(unit.begin(),unit.end(),velvec.begin(),0.0);//velocity along line of sight for position [i]/[ng]
        losvelmap[temporarygridpoint]=Coord_type(losvelval);
        peakmap[temporarygridpoint]=Coord_type(peakvec[i]);
        fwhmmap[temporarygridpoint]=Coord_type(fwhmvec[i]);
        /*		peakmap.insert(make_pair(temporarygridpoint,Coord_type(peakvec[i])));
        		fwhmmap.insert(make_pair(temporarygridpoint,Coord_type(fwhmvec[i])));
        		losvelmap.insert(make_pair(temporarygridpoint,Coord_type(losvelval)));*/
    }
    double minx=*(min_element(xacc.begin(),xacc.end()));
    double maxx=*(max_element(xacc.begin(),xacc.end()));
    double miny=*(min_element(yacc.begin(),yacc.end()));
    double maxy=*(max_element(yacc.begin(),yacc.end()));
    /*	Value_access peak=Value_access(peakmap);
    	Value_access fwhm=Value_access(fwhmmap);
    	Value_access losvel=Value_access(losvelmap);*/
    xacc.clear(); // release the memory
    yacc.clear();
    if (commrank==0) std::cout << "Done!" << std::endl;

    std::string chiantifile=goftcube.readchiantifile();
    double lambda0=goftcube.readlambda0();// lambda0=AIA bandpass for AIA imaging
    double lambda_width_in_A=lambda_width*lambda0/speedoflight;
    Delaunay_triangulation_2 DT=triangulationfrom2Ddatacube(goftcube);
    Delaunay_triangulation_2 * DTpointer=&DT;

    if (commrank==0) std::cout << "Building frame: " << std::flush;
    double x,y,z,intpolpeak,intpolfwhm,intpollosvel,lambdaval,tempintens;
    int li,lj,ind;
    Point p,nearest;
    Delaunay_triangulation_2::Vertex_handle v;
    Delaunay_triangulation_2::Locate_type lt;
    Delaunay_triangulation_2::Face_handle c;
    boost::progress_display show_progress(x_pixel*y_pixel);

    //initialize grids
    FoMo::tgrid newgrid;
    FoMo::tcoord xvec(x_pixel*lambda_pixel),yvec(x_pixel*lambda_pixel),lambdavec(x_pixel*lambda_pixel);
    newgrid.push_back(xvec);
    newgrid.push_back(yvec); // keep the y column so that newgrid[2], written below, exists
    if (lambda_pixel > 1) newgrid.push_back(lambdavec);
    FoMo::tphysvar intens(x_pixel*lambda_pixel,0);

#ifdef _OPENMP
    #pragma omp parallel for schedule(dynamic) private (x,y,p,lt,li,v,nearest,intpolpeak,intpolfwhm,intpollosvel,lambdaval,tempintens,ind)
#endif
    for (int j=0; j<x_pixel; j++)
    {
#ifdef _OPENMP
        #pragma omp task
#endif
        for (int i=0; i<y_pixel; i++) // scanning through ccd
        {
            x = double(j)/(x_pixel-1)*(maxx-minx)+minx;
            y = double(i)/(y_pixel-1)*(maxy-miny)+miny;
            // calculate the interpolation in the original frame of reference
            // i.e. derotate the point using angles -l and -b
            p= {x*cos(l)+y*sin(l),-x*sin(l)+y*cos(l)};

            c=DTpointer->locate(p,lt,li);

            // Only look for the nearest point and interpolate, if the point p is inside the convex hull.
            if (lt!=Delaunay_triangulation_2::OUTSIDE_CONVEX_HULL)
            {
                // is this a critical operation?
                v=DTpointer->nearest_vertex(p,c);
                nearest=v->point();
                /* This is how it is done in the CGAL examples
                 			pair<Coord_type,bool> tmppeak=peak(nearest);
                			pair<Coord_type,bool> tmpfwhm=fwhm(nearest);
                			pair<Coord_type,bool> tmplosvel=losvel(nearest);
                			intpolpeak=tmppeak.first;
                			intpolfwhm=tmpfwhm.first;
                			intpollosvel=tmplosvel.first;*/
                intpolpeak=peakmap[nearest];
                intpolfwhm=fwhmmap[nearest];
                intpollosvel=losvelmap[nearest];
            }
            else
            {
                intpolpeak=0;
            }
            if (lambda_pixel>1)// spectroscopic study
            {
                for (int il=0; il<lambda_pixel; il++) // changed index from global variable l into il [D.Y. 17 Nov 2014]
                {
                    // lambda is made around lambda0, with a width of lambda_width
                    lambdaval=double(il)/(lambda_pixel-1)*lambda_width_in_A-lambda_width_in_A/2.;
                    tempintens=intpolpeak*exp(-pow(lambdaval-intpollosvel/speedoflight*lambda0,2)/pow(intpolfwhm,2)*4.*log(2.));
                    ind=j*lambda_pixel+il;
                    newgrid[0][ind]=x;
                    newgrid[2][ind]=lambdaval+lambda0;
                    // this is critical, but with tasks, the ind is unique for each task, and no collision should occur
                    intens[ind]+=tempintens;// loop over z and lambda [D.Y 17 Nov 2014]
                }
            }

            if (lambda_pixel==1) // AIA imaging study. Algorithm not verified [DY 14 Nov 2014]
            {
                tempintens=intpolpeak;
                ind=j;
                newgrid[0][ind]=x;
                intens[ind]+=tempintens; // loop over z [D.Y 17 Nov 2014]
            }
            // print progress
            ++show_progress;
        }
    }
    if (commrank==0) std::cout << " Done! " << std::endl << std::flush;

    FoMo::RenderCube rendercube(goftcube);
    FoMo::tvars newdata;
    double pathlength=(maxy-miny)/(y_pixel-1);
    intens=FoMo::operator*(pathlength*1e8,intens); // assume that the coordinates are given in Mm, and convert to cm
    newdata.push_back(intens);
    rendercube.setdata(newgrid,newdata);
    rendercube.setrendermethod("CGAL2D");
    rendercube.setresolution(x_pixel,y_pixel,1,lambda_pixel,lambda_width);
    if (lambda_pixel == 1)
    {
        rendercube.setobservationtype(FoMo::Imaging);
    }
    else
    {
        rendercube.setobservationtype(FoMo::Spectroscopic);
    }
    return rendercube;
}
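
The tempintens expression above evaluates a Gaussian emission-line profile centred on the Doppler-shifted line position. A minimal sketch of just that formula, with illustrative parameter names:

#include <cmath>

// Intensity at wavelength offset dlambda from the rest wavelength lambda0:
// the line centre is shifted by losvel/c * lambda0, and the 4*ln(2) factor
// converts the full width at half maximum (fwhm) into a Gaussian width.
double gaussian_profile(double peak, double fwhm, double losvel,
                        double dlambda, double lambda0, double speedoflight)
{
    const double shift = losvel / speedoflight * lambda0;
    return peak * std::exp(-std::pow(dlambda - shift, 2.0)
                           / std::pow(fwhm, 2.0) * 4.0 * std::log(2.0));
}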
Example #5
std::vector<double> tb_lt_invert_Cpp_impl(double t, const std::vector<double>& lambda1,
                                          const std::vector<double>& lambda2, const std::vector<double>& lambda3,
                                          const int Ap1, const int Bp1, const int Cp1, const int direction,
                                          const int nblocks, const double tol, int& Lmax,
                                          ParallelizationScheme& scheme) {

  //  auto start = std::chrono::steady_clock::now();

  const double double_PI  = 3.141592653589793238463, AA = 20.0;
  const int matsize = Ap1*Bp1*Cp1;

  std::vector<mytype::ComplexVector> ig;
  std::vector<double> res(matsize), yvec(matsize);

  for (int i=0; i<matsize; ++i) {
    yvec[i] = lambda1[i] + lambda2[i] + lambda3[i];
  }

  /////////////////////////////////////////////////////////////
  // The following code computes the inverse Laplace transform
  // Algorithm from Abate and Whitt using a Riemann sum
  // Levin transform is used to accelerate the convergence
  /////////////////////////////////////////////////////////////

  ig.resize(Lmax);

  scheme.for_each( boost::make_counting_iterator(0), boost::make_counting_iterator(Lmax),
                   [&](int w) {
                     mytype::ComplexNumber s(AA/(2*t),double_PI*(w+1)/t);
                     ig[w].resize(matsize);
                     tb_lt_Cpp(s,lambda1,lambda2,lambda3,Ap1,Bp1,Cp1,direction,yvec,ig[w]);
                   });

  mytype::ComplexNumber s(AA/(2*t),0.0);
  mytype::ComplexVector psum0(matsize);
  tb_lt_Cpp(s,lambda1,lambda2,lambda3,Ap1,Bp1,Cp1,direction,yvec,psum0);

  std::for_each(boost::make_counting_iterator(0), boost::make_counting_iterator(matsize),
                [&](int i) {
                  Levin levin(tol); // A struct for Levin transform
                  double term = 1e16, sdiff = 1e16;
                  int k = 1;
                  double psum = real(psum0[i])/(2*t);
                  double sk = 0.0, sk1 = 0.0;
                  while ((std::abs(sdiff) > 1e-16)||(std::abs(term)>1e-3)) {
                    double sgn = (k%2 == 0) ? 1.0 : -1.0;
                    term = sgn*real(ig[k-1][i])/t;
                    psum += term;
                    double omega = k*term;
                    sk = levin.next(psum,omega,1.0);
                    if (k>1) sdiff = sk - sk1;
                    k++;
                    sk1 = sk;
                    if (k > Lmax) {
                      ig.resize(Lmax+nblocks);

                      scheme.for_each( boost::make_counting_iterator(0), boost::make_counting_iterator(nblocks),
                                       [&](int w) {
                                         mytype::ComplexNumber s(AA/(2*t),double_PI*(w+Lmax+1)/t);
                                         ig[w+Lmax].resize(matsize);
                                         tb_lt_Cpp(s,lambda1,lambda2,lambda3,Ap1,Bp1,Cp1,direction,yvec,ig[w+Lmax]);
                                       });

                      Lmax += nblocks;
                    }
                  }

                  res[i] = sk1*exp(AA/2);
                });

  //    Rcpp::Rcout << "Lmax: " << Lmax;

  //  auto end = std::chrono::steady_clock::now();
  //
  //  using TimingUnits = std::chrono::microseconds;
  //  Rcpp::Rcout << "Time: " << std::chrono::duration_cast<TimingUnits>(end - start).count() << std::endl;

  return res; // return by value so the compiler can apply NRVO
}
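
For reference, here is a scalar sketch of the Abate and Whitt Riemann-sum inversion that the code above applies element-wise, without the Levin acceleration and with a fixed number of terms; invert_laplace, nterms and the choice A = 20.0 are illustrative.

#include <cmath>
#include <complex>
#include <functional>

// Approximate f(t) from its Laplace transform F via the Riemann-sum formula
//   f(t) ~ e^{A/2} * [ Re F(A/(2t)) / (2t)
//                      + (1/t) * sum_{k>=1} (-1)^k Re F(A/(2t) + i*pi*k/t) ],
// which is exactly the partial sum that the Levin transform accelerates above.
double invert_laplace(const std::function<std::complex<double>(std::complex<double>)>& F,
                      double t, int nterms = 200, double A = 20.0)
{
    const double pi = 3.141592653589793238463;
    double sum = std::real(F(std::complex<double>(A / (2.0 * t), 0.0))) / (2.0 * t);
    for (int k = 1; k <= nterms; ++k) {
        const double sign = (k % 2 == 0) ? 1.0 : -1.0;           // (-1)^k
        const std::complex<double> s(A / (2.0 * t), pi * k / t); // contour sample points
        sum += sign * std::real(F(s)) / t;
    }
    return sum * std::exp(A / 2.0);
}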