// Destructor: drop the reference to the warp function taken at construction.
NonseparableTransformation::~NonseparableTransformation()
{
  _VERBOSE("NonseparableTransformation::~NonseparableTransformation");
  Py_DECREF(_funcxy);
}
// Return the stored FuncXY warp function as a (new-reference) Python object.
Py::Object
NonseparableTransformation::get_funcxy(const Py::Tuple & args)
{
  _VERBOSE("NonseparableTransformation::get_funcxy");
  args.verify_length(0);
  return Py::Object(_funcxy);
}
// Destructor: release both per-axis functions acquired by the constructor.
SeparableTransformation::~SeparableTransformation()
{
  _VERBOSE("SeparableTransformation::~SeparableTransformation");
  Py_DECREF(_funcx);
  Py_DECREF(_funcy);
}
// Construct from two bounding boxes and a joint (x, y) warp function;
// takes a reference on funcxy (released in the destructor).
NonseparableTransformation::NonseparableTransformation(Bbox *b1, Bbox *b2,
                                                       FuncXY *funcxy)
  : BBoxTransformation(b1, b2),
    _funcxy(funcxy)
{
  _VERBOSE("NonseparableTransformation::NonseparableTransformation");
  Py_INCREF(funcxy);
}
// Destructor: release the input and output boxes held since construction.
BBoxTransformation::~BBoxTransformation()
{
  _VERBOSE("BBoxTransformation::~BBoxTransformation");
  Py_DECREF(_b1);
  Py_DECREF(_b2);
}
// Return the output bounding box as a (new-reference) Python object.
Py::Object
BBoxTransformation::get_bbox2(const Py::Tuple & args)
{
  _VERBOSE("BBoxTransformation::get_bbox2");
  args.verify_length(0);
  return Py::Object(_b2);
}
// this code is heavily adapted from the paint license, which is in // the file paint.license (BSD compatible) included in this // distribution. TODO, add license file to MANIFEST.in and CVS Py::Object RendererAgg::write_png(const Py::Tuple& args) { //small memory leak in this function - JDH 2004-06-08 _VERBOSE("RendererAgg::write_png"); args.verify_length(1); std::string fileName = Py::String(args[0]); const char *file_name = fileName.c_str(); FILE *fp; png_structp png_ptr; png_infop info_ptr; struct png_color_8_struct sig_bit; png_uint_32 row; png_bytep row_pointers[height]; for (row = 0; row < height; ++row) { row_pointers[row] = pixBuffer + row * width * 4; } fp = fopen(file_name, "wb"); if (fp == NULL) throw Py::RuntimeError("could not open file"); png_ptr = png_create_write_struct(PNG_LIBPNG_VER_STRING, NULL, NULL, NULL); if (png_ptr == NULL) { fclose(fp); throw Py::RuntimeError("could not create write struct"); } info_ptr = png_create_info_struct(png_ptr); if (info_ptr == NULL) { fclose(fp); png_destroy_write_struct(&png_ptr, (png_infopp)NULL); throw Py::RuntimeError("could not create info struct"); } if (setjmp(png_ptr->jmpbuf)) { fclose(fp); png_destroy_write_struct(&png_ptr, (png_infopp)NULL); throw Py::RuntimeError("error building image"); } png_init_io(png_ptr, fp); png_set_IHDR(png_ptr, info_ptr, width, height, 8, PNG_COLOR_TYPE_RGB_ALPHA, PNG_INTERLACE_NONE, PNG_COMPRESSION_TYPE_BASE, PNG_FILTER_TYPE_BASE); // this a a color image! sig_bit.gray = 0; sig_bit.red = 8; sig_bit.green = 8; sig_bit.blue = 8; /* if the image has an alpha channel then */ sig_bit.alpha = 8; png_set_sBIT(png_ptr, info_ptr, &sig_bit); png_write_info(png_ptr, info_ptr); png_write_image(png_ptr, row_pointers); png_write_end(png_ptr, info_ptr); png_destroy_write_struct(&png_ptr, (png_infopp)NULL); fclose(fp); return Py::Object(); }
// Lazy binary operation over two LazyValue operands; holds a reference
// to each operand for the lifetime of this node.
BinOp::BinOp(LazyValue* lhs, LazyValue* rhs, int opcode)
  : _lhs(lhs),
    _rhs(rhs),
    _opcode(opcode)
{
  _VERBOSE("BinOp::BinOp");
  Py_INCREF(lhs);
  Py_INCREF(rhs);
}
// Blend a rasterized freetype glyph image (font->image, filled in by an
// earlier draw_glyphs_to_bitmap-style call — confirm against caller) into
// the pixel buffer at (x, y), using the gc's color and clip rectangle.
// args: (FT2Font, x, y, gc)
Py::Object
RendererAgg::draw_text(const Py::Tuple& args) {
  _VERBOSE("RendererAgg::draw_text");

  args.verify_length(4);

  FT2Font *font = static_cast<FT2Font*>(args[0].ptr());

  int x = Py::Int( args[1] );
  int y = Py::Int( args[2] );
  Py::Object gc = args[3];

  Py::Object o ( gc.getAttr( "_cliprect" ) );
  bool useClip = o.ptr()!=Py_None;
  // clip bounds default to the whole canvas; l/b/r/t are in mpl
  // (bottom-left origin) coordinates
  double l = 0;
  double b = 0;
  double r = width;
  double t = height;
  if (useClip) {
    // _cliprect is an (l, b, w, h) sequence
    Py::SeqBase<Py::Object> rect( o );
    l = Py::Float(rect[0]) ;
    b = Py::Float(rect[1]) ;
    double w = Py::Float(rect[2]) ;
    double h = Py::Float(rect[3]) ;
    r = l+w;
    t = b+h;
    //std::cout << b << " " << h << " " << " " << t << std::endl;
  }

  agg::rgba color = get_color(gc);
  // convert the normalized 0..1 rgba into the 0..255 pixel-format color
  pixfmt::color_type p;
  p.r = int(255*color.r);
  p.b = int(255*color.b);
  p.g = int(255*color.g);
  p.a = int(255*color.a);

  //y = y-font->image.height;
  unsigned thisx, thisy;
  // Blend each coverage byte of the glyph bitmap into the buffer.  The y
  // clip test mirrors the rect vertically (height-t .. height-b),
  // presumably because the buffer's y axis is flipped relative to the
  // clip rect — confirm against the renderer's coordinate convention.
  // NOTE(review): thisx/thisy are unsigned; a glyph positioned at a
  // negative coordinate would wrap before the clip test — verify callers
  // never pass such positions.
  for (size_t i=0; i<font->image.width; ++i) {
    for (size_t j=0; j<font->image.height; ++j) {
      thisx = i+x+font->image.offsetx;
      thisy = j+y+font->image.offsety;
      if (thisx<l || thisx>=r)  continue;
      if (thisy<height-t || thisy>=height-b) continue;
      pixFmt->blend_pixel
        (thisx, thisy, p, font->image.buffer[i + j*font->image.width]);
    }
  }

  /* bbox the text for debug purposes

  agg::path_storage path;

  path.move_to(x, y);
  path.line_to(x, y+font->image.height);
  path.line_to(x+font->image.width, y+font->image.height);
  path.line_to(x+font->image.width, y);
  path.close_polygon();

  agg::rgba edgecolor(1,0,0,1);

  //now fill the edge
  agg::conv_stroke<agg::path_storage> stroke(path);
  stroke.width(1.0);
  theRenderer->color(edgecolor);
  //self->theRasterizer->gamma(agg::gamma_power(gamma));
  theRasterizer->add_path(stroke);
  theRasterizer->render(*slineP8, *theRenderer);

  */

  return Py::Object();
}
// Draw a connected polyline through the points given by the x and y
// sequences, using the line properties found on the graphics context
// (width, cap, join, color, dashes, antialiasing).
//
// Fix: removed a leftover debug print ('adding path!') that wrote to
// stdout on every solid-line draw.
Py::Object
RendererAgg::draw_lines(const Py::Tuple& args) {
  _VERBOSE("RendererAgg::draw_lines");
  args.verify_length(3);
  Py::Object gc = args[0];
  Py::SeqBase<Py::Object> x = args[1];  //todo: use numerix for efficiency
  Py::SeqBase<Py::Object> y = args[2];  //todo: use numerix for efficiency

  set_clip_rectangle(gc);
  size_t Nx = x.length();
  size_t Ny = y.length();

  if (Nx!=Ny)
    throw Py::ValueError("x and y must be equal length sequences");
  if (Nx<2)
    throw Py::ValueError("x and y must have length >= 2");

  agg::gen_stroke::line_cap_e cap = get_linecap(gc);
  agg::gen_stroke::line_join_e join = get_joinstyle(gc);
  double lw = points_to_pixels ( gc.getAttr("_linewidth") ) ;
  agg::rgba color = get_color(gc);

  // process the dashes; dashes[0] is None for a solid line
  Py::Tuple dashes = get_dashes(gc);
  bool useDashes = dashes[0].ptr() != Py_None;
  double offset = 0;
  Py::SeqBase<Py::Object> dashSeq;
  if (useDashes) {
    //TODO: use offset
    offset = points_to_pixels_snapto(dashes[0]);
    dashSeq = dashes[1];  // the on/off dash lengths
  }

  agg::path_storage path;
  int isaa = antialiased(gc);

  if (Nx==2) {
    // len(2) lines are probably grid lines or ticks, so snap exactly
    // horizontal/vertical segments to pixel centers for crisp rendering
    double x0 = Py::Float(x[0]);
    double y0 = Py::Float(y[0]);
    double x1 = Py::Float(x[1]);
    double y1 = Py::Float(y[1]);
    if (x0==x1) {
      x0 = (int)x0 + 0.5;
      x1 = (int)x1 + 0.5;
    }
    if (y0==y1) {
      y0 = (int)y0 + 0.5;
      y1 = (int)y1 + 0.5;
    }
    y0 = height-y0;  //flipy
    y1 = height-y1;
    path.move_to(x0, y0);
    path.line_to(x1, y1);
  }
  else {
    double thisX = Py::Float( x[0] );
    double thisY = Py::Float( y[0] );
    thisY = height - thisY;  //flipy
    path.move_to(thisX, thisY);
    for (size_t i=1; i<Nx; ++i) {
      thisX = Py::Float( x[i] );
      thisY = Py::Float( y[i] );
      thisY = height - thisY;  //flipy
      path.line_to(thisX, thisY);
    }
  }

  if (! useDashes ) {
    // solid stroke
    agg::conv_stroke<agg::path_storage> stroke(path);
    stroke.line_cap(cap);
    stroke.line_join(join);
    stroke.width(lw);
    theRasterizer->add_path(stroke);
  }
  else {
    // set the dashes  //TODO: scale for DPI
    size_t N = dashSeq.length();
    if (N%2 != 0 )
      throw Py::ValueError("dashes must be an even length sequence");

    typedef agg::conv_dash<agg::path_storage> dash_t;
    dash_t dash(path);
    double on, off;
    for (size_t i=0; i<N/2; i+=1) {
      on  = points_to_pixels_snapto(dashSeq[2*i]);
      off = points_to_pixels_snapto(dashSeq[2*i+1]);
      dash.add_dash(on, off);
    }
    agg::conv_stroke<dash_t> stroke(dash);
    stroke.line_cap(cap);
    stroke.line_join(join);
    stroke.width(lw);
    theRasterizer->add_path(stroke);
  }

  // render antialiased or aliased according to the gc
  if ( isaa ) {
    theRenderer->color(color);
    theRasterizer->render(*slineP8, *theRenderer);
  }
  else {
    rendererBin->color(color);
    theRasterizer->render(*slineBin, *rendererBin);
  }
  return Py::Object();
}
// Draw a collection of regular polygons: a single unit-vertex outline
// (verts), replicated at every offset with a per-instance scale,
// facecolor, edgecolor, linewidth and antialiasing flag; the property
// sequences are cycled modulo their lengths.
// args: (clipbox, offsets, transOffset, verts, sizes, facecolors,
//        edgecolors, linewidths, antialiaseds)
Py::Object
RendererAgg::draw_regpoly_collection(const Py::Tuple& args) {
  _VERBOSE("RendererAgg::draw_regpoly_collection");
  args.verify_length(9);

  set_clip_from_bbox(args[0]);

  Py::SeqBase<Py::Object> offsets = args[1];

  // this is throwing even though the instance is a Transformation!
  //if (!Transformation::check(args[2]))
  // throw Py::TypeError("RendererAgg::draw_regpoly_collection(clipbox, offsets, transOffset, verts, ...) expected a Transformation instance for transOffset");
  Transformation* transOffset = static_cast<Transformation*>(args[2].ptr());

  transOffset->eval_scalars();

  Py::SeqBase<Py::Object> verts = args[3];
  Py::SeqBase<Py::Object> sizes = args[4];
  Py::SeqBase<Py::Object> facecolors = args[5];
  Py::SeqBase<Py::Object> edgecolors = args[6];
  Py::SeqBase<Py::Object> linewidths = args[7];
  Py::SeqBase<Py::Object> antialiaseds = args[8];

  size_t Noffsets = offsets.length();
  size_t Nverts = verts.length();
  size_t Nsizes = sizes.length();
  size_t Nface = facecolors.length();
  size_t Nedge = edgecolors.length();
  size_t Nlw = linewidths.length();
  size_t Naa = antialiaseds.length();

  double thisx, thisy;

  // dump the x.y vertices into a double array for faster access
  // NOTE(review): these are variable-length arrays (a compiler
  // extension, not standard C++); a huge Nverts would overflow the stack
  double xverts[Nverts];
  double yverts[Nverts];
  Py::Tuple xy;
  for (size_t i=0; i<Nverts; ++i) {
    xy = Py::Tuple(verts[i]);
    xverts[i] = Py::Float(xy[0]);
    yverts[i] = Py::Float(xy[1]);
  }

  std::pair<double, double> offsetPair;
  for (size_t i=0; i<Noffsets; ++i) {
    // transform this instance's offset into pixel space
    Py::Tuple pos = Py::Tuple(offsets[i]);
    double xo = Py::Float(pos[0]);
    double yo = Py::Float(pos[1]);
    offsetPair = transOffset->operator()(xo, yo);

    double scale = Py::Float(sizes[i%Nsizes]);

    // build the polygon path: scale the unit verts, add the offset,
    // and flip y (height - y)
    agg::path_storage path;
    for (size_t j=0; j<Nverts; ++j) {
      thisx = scale*xverts[j] + offsetPair.first;
      thisy = scale*yverts[j] + offsetPair.second;
      thisy = height - thisy;
      if (j==0) path.move_to(thisx, thisy);
      else path.line_to(thisx, thisy);
    }
    path.close_polygon();
    int isaa = Py::Int(antialiaseds[i%Naa]);

    // get the facecolor and render
    Py::Tuple rgba = Py::Tuple(facecolors[ i%Nface]);
    double r = Py::Float(rgba[0]);
    double g = Py::Float(rgba[1]);
    double b = Py::Float(rgba[2]);
    double a = Py::Float(rgba[3]);
    if (a>0) { //only render if alpha>0
      agg::rgba facecolor(r, g, b, a);
      theRasterizer->add_path(path);
      if (isaa) {
        theRenderer->color(facecolor);
        theRasterizer->render(*slineP8, *theRenderer);
      }
      else {
        rendererBin->color(facecolor);
        theRasterizer->render(*slineBin, *rendererBin);
      }
    }  //renderer face

    // get the edgecolor and render
    rgba = Py::Tuple(edgecolors[ i%Nedge]);
    r = Py::Float(rgba[0]);
    g = Py::Float(rgba[1]);
    b = Py::Float(rgba[2]);
    a = Py::Float(rgba[3]);
    if (a>0) { //only render if alpha>0
      agg::rgba edgecolor(r, g, b, a);
      agg::conv_stroke<agg::path_storage> stroke(path);
      //stroke.line_cap(cap);
      //stroke.line_join(join);
      double lw = points_to_pixels ( Py::Float( linewidths[i%Nlw] ) );
      stroke.width(lw);
      theRasterizer->add_path(stroke);

      // render antialiased or not
      if ( isaa ) {
        theRenderer->color(edgecolor);
        theRasterizer->render(*slineP8, *theRenderer);
      }
      else {
        rendererBin->color(edgecolor);
        theRasterizer->render(*slineBin, *rendererBin);
      }
    } //rendered edge

  } // for every poly
  return Py::Object();
}
// Update the box limits from the numerix arrays x and y.
// args: (x, y, ignore); when ignore is true the current bounds are
// discarded and min/max are re-seeded from the first non-nan values.
//
// Fix: the temporary contiguous arrays were leaked on three early exits
// (y conversion failure, length mismatch, and empty input) — they are
// now released before throwing/returning.
Py::Object
Bbox::update_numerix(const Py::Tuple &args) {
  _VERBOSE("Bbox::update_numerix");

  args.verify_length(3);

  Py::Object xo = args[0];
  Py::Object yo = args[1];

  PyArrayObject *x = (PyArrayObject *) PyArray_ContiguousFromObject(xo.ptr(), PyArray_DOUBLE, 1, 1);
  if (x==NULL)
    throw Py::TypeError("Bbox::update_numerix expected numerix array");

  PyArrayObject *y = (PyArrayObject *) PyArray_ContiguousFromObject(yo.ptr(), PyArray_DOUBLE, 1, 1);
  if (y==NULL) {
    Py_XDECREF(x);  // was leaked on this error path
    throw Py::TypeError("Bbox::update_numerix expected numerix array");
  }

  size_t Nx = x->dimensions[0];
  size_t Ny = y->dimensions[0];

  if (Nx!=Ny) {
    Py_XDECREF(x);
    Py_XDECREF(y);
    throw Py::ValueError("x and y must be equal length sequences");
  }

  if (Nx==0) {
    // nothing to update, but still release the temporaries
    Py_XDECREF(x);
    Py_XDECREF(y);
    return Py::Object();
  }

  double minx = _ll->xval();
  double maxx = _ur->xval();
  double miny = _ll->yval();
  double maxy = _ur->yval();

  double thisx, thisy;
  int ignore = Py::Int(args[2]);
  if (ignore) {
    // don't use current bounds when updating box if ignore==1:
    // re-seed min/max from the first non-nan x and y values
    int xok=0;
    int yok=0;
    for (size_t i=0; i< Nx; ++i) {
      thisx = *(double *)(x->data + i*x->strides[0]);
      thisy = *(double *)(y->data + i*y->strides[0]);
      if (!xok && !MPL_isnan64(thisx)) {
        minx = thisx;
        maxx = thisx;
        xok = 1;
      }
      if (!yok && !MPL_isnan64(thisy)) {
        miny = thisy;
        maxy = thisy;
        yok = 1;
      }
      if (xok && yok) break;
    }
  }

  // expand the running bounds over all points
  for (size_t i=0; i< Nx; ++i) {
    thisx = *(double *)(x->data + i*x->strides[0]);
    thisy = *(double *)(y->data + i*y->strides[0]);
    _posx.update(thisx);
    _posy.update(thisy);
    if (thisx<minx) minx=thisx;
    if (thisx>maxx) maxx=thisx;
    if (thisy<miny) miny=thisy;
    if (thisy>maxy) maxy=thisy;
  }

  Py_XDECREF(x);
  Py_XDECREF(y);

  _ll->x_api()->set_api(minx);
  _ll->y_api()->set_api(miny);
  _ur->x_api()->set_api(maxx);
  _ur->y_api()->set_api(maxy);
  return Py::Object();
}
// Destructor: nothing to release; trace only.
Value::~Value()
{
  _VERBOSE("Value::~Value");
}
// Transform the numerix arrays x and y elementwise through this
// transformation, returning a (newx, newy) tuple of fresh double arrays.
//
// Fixes: the temporary input arrays were leaked when the y conversion
// failed or the lengths mismatched; retx was leaked when allocating rety
// failed; and the rety failure message wrongly said "x array".
Py::Object
Transformation::numerix_x_y(const Py::Tuple & args) {
  _VERBOSE("Transformation::numerix_x_y");

  args.verify_length(2);

  Py::Object xo = args[0];
  Py::Object yo = args[1];

  PyArrayObject *x = (PyArrayObject *) PyArray_ContiguousFromObject(xo.ptr(), PyArray_DOUBLE, 1, 1);
  if (x==NULL)
    throw Py::TypeError("Transformation::numerix_x_y expected numerix array");

  PyArrayObject *y = (PyArrayObject *) PyArray_ContiguousFromObject(yo.ptr(), PyArray_DOUBLE, 1, 1);
  if (y==NULL) {
    Py_XDECREF(x);  // was leaked on this error path
    throw Py::TypeError("Transformation::numerix_x_y expected numerix array");
  }

  size_t Nx = x->dimensions[0];
  size_t Ny = y->dimensions[0];

  if (Nx!=Ny) {
    Py_XDECREF(x);
    Py_XDECREF(y);
    throw Py::ValueError("x and y must be equal length sequences");
  }

  // evaluate the lazy objects
  if (!_frozen) eval_scalars();

  int dimensions[1];
  dimensions[0] = Nx;

  PyArrayObject *retx = (PyArrayObject *)PyArray_FromDims(1,dimensions,PyArray_DOUBLE);
  if (retx==NULL) {
    Py_XDECREF(x);
    Py_XDECREF(y);
    throw Py::RuntimeError("Could not create return x array");
  }

  PyArrayObject *rety = (PyArrayObject *)PyArray_FromDims(1,dimensions,PyArray_DOUBLE);
  if (rety==NULL) {
    Py_XDECREF(x);
    Py_XDECREF(y);
    Py_XDECREF(retx);  // was leaked on this error path
    throw Py::RuntimeError("Could not create return y array");
  }

  for (size_t i=0; i< Nx; ++i) {
    double thisx = *(double *)(x->data + i*x->strides[0]);
    double thisy = *(double *)(y->data + i*y->strides[0]);
    // operator() deposits the transformed point in the member pair xy
    this->operator()(thisx, thisy);
    *(double *)(retx->data + i*retx->strides[0]) = xy.first;
    *(double *)(rety->data + i*rety->strides[0]) = xy.second;
  }

  Py_XDECREF(x);
  Py_XDECREF(y);

  // the tuple takes its own references; drop ours afterwards
  Py::Tuple ret(2);
  ret[0] = Py::Object((PyObject*)retx);
  ret[1] = Py::Object((PyObject*)rety);
  Py_XDECREF(retx);
  Py_XDECREF(rety);
  return ret;
}
// Draw a collection of independent line segments, each transformed by
// `transform` and optionally shifted by a transformed per-instance
// offset; colors, linewidths and antialiasing flags cycle modulo their
// sequence lengths.
// args: (segments, transform, clipbox, colors, linewidths, antialiaseds,
//        offsets-or-None, transOffset)
Py::Object
RendererAgg::draw_line_collection(const Py::Tuple& args) {
  _VERBOSE("RendererAgg::draw_line_collection");

  args.verify_length(8);

  //segments, trans, clipbox, colors, linewidths, antialiaseds
  Py::SeqBase<Py::Object> segments = args[0];

  /* this line is broken, mysteriously
     if (!Transformation::check(args[1]))
     throw Py::TypeError("RendererAgg::draw_line_collection(segments, transform, ...) expected a Transformation instance for transform");
  */
  Transformation* transform = static_cast<Transformation*>(args[1].ptr());

  set_clip_from_bbox(args[2]);

  Py::SeqBase<Py::Object> colors = args[3];
  Py::SeqBase<Py::Object> linewidths = args[4];
  Py::SeqBase<Py::Object> antialiaseds = args[5];

  bool usingOffsets = args[6].ptr()!=Py_None;
  Py::SeqBase<Py::Object> offsets;
  Transformation* transOffset=NULL;
  if (usingOffsets) {
    /* this line is broken, mysteriously
       if (!Transformation::check(args[7]))
       throw Py::TypeError("RendererAgg::draw_line_collection expected a Transformation instance for transOffset");
    */
    offsets = Py::SeqBase<Py::Object>(args[6]);
    transOffset = static_cast<Transformation*>(args[7].ptr());
  }

  size_t Nsegments = segments.length();
  size_t Nc = colors.length();
  size_t Nlw = linewidths.length();
  size_t Naa = antialiaseds.length();
  size_t Noffsets = 0;
  size_t N = Nsegments;

  // when offsets outnumber segments, iterate once per offset and cycle
  // the segments
  if (usingOffsets) {
    Noffsets = offsets.length();
    if (Noffsets>Nsegments) N = Noffsets;
  }

  Py::Tuple xyo, pos;
  for (size_t i=0; i<N; ++i) {
    // each segment is an (x0, y0, x1, y1) tuple
    pos = Py::Tuple(segments[i%Nsegments]);
    double x0 = Py::Float(pos[0]);
    double y0 = Py::Float(pos[1]);
    double x1 = Py::Float(pos[2]);
    double y1 = Py::Float(pos[3]);

    // transform both endpoints
    std::pair<double, double> xy = transform->operator()(x0,y0);
    x0 = xy.first;
    y0 = xy.second;

    xy = transform->operator()(x1,y1);
    x1 = xy.first;
    y1 = xy.second;

    if (usingOffsets) {
      // shift both endpoints by the transformed offset
      xyo = Py::Tuple(offsets[i%Noffsets]);
      double xo = Py::Float(xyo[0]);
      double yo = Py::Float(xyo[1]);
      std::pair<double, double> xy = transOffset->operator()(xo,yo);
      x0 += xy.first;
      y0 += xy.second;
      x1 += xy.first;
      y1 += xy.second;
    }

    //snap x to pixel for vertical lines
    if (x0==x1) {
      x0 = (int)x0 + 0.5;
      x1 = (int)x1 + 0.5;
    }

    //snap y to pixel for horizontal lines
    if (y0==y1) {
      y0 = (int)y0 + 0.5;
      y1 = (int)y1 + 0.5;
    }

    // build and stroke the segment path (y flipped: height - y)
    agg::path_storage path;
    path.move_to(x0, height-y0);
    path.line_to(x1, height-y1);

    agg::conv_stroke<agg::path_storage> stroke(path);
    //stroke.line_cap(cap);
    //stroke.line_join(join);
    double lw = points_to_pixels ( Py::Float( linewidths[i%Nlw] ) );
    stroke.width(lw);
    theRasterizer->add_path(stroke);

    // get the color and render
    Py::Tuple rgba = Py::Tuple(colors[ i%Nc]);
    double r = Py::Float(rgba[0]);
    double g = Py::Float(rgba[1]);
    double b = Py::Float(rgba[2]);
    double a = Py::Float(rgba[3]);
    agg::rgba color(r, g, b, a);

    // render antialiased or not
    int isaa = Py::Int(antialiaseds[i%Naa]);
    if ( isaa ) {
      theRenderer->color(color);
      theRasterizer->render(*slineP8, *theRenderer);
    }
    else {
      rendererBin->color(color);
      theRasterizer->render(*slineBin, *rendererBin);
    }
  } //for every segment
  return Py::Object();
}
// Base-class stub: only nonseparable transforms expose a funcxy.
Py::Object
Transformation::get_funcxy(const Py::Tuple & args)
{
  _VERBOSE("Transformation::get_funcxy");
  throw Py::RuntimeError("This transformation does not support get_funcxy");
  return Py::Object();  // unreachable; keeps some compilers quiet
}
// Base-class stub: only bbox-based transformations support set_bbox2.
// Fix: the error message previously said "set_bbox1", misreporting
// which method was unsupported.
Py::Object
Transformation::set_bbox2(const Py::Tuple & args)
{
  _VERBOSE("Transformation::set_bbox2");
  throw Py::RuntimeError("This transformation does not support set_bbox2");
  return Py::Object();  // unreachable
}
// Destructor: nothing to release; trace only.
Func::~Func()
{
  _VERBOSE("Func::~Func");
}
// Base-class stub: only affine transformations can be expressed as a
// 6-element vector.
Py::Object
Transformation::as_vec6(const Py::Tuple & args)
{
  _VERBOSE("Transformation::as_vec6");
  throw Py::RuntimeError("This transformation does not support as_vec6");
  return Py::Object();  // unreachable; keeps some compilers quiet
}
// Destructor: release the corner points held since construction.
Bbox::~Bbox()
{
  _VERBOSE("Bbox::~Bbox");
  Py_DECREF(_ll);
  Py_DECREF(_ur);
}
// Python-visible deepcopy: delegate to the internal clone helper.
Py::Object
Bbox::deepcopy(const Py::Tuple &args)
{
  _VERBOSE("Bbox::deepcopy");
  args.verify_length(0);
  return _deepcopy();
}
// Destructor: release both interval endpoints.
Interval::~Interval()
{
  _VERBOSE("Interval::~Interval");
  Py_DECREF(_val1);
  Py_DECREF(_val2);
}
// Bounding box defined by its lower-left and upper-right corner points;
// holds a reference to each corner.
Bbox::Bbox(Point* ll, Point* ur)
  : _ll(ll),
    _ur(ur)
{
  _VERBOSE("Bbox::Bbox");
  Py_INCREF(ll);
  Py_INCREF(ur);
};
// 2D point backed by two lazy values; holds a reference to each.
Point::Point(LazyValue* x, LazyValue* y)
  : _x(x),
    _y(y)
{
  _VERBOSE("Point::Point");
  Py_INCREF(x);
  Py_INCREF(y);
}
// Interval spanning two lazy endpoint values; holds a reference to each.
Interval::Interval(LazyValue* val1, LazyValue* val2)
  : _val1(val1),
    _val2(val2)
{
  _VERBOSE("Interval::Interval");
  Py_INCREF(val1);
  Py_INCREF(val2);
};
// Python-visible deepcopy: clone both bounding boxes and build a new
// transformation sharing the same funcxy (the constructor takes its own
// reference on it).
Py::Object
NonseparableTransformation::deepcopy(const Py::Tuple &args)
{
  _VERBOSE("NonseparableTransformation::deepcopy");
  args.verify_length(0);
  // keep the cloned boxes alive in Py::Object handles until the new
  // transformation has taken its references
  Py::Object b1copy = _b1->_deepcopy();
  Py::Object b2copy = _b2->_deepcopy();
  return Py::asObject(new NonseparableTransformation(
      static_cast<Bbox*>(b1copy.ptr()),
      static_cast<Bbox*>(b2copy.ptr()),
      _funcxy));
}
void NonseparableTransformation::eval_scalars(void) { _VERBOSE("NonseparableTransformation::eval_scalars"); std::pair<double, double> xyminIn = _funcxy-> operator()( _b1->ll_api()->xval(), _b1->ll_api()->yval()); std::pair<double, double> xymaxIn = _funcxy-> operator()( _b1->ur_api()->xval(), _b1->ur_api()->yval()); std::pair<double, double> xyminOut( _b2->ll_api()->xval(), _b2->ll_api()->yval() ); std::pair<double, double> xymaxOut( _b2->ur_api()->xval(), _b2->ur_api()->yval() ); double widthIn = xymaxIn.first - xyminIn.first; double widthOut = xymaxOut.first - xyminOut.first; double heightIn = xymaxIn.second - xyminIn.second; double heightOut = xymaxOut.second - xyminOut.second; if (widthIn==0) throw Py::ZeroDivisionError("NonseparableTransformation::eval_scalars xin interval is zero; cannot transform"); if (heightIn==0) throw Py::ZeroDivisionError("NonseparableTransformation::eval_scalars yin interval is zero; cannot transform"); _sx = widthOut/widthIn; _sy = heightOut/heightIn; _tx = -xyminIn.first*_sx + xyminOut.first; _ty = -xyminIn.second*_sy + xyminOut.second; /* std::cout <<"corners in " << xyminIn.first << " " << xyminIn.second << " " << xymaxIn.first << " " << xymaxIn.second << std::endl; std::cout <<"w,h in " << widthIn << " " << heightIn << std::endl; std::cout <<"heightout, heightin = " << heightOut << " " << heightIn << std::endl; std::cout <<"sx,sy,tx,ty = " << _sx << " " << _sy << " " << _tx << " " << _ty << std::endl; */ //now do the inverse mapping if ( (widthOut==0) || (widthOut==0) ) { _invertible = false; } else { _isx = widthIn/widthOut; _isy = heightIn/heightOut; _itx = -xyminOut.first*_isx + xyminIn.first; _ity = -xyminOut.second*_isy + xyminIn.second; } if (_usingOffset) { _transOffset->eval_scalars(); _transOffset->operator()(_xo, _yo); _xot = _transOffset->xy.first; _yot = _transOffset->xy.second; } }
// Destructor: release both operands acquired by the constructor.
BinOp::~BinOp()
{
  _VERBOSE("BinOp::~BinOp");
  Py_DECREF(_lhs);
  Py_DECREF(_rhs);
}
// Draw a collection of polygons, each with its own vertex list,
// transformed by `transform` and optionally shifted by a transformed
// per-instance offset; face/edge colors, linewidths and antialiasing
// flags cycle modulo their sequence lengths.
// args: (verts, transform, clipbox, facecolors, edgecolors, linewidths,
//        antialiaseds, offsets-or-None, transOffset)
Py::Object
RendererAgg::draw_poly_collection(const Py::Tuple& args) {
  _VERBOSE("RendererAgg::draw_poly_collection");

  args.verify_length(9);

  Py::SeqBase<Py::Object> verts = args[0];

  //todo: fix transformation check
  Transformation* transform = static_cast<Transformation*>(args[1].ptr());
  transform->eval_scalars();

  set_clip_from_bbox(args[2]);

  Py::SeqBase<Py::Object> facecolors = args[3];
  Py::SeqBase<Py::Object> edgecolors = args[4];
  Py::SeqBase<Py::Object> linewidths = args[5];
  Py::SeqBase<Py::Object> antialiaseds = args[6];

  Py::SeqBase<Py::Object> offsets;
  Transformation* transOffset = NULL;
  bool usingOffsets = args[7].ptr() != Py_None;
  if (usingOffsets) {
    offsets = args[7];
    //todo: fix transformation check
    transOffset = static_cast<Transformation*>(args[8].ptr());
    transOffset->eval_scalars();
  }

  size_t Noffsets = offsets.length();
  size_t Nverts = verts.length();
  size_t Nface = facecolors.length();
  size_t Nedge = edgecolors.length();
  size_t Nlw = linewidths.length();
  size_t Naa = antialiaseds.length();

  // iterate over whichever is longer: offsets or polygons
  size_t N = (Noffsets>Nverts) ? Noffsets : Nverts;

  std::pair<double, double> xyo, xy;
  Py::Tuple thisverts;
  for (size_t i=0; i<N; ++i) {
    thisverts = verts[i % Nverts];

    if (usingOffsets) {
      // transform this instance's offset into pixel space
      Py::Tuple pos = Py::Tuple(offsets[i]);
      double xo = Py::Float(pos[0]);
      double yo = Py::Float(pos[1]);
      xyo = transOffset->operator()(xo, yo);
    }

    // NOTE(review): this inner Nverts (vertex count of the current
    // polygon) shadows the outer Nverts (number of polygons) for the
    // rest of this iteration
    size_t Nverts = thisverts.length();
    agg::path_storage path;

    Py::Tuple thisvert;

    // dump the verts to double arrays so we can do more efficient
    // look aheads and behinds when doing snapto pixels
    // NOTE(review): variable-length arrays are a compiler extension,
    // not standard C++
    double xs[Nverts], ys[Nverts];
    for (size_t j=0; j<Nverts; ++j) {
      thisvert = Py::Tuple(thisverts[j]);
      double x = Py::Float(thisvert[0]);
      double y = Py::Float(thisvert[1]);
      xy = transform->operator()(x, y);

      if (usingOffsets) {
        xy.first  += xyo.first;
        xy.second += xyo.second;
      }

      // flip y into buffer coordinates
      xy.second = height - xy.second;
      xs[j] = xy.first;
      ys[j] = xy.second;
    }

    for (size_t j=0; j<Nverts; ++j) {
      double x = xs[j];
      double y = ys[j];

      // snap a coordinate to the pixel center whenever it equals a
      // neighboring vertex's coordinate (including the wrap-around
      // closing edge), i.e. the adjoining segment is exactly
      // horizontal or vertical
      if (j==0) {
        if (xs[j] == xs[Nverts-1]) x = (int)xs[j] + 0.5;
        if (ys[j] == ys[Nverts-1]) y = (int)ys[j] + 0.5;
      }
      else if (j==Nverts-1) {
        if (xs[j] == xs[0]) x = (int)xs[j] + 0.5;
        if (ys[j] == ys[0]) y = (int)ys[j] + 0.5;
      }

      if (j < Nverts-1) {
        if (xs[j] == xs[j+1]) x = (int)xs[j] + 0.5;
        if (ys[j] == ys[j+1]) y = (int)ys[j] + 0.5;
      }

      if (j>0) {
        if (xs[j] == xs[j-1]) x = (int)xs[j] + 0.5;
        if (ys[j] == ys[j-1]) y = (int)ys[j] + 0.5;
      }

      if (j==0) path.move_to(x,y);
      else path.line_to(x,y);
    }

    path.close_polygon();
    int isaa = Py::Int(antialiaseds[i%Naa]);

    // get the facecolor and render
    Py::Tuple rgba = Py::Tuple(facecolors[ i%Nface]);
    double r = Py::Float(rgba[0]);
    double g = Py::Float(rgba[1]);
    double b = Py::Float(rgba[2]);
    double a = Py::Float(rgba[3]);
    if (a>0) { //only render if alpha>0
      agg::rgba facecolor(r, g, b, a);
      theRasterizer->add_path(path);
      if (isaa) {
        theRenderer->color(facecolor);
        theRasterizer->render(*slineP8, *theRenderer);
      }
      else {
        rendererBin->color(facecolor);
        theRasterizer->render(*slineBin, *rendererBin);
      }
    }  //renderer face

    // get the edgecolor and render
    rgba = Py::Tuple(edgecolors[ i%Nedge]);
    r = Py::Float(rgba[0]);
    g = Py::Float(rgba[1]);
    b = Py::Float(rgba[2]);
    a = Py::Float(rgba[3]);
    if (a>0) { //only render if alpha>0
      agg::rgba edgecolor(r, g, b, a);
      agg::conv_stroke<agg::path_storage> stroke(path);
      //stroke.line_cap(cap);
      //stroke.line_join(join);
      double lw = points_to_pixels ( Py::Float( linewidths[i%Nlw] ) );
      stroke.width(lw);
      theRasterizer->add_path(stroke);

      // render antialiased or not
      if ( isaa ) {
        theRenderer->color(edgecolor);
        theRasterizer->render(*slineP8, *theRenderer);
      }
      else {
        rendererBin->color(edgecolor);
        theRasterizer->render(*slineBin, *rendererBin);
      }
    } //rendered edge

  } // for every poly
  return Py::Object();
}
// Apply only the nonlinear part of the transformation to the numerix
// arrays x and y.  Returns (newx, newy), or with returnMask=True a
// (newx, newy, mask) tuple where mask[i]==0 flags points whose transform
// raised (their outputs are zeroed).
//
// Fixes: the temporary input arrays were leaked when the y conversion
// failed, on the length-mismatch throw, and (together with the return
// arrays) on the non-mask domain-error throw; the rety allocation
// failure message wrongly said "x array".
Py::Object
Transformation::nonlinear_only_numerix(const Py::Tuple & args, const Py::Dict &kwargs) {
  _VERBOSE("Transformation::nonlinear_only_numerix");
  args.verify_length(2);

  int returnMask = false;
  if (kwargs.hasKey("returnMask")) {
    returnMask = Py::Int(kwargs["returnMask"]);
  }

  Py::Object xo = args[0];
  Py::Object yo = args[1];

  PyArrayObject *x = (PyArrayObject *) PyArray_ContiguousFromObject(xo.ptr(), PyArray_DOUBLE, 1, 1);
  if (x==NULL)
    throw Py::TypeError("Transformation::nonlinear_only_numerix expected numerix array");

  PyArrayObject *y = (PyArrayObject *) PyArray_ContiguousFromObject(yo.ptr(), PyArray_DOUBLE, 1, 1);
  if (y==NULL) {
    Py_XDECREF(x);  // was leaked on this error path
    throw Py::TypeError("Transformation::nonlinear_only_numerix expected numerix array");
  }

  size_t Nx = x->dimensions[0];
  size_t Ny = y->dimensions[0];

  if (Nx!=Ny) {
    Py_XDECREF(x);
    Py_XDECREF(y);
    throw Py::ValueError("x and y must be equal length sequences");
  }

  int dimensions[1];
  dimensions[0] = Nx;

  PyArrayObject *retx = (PyArrayObject *)PyArray_FromDims(1,dimensions,PyArray_DOUBLE);
  if (retx==NULL) {
    Py_XDECREF(x);
    Py_XDECREF(y);
    throw Py::RuntimeError("Could not create return x array");
  }

  PyArrayObject *rety = (PyArrayObject *)PyArray_FromDims(1,dimensions,PyArray_DOUBLE);
  if (rety==NULL) {
    Py_XDECREF(x);
    Py_XDECREF(y);
    Py_XDECREF(retx);
    throw Py::RuntimeError("Could not create return y array");
  }

  PyArrayObject *retmask = NULL;
  if (returnMask) {
    retmask = (PyArrayObject *)PyArray_FromDims(1,dimensions,PyArray_UBYTE);
    if (retmask==NULL) {
      Py_XDECREF(x);
      Py_XDECREF(y);
      Py_XDECREF(retx);
      Py_XDECREF(rety);
      throw Py::RuntimeError("Could not create return mask array");
    }
  }

  for (size_t i=0; i< Nx; ++i) {
    double thisx = *(double *)(x->data + i*x->strides[0]);
    double thisy = *(double *)(y->data + i*y->strides[0]);
    try {
      this->nonlinear_only_api(&thisx, &thisy);
    }
    catch(...) {
      if (returnMask) {
        // flag the point as invalid and continue with zeroed outputs
        *(unsigned char *)(retmask->data + i*retmask->strides[0]) = 0;
        *(double *)(retx->data + i*retx->strides[0]) = 0.0;
        *(double *)(rety->data + i*rety->strides[0]) = 0.0;
        continue;
      }
      else {
        // release everything before propagating the domain error
        Py_XDECREF(x);
        Py_XDECREF(y);
        Py_XDECREF(retx);
        Py_XDECREF(rety);
        throw Py::ValueError("Domain error on this->nonlinear_only_api(&thisx, &thisy) in Transformation::nonlinear_only_numerix");
      }
    }

    *(double *)(retx->data + i*retx->strides[0]) = thisx;
    *(double *)(rety->data + i*rety->strides[0]) = thisy;
    if (returnMask) {
      *(unsigned char *)(retmask->data + i*retmask->strides[0]) = 1;
    }
  }

  Py_XDECREF(x);
  Py_XDECREF(y);

  // the tuple takes its own references; drop ours afterwards
  if (returnMask) {
    Py::Tuple ret(3);
    ret[0] = Py::Object((PyObject*)retx);
    ret[1] = Py::Object((PyObject*)rety);
    ret[2] = Py::Object((PyObject*)retmask);
    Py_XDECREF(retx);
    Py_XDECREF(rety);
    Py_XDECREF(retmask);
    return ret;
  }
  else {
    Py::Tuple ret(2);
    ret[0] = Py::Object((PyObject*)retx);
    ret[1] = Py::Object((PyObject*)rety);
    Py_XDECREF(retx);
    Py_XDECREF(rety);
    return ret;
  }
}