/*
 * Compute the user-space bounding box of the current path.
 * If include_moveto is true and the path ends with a moveto, the
 * moveto point is folded into the box as well.
 * Returns 0 on success or a negative error code.
 */
int gs_upathbbox(gs_state * pgs, gs_rect * pbox, bool include_moveto)
{
    gs_fixed_rect dev_box;      /* path bbox in fixed-point device space */
    gs_rect float_box;          /* the same box converted to floats */
    int code = gx_path_bbox_set(pgs->path, &dev_box);

    if (code < 0)
        return code;
    /* Fold a trailing moveto point into the box when requested. */
    if (path_last_is_moveto(pgs->path) && include_moveto) {
        gs_fixed_point last_pt;

        code = gx_path_current_point_inline(pgs->path, &last_pt);
        if (code < 0)
            return code;
        if (dev_box.p.x > last_pt.x)
            dev_box.p.x = last_pt.x;
        if (dev_box.p.y > last_pt.y)
            dev_box.p.y = last_pt.y;
        if (dev_box.q.x < last_pt.x)
            dev_box.q.x = last_pt.x;
        if (dev_box.q.y < last_pt.y)
            dev_box.q.y = last_pt.y;
    }
    /* Transform the result back to user coordinates via the inverse CTM. */
    float_box.p.x = fixed2float(dev_box.p.x);
    float_box.p.y = fixed2float(dev_box.p.y);
    float_box.q.x = fixed2float(dev_box.q.x);
    float_box.q.y = fixed2float(dev_box.q.y);
    return gs_bbox_transform_inverse(&float_box, &ctm_only(pgs), pbox);
}
/*
 * Convert the PCL picture frame (anchor point plus frame width/height)
 * into GL/2 integer coordinates expressed in plotter units (PLU).
 * Temporarily switches the CTM to the PCL system and back.
 * Returns 0 on success; hpgl_call propagates negative error codes.
 */
static int
hpgl_picture_frame_coords(hpgl_state_t *pgls, gs_int_rect *gl2_win)
{
    gs_rect frame_dev;          /* picture frame corners in device space */
    hpgl_real_t ax = pgls->g.picture_frame.anchor_point.x;
    hpgl_real_t ay = pgls->g.picture_frame.anchor_point.y;
    hpgl_real_t bx = ax + pgls->g.picture_frame_width;
    hpgl_real_t by = ay + pgls->g.picture_frame_height;

    /* Map both frame corners through the PCL CTM into device space. */
    pcl_set_ctm(pgls, false);
    hpgl_call(gs_transform(pgls->pgs, ax, ay, &frame_dev.p));
    hpgl_call(gs_transform(pgls->pgs, bx, by, &frame_dev.q));
    hpgl_call(hpgl_set_plu_ctm(pgls));
    /*
     * gs_bbox_transform_inverse puts the resulting points in the
     * correct order, with p < q.
     */
    {
        gs_matrix plu_mat;
        gs_rect frame_plu;      /* picture frame in PLU coordinates */

        gs_currentmatrix(pgls->pgs, &plu_mat);
        hpgl_call(gs_bbox_transform_inverse(&frame_dev, &plu_mat, &frame_plu));
        /* Round all coordinates to the nearest integer. */
        gl2_win->p.x = (int)floor(frame_plu.p.x + 0.5);
        gl2_win->p.y = (int)floor(frame_plu.p.y + 0.5);
        gl2_win->q.x = (int)floor(frame_plu.q.x + 0.5);
        gl2_win->q.y = (int)floor(frame_plu.q.y + 0.5);
    }
    /* restore the ctm */
    hpgl_call(hpgl_set_ctm(pgls));
    return 0;
}
/*
 * Return the outer box of the effective clip path, transformed back
 * into user space via the inverse CTM.  On failure ubox is set to an
 * empty (all-zero) box rather than being left unset.
 */
void
xps_bounds_in_user_space(xps_context_t *ctx, gs_rect *ubox)
{
    gx_clip_path *clip_path = NULL;
    gs_rect dbox;
    int code;

    code = gx_effective_clip_path(ctx->pgs, &clip_path);
    if (code < 0 || clip_path == NULL) {
        /* BUG FIX: the previous code only warned here and then went on
         * to dereference the (uninitialized) clip_path pointer.  Bail
         * out with an empty box instead. */
        gs_warn("gx_effective_clip_path failed");
        ubox->p.x = ubox->p.y = ubox->q.x = ubox->q.y = 0;
        return;
    }
    dbox.p.x = fixed2float(clip_path->outer_box.p.x);
    dbox.p.y = fixed2float(clip_path->outer_box.p.y);
    dbox.q.x = fixed2float(clip_path->outer_box.q.x);
    dbox.q.y = fixed2float(clip_path->outer_box.q.y);
    gs_bbox_transform_inverse(&dbox, &ctm_only(ctx->pgs), ubox);
}
/* Read back the bounding box in 1/72" units. */
void
gx_device_bbox_bbox(gx_device_bbox * dev, gs_rect * pbbox)
{
    gs_fixed_rect fixed_box;

    BBOX_GET_BOX(dev, &fixed_box);
    if (fixed_box.p.x > fixed_box.q.x || fixed_box.p.y > fixed_box.q.y) {
        /* Nothing has been written on this page: report an empty box. */
        pbbox->p.x = pbbox->p.y = 0;
        pbbox->q.x = pbbox->q.y = 0;
    } else {
        gs_rect float_box;
        gs_matrix init_mat;

        /* Convert the fixed-point box to floats and map it back
           through the device's initial matrix. */
        float_box.p.x = fixed2float(fixed_box.p.x);
        float_box.p.y = fixed2float(fixed_box.p.y);
        float_box.q.x = fixed2float(fixed_box.q.x);
        float_box.q.y = fixed2float(fixed_box.q.y);
        gs_deviceinitialmatrix((gx_device *)dev, &init_mat);
        gs_bbox_transform_inverse(&float_box, &init_mat, pbbox);
    }
}
/*
 * This is somewhat a clone of the tile_by_steps function but one
 * that performs filling from and to pdf14dev (transparency) buffers.
 * At some point it may be desirable to do some optimization here.
 *
 * Tiles the rectangle (x0,y0)-(x0+w0,y0+h0) with copies of the pattern
 * tile, stepping by ptile->step_matrix (offset by the fill phase), and
 * blends each clipped copy through ptile->ttrans->pat_trans_fill.
 * Always returns 0.
 */
static int
tile_by_steps_trans(tile_fill_trans_state_t * ptfs, int x0, int y0, int w0, int h0,
                    gx_pattern_trans_t *fill_trans_buffer,
                    const gx_color_tile * ptile)
{
    int x1 = x0 + w0, y1 = y0 + h0;
    int i0, i1, j0, j1, i, j;
    gs_matrix step_matrix;      /* translated by phase */
    gx_pattern_trans_t *ptrans_pat = ptile->ttrans;

    ptfs->x0 = x0, ptfs->w0 = w0;
    ptfs->y0 = y0, ptfs->h0 = h0;
    step_matrix = ptile->step_matrix;
    step_matrix.tx -= ptfs->phase.x;
    step_matrix.ty -= ptfs->phase.y;
    {
        gs_rect bbox;           /* bounding box in device space */
        gs_rect ibbox;          /* bounding box in stepping space */
        double bbw = ptile->bbox.q.x - ptile->bbox.p.x;
        double bbh = ptile->bbox.q.y - ptile->bbox.p.y;
        double u0, v0, u1, v1;

        bbox.p.x = x0, bbox.p.y = y0;
        bbox.q.x = x1, bbox.q.y = y1;
        /* Map the fill rectangle into (i,j) step space to find which
           tile copies can intersect it. */
        gs_bbox_transform_inverse(&bbox, &step_matrix, &ibbox);
        if_debug10('T',
                   "[T]x,y=(%d,%d) w,h=(%d,%d) => (%g,%g),(%g,%g), offset=(%g,%g)\n",
                   x0, y0, w0, h0,
                   ibbox.p.x, ibbox.p.y, ibbox.q.x, ibbox.q.y,
                   step_matrix.tx, step_matrix.ty);
        /*
         * If the pattern is partly transparent and XStep/YStep is smaller
         * than the device space BBox, we need to ensure that we cover
         * each pixel of the rectangle being filled with *every* pattern
         * that overlaps it, not just *some* pattern copy.
         * The small epsilons guard against boundary rounding.
         */
        u0 = ibbox.p.x - max(ptile->bbox.p.x, 0) - 0.000001;
        v0 = ibbox.p.y - max(ptile->bbox.p.y, 0) - 0.000001;
        u1 = ibbox.q.x - min(ptile->bbox.q.x, 0) + 0.000001;
        v1 = ibbox.q.y - min(ptile->bbox.q.y, 0) + 0.000001;
        if (!ptile->is_simple)
            /* Non-simple pattern: widen by one full tile bbox each way. */
            u0 -= bbw, v0 -= bbh, u1 += bbw, v1 += bbh;
        i0 = (int)fastfloor(u0);
        j0 = (int)fastfloor(v0);
        i1 = (int)ceil(u1);
        j1 = (int)ceil(v1);
    }
    if_debug4('T', "[T]i=(%d,%d) j=(%d,%d)\n", i0, i1, j0, j1);
    for (i = i0; i < i1; i++)
        for (j = j0; j < j1; j++) {
            /* Device-space origin of this tile copy. */
            int x = (int)fastfloor(step_matrix.xx * i +
                                   step_matrix.yx * j + step_matrix.tx);
            int y = (int)fastfloor(step_matrix.xy * i +
                                   step_matrix.yy * j + step_matrix.ty);
            int w = ptrans_pat->width;
            int h = ptrans_pat->height;
            int xoff, yoff;
            int px, py;

            if_debug4('T', "[T]i=%d j=%d x,y=(%d,%d)", i, j, x, y);
            /* Clip the copy to the fill rectangle, remembering how much
               was trimmed off the left/top as (xoff, yoff). */
            if (x < x0)
                xoff = x0 - x, x = x0, w -= xoff;
            else
                xoff = 0;
            if (y < y0)
                yoff = y0 - y, y = y0, h -= yoff;
            else
                yoff = 0;
            if (x + w > x1)
                w = x1 - x;
            if (y + h > y1)
                h = y1 - y;
            if_debug6('T', "=>(%d,%d) w,h=(%d,%d) x/yoff=(%d,%d)\n",
                      x, y, w, h, xoff, yoff);
            if (w > 0 && h > 0) {
                /* Pattern phase within this copy (wrapped into the
                   tile's width/height). */
                px = imod(xoff - x, ptile->ttrans->width);
                py = imod(yoff - y, ptile->ttrans->height);

                /* Set the offsets for colored pattern fills */
                ptfs->xoff = xoff;
                ptfs->yoff = yoff;

                /* We only go through blending during tiling, if there was
                   overlap as defined by the step matrix and the bounding box */
                ptile->ttrans->pat_trans_fill(x, y, x + w, y + h, px, py,
                                              ptile, fill_trans_buffer);
            }
        }
    return 0;
}
/*
 * Fill with non-standard X and Y stepping.
 * ptile is pdevc->colors.pattern.{m,p}_tile.
 * tbits_or_tmask is whichever of tbits and tmask is supplying
 * the tile size.
 * This implementation could be sped up considerably!
 *
 * Tiles the rectangle (x0,y0)-(x0+w0,y0+h0) with copies of the pattern
 * tile, stepping by ptile->step_matrix (offset by the fill phase), and
 * invokes fill_proc on each clipped copy.
 * Returns 0 on success or the first negative code from fill_proc.
 */
static int
tile_by_steps(tile_fill_state_t * ptfs, int x0, int y0, int w0, int h0,
              const gx_color_tile * ptile,
              const gx_strip_bitmap * tbits_or_tmask,
              int (*fill_proc) (const tile_fill_state_t * ptfs,
                                int x, int y, int w, int h))
{
    int x1 = x0 + w0, y1 = y0 + h0;
    int i0, i1, j0, j1, i, j;
    gs_matrix step_matrix;      /* translated by phase */
    int code;

    ptfs->x0 = x0, ptfs->w0 = w0;
    ptfs->y0 = y0, ptfs->h0 = h0;
    step_matrix = ptile->step_matrix;
    step_matrix.tx -= ptfs->phase.x;
    step_matrix.ty -= ptfs->phase.y;
    {
        gs_rect bbox;           /* bounding box in device space */
        gs_rect ibbox;          /* bounding box in stepping space */
        double bbw = ptile->bbox.q.x - ptile->bbox.p.x;
        double bbh = ptile->bbox.q.y - ptile->bbox.p.y;
        double u0, v0, u1, v1;

        bbox.p.x = x0, bbox.p.y = y0;
        bbox.q.x = x1, bbox.q.y = y1;
        /* Map the fill rectangle into (i,j) step space to find which
           tile copies can intersect it. */
        gs_bbox_transform_inverse(&bbox, &step_matrix, &ibbox);
        if_debug10('T',
                   "[T]x,y=(%d,%d) w,h=(%d,%d) => (%g,%g),(%g,%g), offset=(%g,%g)\n",
                   x0, y0, w0, h0,
                   ibbox.p.x, ibbox.p.y, ibbox.q.x, ibbox.q.y,
                   step_matrix.tx, step_matrix.ty);
        /*
         * If the pattern is partly transparent and XStep/YStep is smaller
         * than the device space BBox, we need to ensure that we cover
         * each pixel of the rectangle being filled with *every* pattern
         * that overlaps it, not just *some* pattern copy.
         * The small epsilons guard against boundary rounding.
         */
        u0 = ibbox.p.x - max(ptile->bbox.p.x, 0) - 0.000001;
        v0 = ibbox.p.y - max(ptile->bbox.p.y, 0) - 0.000001;
        u1 = ibbox.q.x - min(ptile->bbox.q.x, 0) + 0.000001;
        v1 = ibbox.q.y - min(ptile->bbox.q.y, 0) + 0.000001;
        if (!ptile->is_simple)
            /* Non-simple pattern: widen by one full tile bbox each way. */
            u0 -= bbw, v0 -= bbh, u1 += bbw, v1 += bbh;
        i0 = (int)fastfloor(u0);
        j0 = (int)fastfloor(v0);
        i1 = (int)ceil(u1);
        j1 = (int)ceil(v1);
    }
    if_debug4('T', "[T]i=(%d,%d) j=(%d,%d)\n", i0, i1, j0, j1);
    for (i = i0; i < i1; i++)
        for (j = j0; j < j1; j++) {
            /* Device-space origin of this tile copy. */
            int x = (int)fastfloor(step_matrix.xx * i +
                                   step_matrix.yx * j + step_matrix.tx);
            int y = (int)fastfloor(step_matrix.xy * i +
                                   step_matrix.yy * j + step_matrix.ty);
            int w = tbits_or_tmask->size.x;
            int h = tbits_or_tmask->size.y;
            int xoff, yoff;

            if_debug4('T', "[T]i=%d j=%d x,y=(%d,%d)", i, j, x, y);
            /* Clip the copy to the fill rectangle, remembering how much
               was trimmed off the left/top as (xoff, yoff). */
            if (x < x0)
                xoff = x0 - x, x = x0, w -= xoff;
            else
                xoff = 0;
            if (y < y0)
                yoff = y0 - y, y = y0, h -= yoff;
            else
                yoff = 0;
            if (x + w > x1)
                w = x1 - x;
            if (y + h > y1)
                h = y1 - y;
            if_debug6('T', "=>(%d,%d) w,h=(%d,%d) x/yoff=(%d,%d)\n",
                      x, y, w, h, xoff, yoff);
            if (w > 0 && h > 0) {
                /* If clipping through the tile-clip device, set its
                   phase so the mask lines up with this copy. */
                if (ptfs->pcdev == (gx_device *) & ptfs->cdev)
                    tile_clip_set_phase(&ptfs->cdev,
                                        imod(xoff - x, ptfs->tmask->rep_width),
                                        imod(yoff - y, ptfs->tmask->rep_height));
                /* Set the offsets for colored pattern fills */
                ptfs->xoff = xoff;
                ptfs->yoff = yoff;
                code = (*fill_proc) (ptfs, x, y, w, h);
                if (code < 0)
                    return code;
            }
        }
    return 0;
}
/* We separate device allocation and initialization at customer request. */
/*
 * Initialize an already-allocated memory image device.  colors_size
 * selects the pixel depth and palette layout:
 *   2, 4, 16, 256          - gray palette with that many entries;
 *   3*2, 3*4, 3*16, 3*256  - RGB palette with 2/4/16/256 entries;
 *   -16, -24, -32          - direct color (no palette), |n| bits per pixel.
 * pmat must be orthogonal and maps 1 user unit to 1/72" (see the
 * derivation comment below).  The bitmap itself is allocated later,
 * when the device is opened.
 * Returns 0 on success or a negative error code.
 */
int
gs_initialize_wordimagedevice(gx_device_memory * new_dev, const gs_matrix * pmat,
                              uint width, uint height,
                              const byte * colors, int colors_size,
                              bool word_oriented, bool page_device,
                              gs_memory_t * mem)
{
    const gx_device_memory *proto_dev;
    int palette_count = colors_size;
    int num_components = 1;
    int pcount;
    int bits_per_pixel;
    float x_pixels_per_unit, y_pixels_per_unit;
    byte palette[256 * 3];
    bool has_color;

    /* Decode colors_size.  Each RGB case deliberately falls through to
     * the gray case with the same entry count, which sets bits_per_pixel. */
    switch (colors_size) {
        case 3 * 2:
            palette_count = 2;
            num_components = 3;
            /* fall through */
        case 2:
            bits_per_pixel = 1;
            break;
        case 3 * 4:
            palette_count = 4;
            num_components = 3;
            /* fall through */
        case 4:
            bits_per_pixel = 2;
            break;
        case 3 * 16:
            palette_count = 16;
            num_components = 3;
            /* fall through */
        case 16:
            bits_per_pixel = 4;
            break;
        case 3 * 256:
            palette_count = 256;
            num_components = 3;
            /* fall through */
        case 256:
            bits_per_pixel = 8;
            break;
        case -16:
            bits_per_pixel = 16;
            palette_count = 0;
            break;
        case -24:
            bits_per_pixel = 24;
            palette_count = 0;
            break;
        case -32:
            bits_per_pixel = 32;
            palette_count = 0;
            break;
        default:
            return_error(gs_error_rangecheck);
    }
    proto_dev = (word_oriented ?
                 gdev_mem_word_device_for_bits(bits_per_pixel) :
                 gdev_mem_device_for_bits(bits_per_pixel));
    if (proto_dev == 0)         /* no suitable device */
        return_error(gs_error_rangecheck);
    pcount = palette_count * 3;
    /* Check to make sure the palette contains white and black, */
    /* and, if it has any colors, the six primaries. */
    if (bits_per_pixel <= 8) {
        const byte *p;
        byte *q;
        int primary_mask = 0;
        int i;

        has_color = false;
        /* Expand each palette entry to RGB in palette[], and accumulate
         * in primary_mask one bit per "corner" color (black, white,
         * primaries, secondaries) found in the palette. */
        for (i = 0, p = colors, q = palette; i < palette_count;
             i++, q += 3
            ) {
            int mask = 1;

            switch (num_components) {
                case 1: /* gray */
                    q[0] = q[1] = q[2] = *p++;
                    break;
                default /* case 3 */ : /* RGB */
                    q[0] = p[0], q[1] = p[1], q[2] = p[2];
                    p += 3;
            }
            /* For a component of 0xff shift the mask left by n; for 0
             * leave it unchanged; anything else zeroes it (the entry is
             * not a corner color).  Black yields mask 1, white 128. */
#define shift_mask(b,n)\
  switch ( b ) { case 0xff: mask <<= n; case 0: break; default: mask = 0; }
            shift_mask(q[0], 4);
            shift_mask(q[1], 2);
            shift_mask(q[2], 1);
#undef shift_mask
            primary_mask |= mask;
            if (q[0] != q[1] || q[0] != q[2])
                has_color = true;
        }
        switch (primary_mask) {
            case 129:           /* just black (1) and white (128) */
                if (has_color)  /* color but no primaries */
                    return_error(gs_error_rangecheck);
                /* fall through: black + white alone is acceptable */
            case 255:           /* full color */
                break;
            default:
                return_error(gs_error_rangecheck);
        }
    } else
        has_color = true;
    /*
     * The initial transformation matrix must map 1 user unit to
     * 1/72".  Let W and H be the width and height in pixels, and
     * assume the initial matrix is of the form [A 0 0 B X Y].
     * Then the size of the image in user units is (W/|A|,H/|B|),
     * hence the size in inches is ((W/|A|)/72,(H/|B|)/72), so
     * the number of pixels per inch is
     * (W/((W/|A|)/72),H/((H/|B|)/72)), or (|A|*72,|B|*72).
     * Similarly, if the initial matrix is [0 A B 0 X Y] for a 90
     * or 270 degree rotation, the size of the image in user
     * units is (W/|B|,H/|A|), so the pixels per inch are
     * (|B|*72,|A|*72).  We forbid non-orthogonal transformation
     * matrices.
     */
    if (is_fzero2(pmat->xy, pmat->yx))
        x_pixels_per_unit = pmat->xx, y_pixels_per_unit = pmat->yy;
    else if (is_fzero2(pmat->xx, pmat->yy))
        x_pixels_per_unit = pmat->yx, y_pixels_per_unit = pmat->xy;
    else
        return_error(gs_error_undefinedresult);
    /* All checks done, initialize the device. */
    if (bits_per_pixel == 1) {
        /* Determine the polarity from the palette. */
        gs_make_mem_device(new_dev, proto_dev, mem,
                           (page_device ? 1 : -1), 0);
        /* This is somewhat bogus, but does the right thing */
        /* in the only cases we care about. */
        gdev_mem_mono_set_inverted(new_dev,
                                   (palette[0] | palette[1] | palette[2]) != 0);
    } else {
        byte *dev_palette = gs_alloc_string(mem, pcount,
                                            "gs_makeimagedevice(palette)");

        if (dev_palette == 0)
            return_error(gs_error_VMerror);
        gs_make_mem_device(new_dev, proto_dev, mem,
                           (page_device ? 1 : -1), 0);
        new_dev->palette.size = pcount;
        new_dev->palette.data = dev_palette;
        memcpy(dev_palette, palette, pcount);
        if (!has_color) {
            /* All-gray palette: present the device as grayscale. */
            new_dev->color_info.num_components = 1;
            new_dev->color_info.max_color = 0;
            new_dev->color_info.dither_colors = 0;
            new_dev->color_info.gray_index = 0;
        }
    }
    /* Memory device is always initialised as an internal device but */
    /* this is an external device */
    new_dev->retained = true;
    rc_init(new_dev, new_dev->memory, 1);
    new_dev->initial_matrix = *pmat;
    /* Pixels per inch = |scale| * 72; see the derivation comment above. */
    new_dev->MarginsHWResolution[0] = new_dev->HWResolution[0] =
        fabs(x_pixels_per_unit) * 72;
    new_dev->MarginsHWResolution[1] = new_dev->HWResolution[1] =
        fabs(y_pixels_per_unit) * 72;
    gx_device_set_width_height((gx_device *) new_dev, width, height);
    /* Set the ImagingBBox so we get a correct clipping region. */
    {
        gs_rect bbox;

        bbox.p.x = 0;
        bbox.p.y = 0;
        bbox.q.x = width;
        bbox.q.y = height;
        gs_bbox_transform_inverse(&bbox, pmat, &bbox);
        new_dev->ImagingBBox[0] = bbox.p.x;
        new_dev->ImagingBBox[1] = bbox.p.y;
        new_dev->ImagingBBox[2] = bbox.q.x;
        new_dev->ImagingBBox[3] = bbox.q.y;
        new_dev->ImagingBBox_set = true;
    }
    /* The bitmap will be allocated when the device is opened. */
    new_dev->is_open = false;
    new_dev->bitmap_memory = mem;
    return 0;
}