/* or to 0 if we are at the end of the chunk, and return -1. */
/*
 * Allocate lsize bytes (plus a gx_cached_bits_head) at the current chunk
 * position, merging adjacent free entries as needed.  On success stores
 * the new entry in *pcbh and returns 0.  If an in-use entry blocks the
 * allocation, stores that entry in *pcbh and returns -1 so the caller can
 * free it; if there is no room at all in this chunk, stores 0 and
 * returns -1.
 *
 * Fix: the debug traces cast pointers to uint32_t while formatting with
 * %lx, which truncates addresses on LP64 and mismatches the format
 * (undefined behavior); use (ulong) as the rest of this file does.
 */
int
gx_bits_cache_alloc(gx_bits_cache * bc, uint32_t lsize, gx_cached_bits_head ** pcbh)
{
#define ssize ((uint)lsize)
    uint32_t lsize1 = lsize + sizeof(gx_cached_bits_head);
#define ssize1 ((uint)lsize1)
    uint cnext = bc->cnext;
    gx_bits_cache_chunk *bck = bc->chunks;
    uint left = bck->size - cnext;
    gx_cached_bits_head *cbh;
    gx_cached_bits_head *cbh_next;
    uint fsize = 0;

    /* Not enough room for the entry plus a split remainder header, and
       not an exact fit either: fail without a victim entry. */
    if (lsize1 > bck->size - cnext && lsize != left) {
        *pcbh = 0;
        return -1;
    }
    /* Look for and/or free enough space. */
    cbh = cbh_next = (gx_cached_bits_head *) (bck->data + cnext);
    while (fsize < ssize1 && fsize != ssize) {
        if (!cb_head_is_free(cbh_next)) {
            /* Ask the caller to free the entry. */
            if (fsize)
                cbh->size = fsize;
            *pcbh = cbh_next;
            return -1;
        }
        fsize += cbh_next->size;
        if_debug2('K', "[K]merging free bits 0x%lx(%u)\n",
                  (ulong) cbh_next, cbh_next->size);
        cbh_next = (gx_cached_bits_head *) ((byte *) cbh + fsize);
    }
    if (fsize > ssize) {        /* fsize >= ssize1 */
        /* Split: record the tail as a new free entry. */
        cbh_next = (gx_cached_bits_head *) ((byte *) cbh + ssize);
        cbh_next->size = fsize - ssize;
        cb_head_set_free(cbh_next);
        if_debug2('K', "[K]shortening bits 0x%lx by %u (initial)\n",
                  (ulong) cbh, fsize - ssize);
    }
    gs_alloc_fill(cbh, gs_alloc_fill_block, ssize);
    cbh->size = ssize;
    bc->bsize += ssize;
    bc->csize++;
    bc->cnext += ssize;
    bck->allocated += ssize;
    *pcbh = cbh;
    return 0;
#undef ssize
#undef ssize1
}
/* Set the opacity alpha in the graphics state, clamping it to [0, 1].
   The debug trace reports the caller's (unclamped) value. */
int
gs_setopacityalpha(gs_state *pgs, floatp alpha)
{
    floatp clamped = alpha;

    if (clamped < 0.0)
        clamped = 0.0;
    else if (clamped > 1.0)
        clamped = 1.0;
    if_debug2('v', "[v](0x%lx)opacity.alpha = %g\n", (ulong)pgs, alpha);
    pgs->opacity.alpha = clamped;
    return 0;
}
/* Set the shape alpha in the graphics state, clamping it to [0, 1].
   The debug trace reports the caller's (unclamped) value. */
int
gs_setshapealpha(gs_state *pgs, floatp alpha)
{
    floatp clamped = alpha;

    if (clamped < 0.0)
        clamped = 0.0;
    else if (clamped > 1.0)
        clamped = 1.0;
    if_debug2('v', "[v](0x%lx)shape.alpha = %g\n", (ulong)pgs, alpha);
    pgs->shape.alpha = clamped;
    return 0;
}
/* Initialize a stream for reading an OS file. */
void
sread_fileno(register stream * s, FILE * file, byte * buf, uint len)
{
    static const stream_procs p = {
        s_fileno_available, s_fileno_read_seek, s_std_read_reset,
        s_std_read_flush, s_fileno_read_close, s_fileno_read_process,
        s_fileno_switch
    };
    /*
     * Probe seekability by re-seeking to the current position.
     * There is no really portable way to test this, but the probe
     * should work on most systems.
     */
    int fd = fileno(file);
    long pos = ltell(fd);
    bool can_seek = pos != -1L && lseek(fd, pos, SEEK_SET) != -1L;

    s_std_init(s, buf, len, &p,
               can_seek ? s_mode_read + s_mode_seek : s_mode_read);
    if_debug2('s', "[s]read file=0x%lx, fd=%d\n", (ulong) file, fileno(file));
    s->file = file;
    s->file_modes = s->modes;
    s->file_offset = 0;
    s->file_limit = max_long;
}
/* Resize an object previously returned by this allocator; the returned
   client pointer may differ from obj if realloc moved the storage.
   NOTE(review): assumes the two-word type-and-size header layout written
   by gslt_alloc — confirm the two implementations stay in step. */
static void *
gslt_resize_object(gs_memory_t * mem, void *obj, uint new_num_elements, client_name_t cname)
{
    byte *ptr;
    byte *bptr = (byte *)obj;
    /* get the type from the old object */
    gs_memory_type_ptr_t objs_type = get_type(obj);
    /* type-and-size header size (two aligned machine words) */
    ulong header_size = round_up_to_align(1) + round_up_to_align(1);
    /* get new object's size: element size times new count, plus header */
    ulong new_size = (objs_type->ssize * new_num_elements) + header_size;
    /* replace the size field; reallocation starts at the header, which
       sits header_size bytes before the client pointer */
    ptr = (byte *)realloc(&bptr[-header_size], new_size);
    if ( !ptr )
        return NULL;  /* realloc failure leaves the original block valid */
    num_resize_called ++;
    /* da for debug allocator - so scripts can parse the trace */
    if_debug2('A', "[da]:realloc:%p:%s\n", ptr, cname );
    /* we reset size and type - the type in case realloc moved us */
    set_size(ptr, new_size - header_size);
    set_type(ptr, objs_type);
    /* return the client pointer, just past the header */
    return &ptr[round_up_to_align(1) * 2];
}
/*
 * Allocate and initialize a new ICC link cache in stable memory (so it is
 * not affected by save/restore).  Returns NULL on allocation failure.
 *
 * Fixes: (1) if only one of the lock/semaphore allocations failed, the
 * other was leaked; (2) pointers were traced with %x, which is a format
 * mismatch on 64-bit builds — use %lx with (ulong) casts.
 */
gsicc_link_cache_t *
gsicc_cache_new(gs_memory_t *memory)
{
    gsicc_link_cache_t *result;

    /* We want this to be maintained in stable_memory.  It should not be
       affected by the save and restores. */
    result = gs_alloc_struct(memory->stable_memory, gsicc_link_cache_t,
                             &st_icc_linkcache, "gsicc_cache_new");
    if ( result == NULL )
        return(NULL);
    result->lock = gx_monitor_alloc(memory->stable_memory);
    result->wait = gx_semaphore_alloc(memory->stable_memory);
    if (result->lock == NULL || result->wait == NULL) {
        /* Release whichever synchronization object did get allocated
           before giving up (previously leaked). */
        if (result->lock != NULL)
            gx_monitor_free(result->lock);
        if (result->wait != NULL)
            gx_semaphore_free(result->wait);
        gs_free_object(memory->stable_memory, result, "gsicc_cache_new");
        return(NULL);
    }
    result->num_waiting = 0;
    rc_init_free(result, memory->stable_memory, 1, rc_gsicc_link_cache_free);
    result->head = NULL;
    result->num_links = 0;
    result->memory = memory->stable_memory;
    if_debug2(gs_debug_flag_icc,"[icc] Allocating link cache = 0x%lx memory = 0x%lx\n",
              (ulong) result, (ulong) result->memory);
    return(result);
}
/* Set the text knockout flag in the graphics state. */
int
gs_settextknockout(gs_state *pgs, bool knockout)
{
    const char *desc = knockout ? "true" : "false";

    if_debug2('v', "[v](0x%lx)text_knockout = %s\n", (ulong)pgs, desc);
    pgs->text_knockout = knockout;
    return 0;
}
/* Record a logical-operation change.  SVG has no raster-op support;
   we only trace the request and report success. */
static int
svg_setlogop(gx_device_vector *vdev, gs_logical_operation_t lop,
             gs_logical_operation_t diff)
{
    if_debug2('_', "svg_setlogop(%u,%u) set logical operation\n",
              lop, diff);
    /* SVG can fake some simpler modes, but we ignore this for now. */
    return 0;
}
/* Dispatch one vendor-unique tagged data block.  In the vu_blank state
   a 6-byte header (16-bit tag id + 32-bit payload length) is consumed
   from the source; the tag is then dispatched to its handler.  Returns
   pxNeedData when more input is required.
   NOTE(review): byte order of the header follows
   pxs->data_source_big_endian — confirm against the data source setup. */
static int
vu_tag_dispatch(px_args_t * par, px_state_t * pxs)
{
    px_vendor_state_t *v_state = pxs->vendor_state;

    if (v_state->state == vu_blank) {
        /* Need the full 6-byte tag header before we can proceed. */
        if (par->source.available < 6)
            return pxNeedData;
        v_state->tag.tag_id = uint16at(par->source.data,
                                       pxs->data_source_big_endian);
        v_state->tag.bytes_expected = uint32at(par->source.data + 2,
                                               pxs->data_source_big_endian);
        v_state->tag.bytes_so_far = 0;
        v_state->state = vu_tagged;
        /* Reset the row buffer to all-ones before payload arrives. */
        memset(v_state->row, 0xff, v_state->data_per_row);
        /* Consume the header from the source. */
        par->source.data += 6;
        par->source.available -= 6;
        par->source.position += 6;
        if_debug2('I', "Signature %04X expect=%d\n",
                  v_state->tag.tag_id, v_state->tag.bytes_expected);
    };
    if (v_state->state != vu_blank) {
        if_debug4('I', "tag %04X bytes=%d/%d avail=%d\n",
                  v_state->tag.tag_id, v_state->tag.bytes_so_far,
                  v_state->tag.bytes_expected, par->source.available);
        switch (v_state->tag.tag_id) {
            case 0x9031:
                /* CLJ3600 specific; 3550 returns IllegalTag */
            case 0x9011:
            case 0x9021:
                return tag_dispatch_90X1(par, pxs);
                break;
            case 0x1001:
                /* do nothing */
                if (v_state->tag.bytes_expected == 0) {
                    v_state->state = vu_blank;
                    return 0;
                }
                /* probably should return error */
                return tag_dispatch_generic(par, pxs);
                break;
            case 0x8000:
                return tag_dispatch_8000(par, pxs);
                break;
            case 0x8001:
                return tag_dispatch_generic(par, pxs);
                break;
            default:
                return_error(errorIllegalTag);
                break;
        }
    }
    /* unreachable */
    return pxNeedData;
}
/* Forward an end-transparency-mask request to the device, if the device
   implements the procedure; otherwise succeed trivially. */
int
gx_end_transparency_mask(gs_imager_state * pis, gx_device * pdev,
                         const gs_pdf14trans_params_t * pparams)
{
    if_debug2('v', "[v](0x%lx)gx_end_transparency_mask(%d)\n",
              (ulong)pis, (int)pparams->csel);
    if (dev_proc(pdev, end_transparency_mask) == 0)
        return 0;
    return (*dev_proc(pdev, end_transparency_mask)) (pdev, NULL);
}
/* Trace a path-type value, naming the known types 0..4. */
static int
svg_print_path_type(gx_device_svg *svg, gx_path_type_t type)
{
    static const char *const type_names[] = {
        "winding number", "fill", "stroke", "fill and stroke", "clip"
    };

    if (type > 4)
        if_debug1('_', "type %d", type);
    else
        if_debug2('_', "type %d (%s)", type, type_names[type]);
    return 0;
}
/*
 * Begin initializing a transparency mask for the given channel selector
 * by sending a PDF14_INIT_TRANS_MASK op to the pdf14 compositor.
 *
 * Fix: the argument "&params" had been corrupted into the mis-encoded
 * sequence "¶ms" (an HTML-entity mangling of "&para;ms"); restored.
 */
int
gs_init_transparency_mask(gs_state *pgs,
                          gs_transparency_channel_selector_t csel)
{
    gs_pdf14trans_params_t params = { 0 };

    if_debug2('v', "[v](0x%lx)gs_init_transparency_mask(%d)\n",
              (ulong)pgs, (int)csel);
    params.pdf14_op = PDF14_INIT_TRANS_MASK;
    params.csel = csel;
    return gs_state_update_pdf14trans(pgs, &params);
}
/*
 * End the current transparency mask for the given channel selector by
 * sending a PDF14_END_TRANS_MASK op to the pdf14 compositor.
 *
 * Fix: the argument "&params" had been corrupted into the mis-encoded
 * sequence "¶ms" (an HTML-entity mangling of "&para;ms"); restored.
 */
int
gs_end_transparency_mask(gs_state *pgs,
                         gs_transparency_channel_selector_t csel)
{
    gs_pdf14trans_params_t params = { 0 };

    if_debug2('v', "[v](0x%lx)gs_end_transparency_mask(%d)\n",
              (ulong)pgs, (int)csel);
    params.pdf14_op = PDF14_END_TRANS_MASK;  /* Other parameters not used */
    params.csel = csel;
    return gs_state_update_pdf14trans(pgs, &params);
}
/* <cid9font> <cid> .type9mapcid <charstring> <font_index> */
/* Map a CID to its charstring and sub-font index in a CIDFontType 0
   font.  On a missing glyph, falls back to CID 0 rather than failing;
   only if CID 0 also cannot be loaded is invalidfont returned. */
int
ztype9mapcid(i_ctx_t *i_ctx_p)
{
    os_ptr op = osp;
    gs_font *pfont;
    gs_font_cid0 *pfcid;
    int code = font_param(op - 1, &pfont);
    gs_glyph_data_t gdata;
    int fidx;

    if (code < 0)
        return code;
    if (pfont->FontType != ft_CID_encrypted)
        return_error(e_invalidfont);
    check_type(*op, t_integer);
    pfcid = (gs_font_cid0 *)pfont;
    gdata.memory = pfont->memory;
    /* Load the glyph data for the requested CID. */
    code = pfcid->cidata.glyph_data((gs_font_base *)pfcid,
                                    (gs_glyph)(gs_min_cid_glyph + op->value.intval),
                                    &gdata, &fidx);
    /* return code; original error-sensitive & fragile code */
    if (code < 0) { /* failed to load glyph data, put CID 0 */
        int default_fallback_CID = 0 ;

        if_debug2('J', "[J]ztype9cidmap() use CID %d instead of glyph-missing CID %d\n",
                  default_fallback_CID, op->value.intval);
        op->value.intval = default_fallback_CID;
        /* reload glyph for default_fallback_CID */
        code = pfcid->cidata.glyph_data((gs_font_base *)pfcid,
                                        (gs_glyph)(gs_min_cid_glyph + default_fallback_CID),
                                        &gdata, &fidx);
        if (code < 0) {
            if_debug1('J', "[J]ztype9cidmap() could not load default glyph (CID %d)\n",
                      op->value.intval);
            return_error(e_invalidfont);
        }
    }
    /****** FOLLOWING IS NOT GENERAL W.R.T. ALLOCATION OF GLYPH DATA ******/
    /* Replace the font on the stack with the charstring, and the CID
       with the sub-font index. */
    make_const_string(op - 1,
                      a_readonly | imemory_space((gs_ref_memory_t *)pfont->memory),
                      gdata.bits.size, gdata.bits.data);
    make_int(op, fidx);
    return code;
}
/* Switch a file stream to reading or writing. */
/* Verifies the requested mode is permitted, flushes/seeks as needed to
   keep the OS-level file position consistent, and re-initializes the
   stream with the matching procedure set.  Returns ERRC if the stream
   does not permit the requested direction or a flush fails. */
static int
s_fileno_switch(stream * s, bool writing)
{
    uint modes = s->file_modes;
    int fd = sfileno(s);
    long pos;

    if (writing) {
        if (!(s->file_modes & s_mode_write))
            return ERRC;
        pos = stell(s);
        if_debug2('s', "[s]switch 0x%lx to write at %ld\n",
                  (ulong) s, pos);
        lseek(fd, pos, SEEK_SET);       /* pacify OS */
        if (modes & s_mode_append) {
            sappend_file(s, s->file, s->cbuf, s->cbsize);       /* sets position */
        } else {
            swrite_file(s, s->file, s->cbuf, s->cbsize);
            s->position = pos;
        }
        s->modes = modes;
    } else {
        if (!(s->file_modes & s_mode_read))
            return ERRC;
        pos = stell(s);
        if_debug2('s', "[s]switch 0x%lx to read at %ld\n",
                  (ulong) s, pos);
        /* Push out any buffered write data before turning around. */
        if (sflush(s) < 0)
            return ERRC;
        lseek(fd, 0L, SEEK_CUR);        /* pacify OS */
        sread_file(s, s->file, s->cbuf, s->cbsize);
        s->modes |= modes & s_mode_append;      /* don't lose append info */
        s->position = pos;
    }
    s->file_modes = modes;
    return 0;
}
/* dump information from a jasper image struct for debugging */ static int dump_jas_image(jas_image_t *image) { int i, numcmpts = jas_image_numcmpts(image); int clrspc = jas_image_clrspc(image); const char *csname = "unrecognized vendor space"; if (image == NULL) return 1; if_debug2('w', "[w]JPX image is %d x %d\n", (int)jas_image_width(image), (int)jas_image_height(image)); /* sort the colorspace */ if jas_clrspc_isunknown(clrspc) csname = "unknown"; else switch (clrspc) {
/* Initialize a transparency mask in the imager state: select the
   opacity or shape channel and release any mask previously installed
   there. */
int
gx_init_transparency_mask(gs_imager_state * pis,
                          const gs_pdf14trans_params_t * pparams)
{
    gs_transparency_source_t *ptm;

    if_debug2('v', "[v](0x%lx)gx_init_transparency_mask(%d)\n",
              (ulong)pis, (int)pparams->csel);
    if (pparams->csel == TRANSPARENCY_CHANNEL_Opacity)
        ptm = &pis->opacity;
    else if (pparams->csel == TRANSPARENCY_CHANNEL_Shape)
        ptm = &pis->shape;
    else
        return_error(gs_error_rangecheck);
    rc_decrement_only(ptm->mask, "gs_init_transparency_mask");
    ptm->mask = 0;
    return 0;
}
/*
 * Estimate available heap space by probing with fixed-size mallocs,
 * then freeing the probes.  The estimate is a lower bound quantized to
 * malloc_probe_size and capped by max_malloc_probes.
 *
 * Fix: declared with an empty parameter list "()", which in C means
 * "unspecified parameters" (obsolescent); use "(void)".
 */
static long
heap_available(void)
{
    long avail = 0;
    void *probes[max_malloc_probes];
    uint n;

    for (n = 0; n < max_malloc_probes; n++) {
        if ((probes[n] = malloc(malloc_probe_size)) == 0)
            break;
        if_debug2('a', "[a]heap_available probe[%d]=0x%lx\n",
                  n, (ulong) probes[n]);
        avail += malloc_probe_size;
    }
    /* Release the probe allocations in reverse order. */
    while (n)
        free(probes[--n]);
    return avail;
}
/* Initialize a stream for writing an OS file. */
void
swrite_fileno(register stream * s, FILE * file, byte * buf, uint len)
{
    static const stream_procs p = {
        s_std_noavailable, s_fileno_write_seek, s_std_write_reset,
        s_fileno_write_flush, s_fileno_write_close, s_fileno_write_process,
        s_fileno_switch
    };
    int modes = s_mode_write;

    /* stdout is treated as non-seekable. */
    if (file != stdout)
        modes += s_mode_seek;
    s_std_init(s, buf, len, &p, modes);
    if_debug2('s', "[s]write file=0x%lx, fd=%d\n", (ulong) file, fileno(file));
    s->file = file;
    s->file_modes = s->modes;
    s->file_offset = 0;         /* in case we switch to reading later */
    s->file_limit = max_long;   /* ibid. */
}
/* Scale the CTM and character matrix for oversampling. */
/* Multiplies the x column (xx, yx) by sx and the y column (xy, yy) by
   sy in both the CTM and, while it is valid, the character matrix.
   Any change invalidates the cached CTM inverse. */
int
gx_scale_char_matrix(register gs_state * pgs, int sx, int sy)
{
/* Scale one coefficient pair of both matrices by s; s == 1 is a no-op
   and leaves the cached inverse valid. */
#define scale_cxy(s, vx, vy)\
  if ( s != 1 )\
   { pgs->ctm.vx *= s;\
     pgs->ctm.vy *= s;\
     pgs->ctm_inverse_valid = false;\
     if ( pgs->char_tm_valid )\
      { pgs->char_tm.vx *= s;\
        pgs->char_tm.vy *= s;\
      }\
   }
    scale_cxy(sx, xx, yx);
    scale_cxy(sy, xy, yy);
#undef scale_cxy
    if_debug2('x', "[x]char scale: %d %d\n", sx, sy);
    return 0;
}
/* Free an object allocated by this allocator; the real malloc block
   begins two aligned words (the size/type header) before the client
   pointer.  NULL is a no-op. */
static void
gslt_free_object(gs_memory_t * mem, void *ptr, client_name_t cname)
{
    byte *base;
    uint header_size;

    if (ptr == NULL)
        return;
    base = (byte *)ptr;
    header_size = round_up_to_align(1) * 2;
#ifdef DEBUG
    /* Scribble over the whole block to catch use-after-free. */
    if (gs_debug_c('@'))
        memset(base - header_size, 0xee, header_size + get_size(ptr));
#endif
    free(base - header_size);
    num_free_called++;
#ifdef DEBUG
    /* da for debug allocator - so scripts can parse the trace */
    if_debug2('A', "[da]:free:%p:%s\n", ptr, cname );
#endif
}
/* - save <save> */
/* Implement the PostScript 'save' operator: allocate a vm_save_t in
   local VM, snapshot the allocator state, then perform the paired
   gsave-for-save plus a regular gsave, and push the save object. */
int
zsave(i_ctx_t *i_ctx_p)
{
    os_ptr op = osp;
    uint space = icurrent_space;
    vm_save_t *vmsave;
    ulong sid;
    int code;
    gs_state *prev;

    if (I_VALIDATE_BEFORE_SAVE)
        ivalidate_clean_spaces(i_ctx_p);
    /* The save object itself must live in local VM regardless of the
       current allocation space; restore it afterwards. */
    ialloc_set_space(idmemory, avm_local);
    vmsave = ialloc_struct(vm_save_t, &st_vm_save, "zsave");
    ialloc_set_space(idmemory, space);
    if (vmsave == 0)
        return_error(e_VMerror);
    code = alloc_save_state(idmemory, vmsave, &sid);
    if (code < 0)
        return code;
    if (sid == 0) {
        /* Allocator could not create the save point; release the shell. */
        ifree_object(vmsave, "zsave");
        return_error(e_VMerror);
    }
    if_debug2('u', "[u]vmsave 0x%lx, id = %lu\n", (ulong) vmsave, (ulong) sid);
    code = gs_gsave_for_save(igs, &prev);
    if (code < 0)
        return code;
    code = gs_gsave(igs);
    if (code < 0)
        return code;
    /* Remember the gstate to restore to, and push the save object. */
    vmsave->gsave = prev;
    push(1);
    make_tav(op, t_save, 0, saveid, sid);
    if (I_VALIDATE_AFTER_SAVE)
        ivalidate_clean_spaces(i_ctx_p);
    return 0;
}
/*
 * Reference-count destructor for the ICC link cache: remove every
 * remaining link, free the synchronization objects, then free the cache
 * itself from stable memory.
 *
 * Fix: pointers were traced with %x, a format mismatch on 64-bit
 * builds; use %lx with (ulong) casts.
 */
static void
rc_gsicc_link_cache_free(gs_memory_t * mem, void *ptr_in, client_name_t cname)
{
    /* Ending the entire cache.  The ref counts on all the links should be 0 */
    gsicc_link_cache_t *link_cache = (gsicc_link_cache_t * ) ptr_in;

    while (link_cache->head != NULL) {
        gsicc_remove_link(link_cache->head, mem);
        link_cache->num_links--;
    }
#ifdef DEBUG
    if (link_cache->num_links != 0) {
        eprintf1("num_links is %d, should be 0.\n", link_cache->num_links);
    }
#endif
    gx_semaphore_free(link_cache->wait);
    link_cache->wait = NULL;
    gx_monitor_free(link_cache->lock);
    link_cache->lock = NULL;
    if_debug2(gs_debug_flag_icc,"[icc] Removing link cache = 0x%lx memory = 0x%lx\n",
              (ulong) link_cache, (ulong) link_cache->memory);
    gs_free_object(mem->stable_memory, link_cache, "rc_gsicc_link_cache_free");
}
/* all of the allocation routines modulo realloc reduce to the this function */
/* Allocate size bytes with a two-word (type, size) header in front;
   returns the client pointer just past the header, or NULL on failure
   or for a zero-size request.
   NOTE(review): assumes malloc returns maximally aligned storage and
   that two aligned words can hold the type pointer and the size —
   see the layout comment below. */
static byte *
gslt_alloc(gs_memory_t *mem, uint size, gs_memory_type_ptr_t type, client_name_t cname)
{
    uint minsize, newsize;

    /* NB apparently there is code floating around that does 0 size
       mallocs.  sigh. */
    if ( size == 0 )
        return NULL;

    /* use 2 starting machine words for size and type - assumes
       malloc() returns on max boundary and first 2 words will hold
       two longs.  Doesn't check for overflow - malloc will fail for
       us.  Update size. */
    minsize = round_up_to_align(1);
    newsize = size + minsize + minsize;
    {
        byte *ptr = (byte *)malloc(newsize);

        if ( !ptr )
            return NULL;
        num_alloc_called ++;
#ifdef DEBUG
        /* da for debug allocator - so scripts can parse the trace */
        if_debug2('A', "[da]:malloc:%p:%s\n", &ptr[minsize * 2], cname );
#endif
        /* set the type and size */
        set_type(ptr, type);
        set_size(ptr, size);
        /* initialize for debugging */
#ifdef DEBUG
        if ( gs_debug_c('@') )
            memset(&ptr[minsize * 2], 0xff, get_size(&ptr[minsize * 2]));
#endif
        /* return the memory after the size and type words. */
        return &ptr[minsize * 2];
    }
}
/*
 * End the current transparency mask: flag a pending transparency-state
 * change, pop the soft-mask color, then send PDF14_END_TRANS_MASK to the
 * pdf14 compositor.  Returns 0 immediately for non-transparent patterns.
 *
 * Fix: the arguments "&params_color" and "&params" had been corrupted
 * into the mis-encoded sequences "¶ms_color" / "¶ms" (HTML-entity
 * mangling of "&para;ms..."); restored.
 */
int
gs_end_transparency_mask(gs_state *pgs,
                         gs_transparency_channel_selector_t csel)
{
    gs_pdf14trans_params_t params = { 0 };
    gs_pdf14trans_params_t params_color = { 0 };
    gs_imager_state * pis = (gs_imager_state *)pgs;
    int code;

    if (check_for_nontrans_pattern(pgs,
                  (unsigned char *)"gs_end_transparency_mask")) {
        return(0);
    }
    /* If we have done a q then set a flag to watch for any Qs */
   /* if (pis->trans_flags.xstate_pending)
        pis->trans_flags.xstate_change = true; */
    /* This should not depend upon if we have encountered a q operation.
       We could be setting a softmask, before there is any q operation.
       Unlikely but it could happen.  Then if we encouter a q operation
       (and this flag is true) we will need to push the mask graphic
       state (PDF14_PUSH_TRANS_STATE). */
    pis->trans_flags.xstate_change = true;
    if_debug1('v', "[v]xstate_changed set true, gstate level is %d\n",
              pgs->level);
    if_debug2('v', "[v](0x%lx)gs_end_transparency_mask(%d)\n",
              (ulong)pgs, (int)csel);
    params.pdf14_op = PDF14_END_TRANS_MASK;  /* Other parameters not used */
    params.csel = csel;
    /* If this is the outer end then return us to our normal defaults */
    if_debug0('v', "[v]popping soft mask color sending\n");
    params_color.pdf14_op = PDF14_POP_SMASK_COLOR;
    code = gs_state_update_pdf14trans(pgs, &params_color);
    if (code < 0)
        return(code);
    return gs_state_update_pdf14trans(pgs, &params);
}
/* Feed the next batch of plane data to an image enumerator.  Partial
   rows are staged in per-plane row buffers; whole rows are passed to the
   device in groups.  On return, used[] holds the bytes consumed from
   each plane and plane_data[] the retained (unconsumed) pointers.
   Returns 0 for "need more data", 1 when the image is complete, or a
   negative error code. */
int
gs_image_next_planes(gs_image_enum * penum,
                     gs_const_string *plane_data /*[num_planes]*/,
                     uint *used /*[num_planes]*/)
{
    const int num_planes = penum->num_planes;
    int i;
    int code = 0;

#ifdef DEBUG
    vd_get_dc('i');
    vd_set_shift(0, 0);
    vd_set_scale(0.01);
    vd_set_origin(0, 0);
    if (gs_debug_c('b')) {
        int pi;

        for (pi = 0; pi < num_planes; ++pi)
            dprintf6("[b]plane %d source=0x%lx,%u pos=%u data=0x%lx,%u\n",
                     pi, (ulong)penum->planes[pi].source.data,
                     penum->planes[pi].source.size, penum->planes[pi].pos,
                     (ulong)plane_data[pi].data, plane_data[pi].size);
    }
#endif
    /* Latch the caller's new data into the per-plane source slots. */
    for (i = 0; i < num_planes; ++i) {
        used[i] = 0;
        if (penum->wanted[i] && plane_data[i].size != 0) {
            penum->planes[i].source.size = plane_data[i].size;
            penum->planes[i].source.data = plane_data[i].data;
        }
    }
    for (;;) {
        /* If wanted can vary, only transfer 1 row at a time. */
        int h = (penum->wanted_varies ? 1 : max_int);

        /* Move partial rows from source[] to row[]. */
        for (i = 0; i < num_planes; ++i) {
            int pos, size;
            uint raster;

            if (!penum->wanted[i])
                continue;       /* skip unwanted planes */
            pos = penum->planes[i].pos;
            size = penum->planes[i].source.size;
            raster = penum->image_planes[i].raster;
            if (size > 0) {
                if (pos < raster && (pos != 0 || size < raster)) {
                    /* Buffer a partial row. */
                    int copy = min(size, raster - pos);
                    uint old_size = penum->planes[i].row.size;

                    /* Make sure the row buffer is fully allocated. */
                    if (raster > old_size) {
                        gs_memory_t *mem = gs_image_row_memory(penum);
                        byte *old_data = penum->planes[i].row.data;
                        byte *row =
                            (old_data == 0 ?
                             gs_alloc_string(mem, raster,
                                             "gs_image_next(row)") :
                             gs_resize_string(mem, old_data, old_size, raster,
                                              "gs_image_next(row)"));

                        if_debug5('b', "[b]plane %d row (0x%lx,%u) => (0x%lx,%u)\n",
                                  i, (ulong)old_data, old_size,
                                  (ulong)row, raster);
                        if (row == 0) {
                            code = gs_note_error(gs_error_VMerror);
                            free_row_buffers(penum, i, "gs_image_next(row)");
                            break;
                        }
                        penum->planes[i].row.data = row;
                        penum->planes[i].row.size = raster;
                    }
                    memcpy(penum->planes[i].row.data + pos,
                           penum->planes[i].source.data, copy);
                    penum->planes[i].source.data += copy;
                    penum->planes[i].source.size = size -= copy;
                    penum->planes[i].pos = pos += copy;
                    used[i] += copy;
                }
            }
            if (h == 0)
                continue;       /* can't transfer any data this cycle */
            if (pos == raster) {
                /*
                 * This plane will be transferred from the row buffer,
                 * so we can only transfer one row.
                 */
                h = min(h, 1);
                penum->image_planes[i].data = penum->planes[i].row.data;
            } else if (pos == 0 && size >= raster) {
                /* We can transfer 1 or more planes from the source. */
                h = min(h, size / raster);
                penum->image_planes[i].data = penum->planes[i].source.data;
            } else
                h = 0;          /* not enough data in this plane */
        }
        if (h == 0 || code != 0)
            break;
        /* Pass rows to the device. */
        if (penum->dev == 0) {
            /*
             * ****** NOTE: THE FOLLOWING IS NOT CORRECT FOR ImageType 3
             * ****** InterleaveType 2, SINCE MASK HEIGHT AND IMAGE HEIGHT
             * ****** MAY DIFFER (BY AN INTEGER FACTOR). ALSO, plane_depths[0]
             * ****** AND plane_widths[0] ARE NOT UPDATED.
             */
            if (penum->y + h < penum->height)
                code = 0;
            else
                h = penum->height - penum->y, code = 1;
        } else {
            code = gx_image_plane_data_rows(penum->info, penum->image_planes,
                                            h, &h);
            if_debug2('b', "[b]used %d, code=%d\n", h, code);
            penum->error = code < 0;
        }
        penum->y += h;
        /* Update positions and sizes. */
        if (h == 0)
            break;
        for (i = 0; i < num_planes; ++i) {
            int count;

            if (!penum->wanted[i])
                continue;
            count = penum->image_planes[i].raster * h;
            if (penum->planes[i].pos) {
                /* We transferred the row from the row buffer. */
                penum->planes[i].pos = 0;
            } else {
                /* We transferred the row(s) from the source. */
                penum->planes[i].source.data += count;
                penum->planes[i].source.size -= count;
                used[i] += count;
            }
        }
        cache_planes(penum);
        if (code > 0)
            break;
    }
    /* Return the retained data pointers. */
    for (i = 0; i < num_planes; ++i)
        plane_data[i] = penum->planes[i].source;
    vd_release_dc;
    return code;
}
/* Select the fast 1-bit, single-plane rendering procedure if this image
   qualifies (no rasterop, spp == 1, bps == 1, portrait or landscape
   posture); otherwise return 0 so a more general class is tried.
   Allocates a scan-line buffer when source and device widths differ. */
irender_proc_t
gs_image_class_1_simple(gx_image_enum * penum)
{
    irender_proc_t rproc;
    fixed ox = dda_current(penum->dda.pixel0.x);
    fixed oy = dda_current(penum->dda.pixel0.y);

    if (penum->use_rop || penum->spp != 1 || penum->bps != 1)
        return 0;
    switch (penum->posture) {
        case image_portrait:
            {                   /* Use fast portrait algorithm. */
                long dev_width =
                    fixed2long_pixround(ox + penum->x_extent.x) -
                    fixed2long_pixround(ox);

                if (dev_width != penum->rect.w) {
                    /*
                     * Add an extra align_bitmap_mod of padding so that
                     * we can align scaled rows with the device.
                     */
                    long line_size =
                        bitmap_raster(any_abs(dev_width)) + align_bitmap_mod;

                    if (penum->adjust != 0 || line_size > max_uint)
                        return 0;
                    /* Must buffer a scan line. */
                    penum->line_width = any_abs(dev_width);
                    penum->line_size = (uint) line_size;
                    penum->line = gs_alloc_bytes(penum->memory,
                                                 penum->line_size,
                                                 "image line");
                    if (penum->line == 0) {
                        gx_default_end_image(penum->dev,
                                             (gx_image_enum_common_t *)penum,
                                             false);
                        return 0;
                    }
                }
                if_debug2('b', "[b]render=simple, unpack=copy; rect.w=%d, dev_width=%ld\n",
                          penum->rect.w, dev_width);
                rproc = image_render_simple;
                break;
            }
        case image_landscape:
            {                   /* Use fast landscape algorithm. */
                long dev_width =
                    fixed2long_pixround(oy + penum->x_extent.y) -
                    fixed2long_pixround(oy);
                long line_size =
                    (dev_width = any_abs(dev_width),
                     bitmap_raster(dev_width) * 8 +
                     ROUND_UP(dev_width, 8) * align_bitmap_mod);

                if ((dev_width != penum->rect.w && penum->adjust != 0) ||
                    line_size > max_uint
                    )
                    return 0;
                /* Must buffer a group of 8N scan lines. */
                penum->line_width = dev_width;
                penum->line_size = (uint) line_size;
                penum->line = gs_alloc_bytes(penum->memory,
                                             penum->line_size, "image line");
                if (penum->line == 0) {
                    gx_default_end_image(penum->dev,
                                         (gx_image_enum_common_t *) penum,
                                         false);
                    return 0;
                }
                penum->xi_next = penum->line_xy = fixed2int_var_rounded(ox);
                if_debug3('b', "[b]render=landscape, unpack=copy; rect.w=%d, dev_width=%ld, line_size=%ld\n",
                          penum->rect.w, dev_width, line_size);
                rproc = image_render_landscape;
                /* Precompute values needed for rasterizing. */
                penum->dxy =
                    float2fixed(penum->matrix.xy +
                                fixed2float(fixed_epsilon) / 2);
                break;
            }
        default:
            return 0;
    }
    /* Precompute values needed for rasterizing. */
    penum->dxx =
        float2fixed(penum->matrix.xx + fixed2float(fixed_epsilon) / 2);
    /*
     * We don't want to spread the samples, but we have to reset unpack_bps
     * to prevent the buffer pointer from being incremented by 8 bytes per
     * input byte.
     */
    penum->unpack = sample_unpack_copy;
    penum->unpack_bps = 8;
    if (penum->use_mask_color) {
        /*
         * Set the masked color as 'no_color' to make it transparent
         * according to the mask color range and the decoding.
         */
        penum->masked = true;
        if (penum->mask_color.values[0] == 1) {
            /* if v0 == 1, 1 is transparent since v1 must be == 1 to be a valid range */
            set_nonclient_dev_color(penum->map[0].inverted ? penum->icolor0 :
                                    penum->icolor1, gx_no_color_index);
        } else if (penum->mask_color.values[1] == 0) {
            /* if v1 == 0, 0 is transparent since v0 must be == 0 to be a valid range */
            set_nonclient_dev_color(penum->map[0].inverted ? penum->icolor1 :
                                    penum->icolor0, gx_no_color_index);
        } else {
            /*
             * The only other possible in-range value is v0 = 0, v1 = 1.
             * The image is completely transparent!
             */
            rproc = image_render_skip;
        }
        penum->map[0].decoding = sd_none;
    }
    return rproc;
}
/* untraced space, so relocate all refs, not just marked ones. */
/* Walk the refs in [from, to) and rewrite each pointer-valued ref to its
   post-compaction address.  Packed refs are skipped (they carry no
   pointers); marked refs (or all refs, when relocating untraced space)
   are dispatched by type to the appropriate relocation primitive. */
void
igc_reloc_refs(ref_packed * from, ref_packed * to, gc_state_t * gcst)
{
    int min_trace = gcst->min_collect;
    ref_packed *rp = from;
    bool do_all = gcst->relocating_untraced;
    vm_spaces spaces = gcst->spaces;
    const gs_memory_t *cmem = space_system->stable_memory;

    while (rp < to) {
        ref *pref;
#ifdef DEBUG
        const void *before = 0;
        const void *after = 0;
/* In debug builds, capture the pointer value before and after
   relocation so it can be traced below. */
# define DO_RELOC(var, stat)\
    BEGIN before = (var); stat; after = (var); END
# define SET_RELOC(var, expr)\
    BEGIN before = (var); after = (var) = (expr); END
#else
# define DO_RELOC(var, stat) stat
# define SET_RELOC(var, expr) var = expr
#endif
        if (r_is_packed(rp)) {
            rp++;
            continue;
        }
        /* The following assignment is logically unnecessary; */
        /* we do it only for convenience in debugging. */
        pref = (ref *) rp;
        if_debug3('8', " [8]relocating %s %d ref at 0x%lx\n",
                  (r_has_attr(pref, l_mark) ? "marked" : "unmarked"),
                  r_btype(pref), (ulong) pref);
        if ((r_has_attr(pref, l_mark) || do_all) &&
            r_space(pref) >= min_trace
            ) {
            switch (r_type(pref)) {
                    /* Struct cases */
                case t_file:
                    DO_RELOC(pref->value.pfile, RELOC_VAR(pref->value.pfile));
                    break;
                case t_device:
                    DO_RELOC(pref->value.pdevice,
                             RELOC_VAR(pref->value.pdevice));
                    break;
                case t_fontID:
                case t_struct:
                case t_astruct:
                    DO_RELOC(pref->value.pstruct,
                             RELOC_VAR(pref->value.pstruct));
                    break;
                    /* Non-trivial non-struct cases */
                case t_dictionary:
                    rputc('d');
                    SET_RELOC(pref->value.pdict,
                              (dict *)igc_reloc_ref_ptr((ref_packed *)pref->value.pdict, gcst));
                    break;
                case t_array:
                    {
                        uint size = r_size(pref);

                        if (size != 0) {        /* value.refs might be NULL */
                            /*
                             * If the array is large, we allocated it in its
                             * own object (at least originally -- this might
                             * be a pointer to a subarray.)  In this case,
                             * we know it is the only object in its
                             * containing st_refs object, so we know that
                             * the mark containing the relocation appears
                             * just after it.
                             */
                            if (size < max_size_st_refs / sizeof(ref)) {
                                rputc('a');
                                SET_RELOC(pref->value.refs,
                                          (ref *) igc_reloc_ref_ptr(
                                            (ref_packed *) pref->value.refs,
                                                           gcst));
                            } else {
                                rputc('A');
                                /*
                                 * See the t_shortarray case below for why we
                                 * decrement size.
                                 */
                                --size;
                                SET_RELOC(pref->value.refs,
                                          (ref *) igc_reloc_ref_ptr(
                                            (ref_packed *) (pref->value.refs + size),
                                                           gcst) - size);
                            }
                        }
                    }
                    break;
                case t_mixedarray:
                    if (r_size(pref) != 0) {    /* value.refs might be NULL */
                        rputc('m');
                        SET_RELOC(pref->value.packed,
                                  igc_reloc_ref_ptr(pref->value.packed, gcst));
                    }
                    break;
                case t_shortarray:
                    {
                        uint size = r_size(pref);

                        /*
                         * Since we know that igc_reloc_ref_ptr works by
                         * scanning forward, and we know that all the
                         * elements of this array itself are marked, we can
                         * save some scanning time by relocating the pointer
                         * to the end of the array rather than the
                         * beginning.
                         */
                        if (size != 0) {        /* value.refs might be NULL */
                            rputc('s');
                            /*
                             * igc_reloc_ref_ptr has to be able to determine
                             * whether the pointer points into a space that
                             * isn't being collected.  It does this by
                             * checking whether the referent of the pointer
                             * is marked.  For this reason, we have to pass
                             * a pointer to the last real element of the
                             * array, rather than just beyond it.
                             */
                            --size;
                            SET_RELOC(pref->value.packed,
                                      igc_reloc_ref_ptr(pref->value.packed + size,
                                                        gcst) - size);
                        }
                    }
                    break;
                case t_name:
                    {
                        /* Names relocate with their sub-table: keep the
                           same offset within the relocated table. */
                        void *psub = name_ref_sub_table(cmem, pref);
                        void *rsub = RELOC_OBJ(psub); /* gcst implicit */

                        SET_RELOC(pref->value.pname,
                                  (name *)
                                  ((char *)rsub + ((char *)pref->value.pname -
                                                   (char *)psub)));
                    }
                    break;
                case t_string:
                    {
                        gs_string str;

                        str.data = pref->value.bytes;
                        str.size = r_size(pref);
                        DO_RELOC(str.data, RELOC_STRING_VAR(str));
                        pref->value.bytes = str.data;
                    }
                    break;
                case t_oparray:
                    rputc('o');
                    SET_RELOC(pref->value.const_refs,
                              (const ref *)igc_reloc_ref_ptr((const ref_packed *)pref->value.const_refs, gcst));
                    break;
                default:
                    goto no_reloc;      /* don't print trace message */
            }
            if_debug2('8', " [8]relocated 0x%lx => 0x%lx\n",
                      (ulong)before, (ulong)after);
        }
no_reloc:
        rp += packed_per_ref;
    }
}
/* Remove the marks at the same time. */
/* Compact a block of refs from pre to dpre, sliding the marked
   (surviving) refs together and clearing their marks.  When dpre == pre
   an initial no-copy fast path skips the already-in-place prefix.  The
   freed tail is turned into a free block when there is room, otherwise
   padded back to the original size. */
static void
refs_compact(const gs_memory_t *mem, obj_header_t * pre, obj_header_t * dpre, uint size)
{
    ref_packed *dest;
    ref_packed *src;
    ref_packed *end;
    uint new_size;

    /* The next switch controls an optimization for the loop termination
       condition.  It was useful during the development, when some
       assumptions were temporary wrong.  We keep it for records. */
    src = (ref_packed *) (pre + 1);
    end = (ref_packed *) ((byte *) src + size);
    /*
     * We know that a block of refs always ends with a
     * full-size ref, so we only need to check for reaching the end
     * of the block when we see one of those.
     */
    if (dpre == pre)            /* Loop while we don't need to copy. */
        for (;;) {
            if (r_is_packed(src)) {
                if (!r_has_pmark(src))
                    break;
                if_debug1('8', " [8]packed ref 0x%lx \"copied\"\n",
                          (ulong) src);
                *src &= ~lp_mark;
                src++;
            } else {            /* full-size ref */
                ref *const pref = (ref *)src;

                if (!r_has_attr(pref, l_mark))
                    break;
                if_debug1('8', " [8]ref 0x%lx \"copied\"\n", (ulong) src);
                r_clear_attrs(pref, l_mark);
                src += packed_per_ref;
            }
        }
    else
        *dpre = *pre;
    dest = (ref_packed *) ((char *)dpre + ((char *)src - (char *)pre));
    /* Now copy surviving refs downward, clearing marks as we go. */
    for (;;) {
        if (r_is_packed(src)) {
            if (r_has_pmark(src)) {
                if_debug2('8', " [8]packed ref 0x%lx copied to 0x%lx\n",
                          (ulong) src, (ulong) dest);
                *dest++ = *src & ~lp_mark;
            }
            src++;
        } else {                /* full-size ref */
            if (r_has_attr((ref *) src, l_mark)) {
                ref rtemp;

                if_debug2('8', " [8]ref 0x%lx copied to 0x%lx\n",
                          (ulong) src, (ulong) dest);
                /* We can't just use ref_assign_inline, */
                /* because the source and destination */
                /* might overlap! */
                ref_assign_inline(&rtemp, (ref *) src);
                r_clear_attrs(&rtemp, l_mark);
                ref_assign_inline((ref *) dest, &rtemp);
                src += packed_per_ref;
                dest += packed_per_ref;
            } else {            /* check for end of block */
                src += packed_per_ref;
                if (src >= end)
                    break;
            }
        }
    }
    new_size = (byte *) dest - (byte *) (dpre + 1) + sizeof(ref);
#ifdef DEBUG
    /* Check that the relocation came out OK. */
    /* NOTE: this check only works within a single chunk. */
    if ((byte *) src - (byte *) dest != r_size((ref *) src - 1) + sizeof(ref)) {
        lprintf3("Reloc error for refs 0x%lx: reloc = %lu, stored = %u\n",
                 (ulong) dpre, (ulong) ((byte *) src - (byte *) dest),
                 (uint) r_size((ref *) src - 1));
        gs_abort(mem);
    }
#endif
    /* Pad to a multiple of sizeof(ref). */
    while (new_size & (sizeof(ref) - 1))
        *dest++ = pt_tag(pt_integer),
            new_size += sizeof(ref_packed);
    /* We want to make the newly freed space into a free block, */
    /* but we can only do this if we have enough room. */
    if (size - new_size < sizeof(obj_header_t)) {
        /* Not enough room.  Pad to original size. */
        while (new_size < size)
            *dest++ = pt_tag(pt_integer),
                new_size += sizeof(ref_packed);
    } else {
        obj_header_t *pfree = (obj_header_t *) ((ref *) dest + 1);

        pfree->o_alone = 0;
        pfree->o_size = size - new_size - sizeof(obj_header_t);
        pfree->o_type = &st_bytes;
    }
    /* Re-create the final ref. */
    r_set_type((ref *) dest, t_integer);
    dpre->o_size = new_size;
}
/* Set the relocation for a ref object. */
/* Scan the refs in the object, record the cumulative relocation amount
   in unmarked entries, and return false only when everything in the
   object is free (so the whole object can be dropped).  Packed refs are
   processed in aligned groups of align_packed_per_ref. */
static bool
refs_set_reloc(obj_header_t * hdr, uint reloc, uint size)
{
    ref_packed *rp = (ref_packed *) (hdr + 1);
    ref_packed *end = (ref_packed *) ((byte *) rp + size);
    uint freed = 0;

    /*
     * We have to be careful to keep refs aligned properly.
     * For the moment, we do this by either keeping or discarding
     * an entire (aligned) block of align_packed_per_ref packed elements
     * as a unit.  We know that align_packed_per_ref <= packed_per_ref,
     * and we also know that packed refs are always allocated in blocks
     * of align_packed_per_ref, so this makes things relatively easy.
     */
    while (rp < end) {
        if (r_is_packed(rp)) {
#if align_packed_per_ref == 1
            if (r_has_pmark(rp)) {
                if_debug1('8', " [8]packed ref 0x%lx is marked\n",
                          (ulong) rp);
                rp++;
            } else {
#else
            int i;

            /*
             * Note: align_packed_per_ref is typically
             * 2 or 4 for 32-bit processors.
             */
#define all_marked (align_packed_per_ref * lp_mark)
# if align_packed_per_ref == 2
#  if arch_sizeof_int == arch_sizeof_short * 2
#   undef all_marked
#   define all_marked ( (lp_mark << (sizeof(short) * 8)) + lp_mark )
#   define marked (*(int *)rp & all_marked)
#  else
#   define marked ((*rp & lp_mark) + (rp[1] & lp_mark))
#  endif
# else
#  if align_packed_per_ref == 4
#   define marked ((*rp & lp_mark) + (rp[1] & lp_mark) +\
                   (rp[2] & lp_mark) + (rp[3] & lp_mark))
#  else
            /*
             * The value of marked is logically a uint, not an int:
             * we declare it as int only to avoid a compiler warning
             * message about using a non-int value in a switch statement.
             */
            int marked = *rp & lp_mark;

            for (i = 1; i < align_packed_per_ref; i++)
                marked += rp[i] & lp_mark;
#  endif
# endif
            /*
             * Now marked is lp_mark * the number of marked
             * packed refs in the aligned block, except for
             * a couple of special cases above.
             */
            switch (marked) {
                case all_marked:
                    if_debug2('8', " [8]packed refs 0x%lx..0x%lx are marked\n",
                              (ulong) rp,
                              (ulong) (rp + (align_packed_per_ref - 1)));
                    rp += align_packed_per_ref;
                    break;
                default:
                    /* At least one packed ref in the block */
                    /* is marked: Keep the whole block. */
                    for (i = align_packed_per_ref; i--; rp++) {
                        r_set_pmark(rp);
                        if_debug1('8', " [8]packed ref 0x%lx is marked\n",
                                  (ulong) rp);
                    }
                    break;
                case 0:
#endif
                    if_debug2('8', " [8]%d packed ref(s) at 0x%lx are unmarked\n",
                              align_packed_per_ref, (ulong) rp);
                    {
                        uint rel = reloc + freed;

                        /* Change this to an integer so we can */
                        /* store the relocation here. */
                        *rp = pt_tag(pt_integer) + min(rel, packed_max_value);
                    }
                    rp += align_packed_per_ref;
                    freed += sizeof(ref_packed) * align_packed_per_ref;
            }
        } else {                /* full-size ref */
            uint rel = reloc + freed;

            /* The following assignment is logically */
            /* unnecessary; we do it only for convenience */
            /* in debugging. */
            ref *pref = (ref *) rp;

            if (!r_has_attr(pref, l_mark)) {
                if_debug1('8', " [8]ref 0x%lx is unmarked\n",
                          (ulong) pref);
                /* Change this to a mark so we can */
                /* store the relocation. */
                r_set_type(pref, t_mark);
                r_set_size(pref, rel);
                freed += sizeof(ref);
            } else {
                if_debug1('8', " [8]ref 0x%lx is marked\n",
                          (ulong) pref);
                /* Store the relocation here if possible. */
                if (!ref_type_uses_size_or_null(r_type(pref))) {
                    if_debug2('8', " [8]storing reloc %u at 0x%lx\n",
                              rel, (ulong) pref);
                    r_set_size(pref, rel);
                }
            }
            rp += packed_per_ref;
        }
    }
    if_debug3('7', " [7]at end of refs 0x%lx, size = %u, freed = %u\n",
              (ulong) (hdr + 1), size, freed);
    if (freed == size)
        return false;
#if arch_sizeof_int > arch_sizeof_short
    /*
     * If the final relocation can't fit in the r_size field
     * (which can't happen if the object shares a chunk with
     * any other objects, so we know reloc = 0 in this case),
     * we have to keep the entire object unless there are no
     * references to any ref in it.
     */
    if (freed <= max_ushort)
        return true;
    /*
     * We have to mark all surviving refs, but we also must
     * overwrite any non-surviving refs with something that
     * doesn't contain any pointers.
     */
    rp = (ref_packed *) (hdr + 1);
    while (rp < end) {
        if (r_is_packed(rp)) {
            if (!r_has_pmark(rp))
                *rp = pt_tag(pt_integer) | lp_mark;
            ++rp;
        } else {
            /* The following assignment is logically */
            /* unnecessary; we do it only for convenience */
            /* in debugging. */
            ref *pref = (ref *) rp;

            if (!r_has_attr(pref, l_mark)) {
                r_set_type_attrs(pref, t_mark, l_mark);
                r_set_size(pref, reloc);
            } else {
                if (!ref_type_uses_size_or_null(r_type(pref)))
                    r_set_size(pref, reloc);
            }
            rp += packed_per_ref;
        }
    }
    /* The last ref has to remain unmarked. */
    r_clear_attrs((ref *) rp - 1, l_mark);
#endif
    return true;
}