/* <obj1> ... <objn> <n> .execn - */
static int
zexecn(i_ctx_t *i_ctx_p)
{
    os_ptr op = osp;
    uint n, i;
    es_ptr esp_orig;

    check_int_leu(*op, max_uint - 1);
    n = (uint) op->value.intval;
    check_op(n + 1);
    check_estack(n);
    esp_orig = esp;
    for (i = 0; i < n; ++i) {
        const ref *rp = ref_stack_index(&o_stack, (long)(i + 1));

        /* Make sure this object is legal to execute. */
        if (ref_type_uses_access(r_type(rp))) {
            if (!r_has_attr(rp, a_execute) &&
                r_has_attr(rp, a_executable)
                ) {
                esp = esp_orig;
                return_error(e_invalidaccess);
            }
        }
        /* Executable nulls have a special meaning on the e-stack, */
        /* so since they are no-ops, don't push them. */
        if (!r_has_type_attrs(rp, t_null, a_executable)) {
            ++esp;
            ref_assign(esp, rp);
        }
    }
    esfile_check_cache();
    pop(n + 1);
    return o_push_estack;
}
/*
 * Check the operand of exec or stopped. Return 0 if OK to execute, or a
 * negative error code. We emulate an apparent bug in Adobe interpreters,
 * which cause an invalidaccess error when 'exec'ing a noaccess literal
 * (other than dictionaries). We also match the Adobe interpreters in that
 * we catch noaccess executable objects here, rather than waiting for the
 * interpreter to catch them, so that we can signal the error with the
 * object still on the operand stack.
 */
static int
check_for_exec(const_os_ptr op)
{
    if (!r_has_attr(op, a_execute) && /* only true if noaccess */
        ref_type_uses_access(r_type(op)) &&
        (r_has_attr(op, a_executable) || !r_has_type(op, t_dictionary))
        ) {
        return_error(e_invalidaccess);
    }
    return 0;
}
static int
zrunandhide(i_ctx_t *i_ctx_p)
{
    os_ptr op = osp;
    es_ptr ep;

    check_op(2);
    if (!r_is_array(op - 1))
        return_op_typecheck(op - 1);
    if (!r_has_attr(op, a_executable))
        return 0;		/* literal object just gets pushed back */
    check_estack(5);
    ep = esp += 5;
    make_mark_estack(ep - 4, es_other, err_end_runandhide); /* error case */
    make_op_estack(ep - 1, end_runandhide);                 /* normal case */
    ref_assign(ep, op);

    /* Store the object we are hiding and its current tas.type_attrs */
    /* on the exec stack, then change it to 'noaccess'. */
    make_int(ep - 3, (int)op[-1].tas.type_attrs);
    ref_assign(ep - 2, op - 1);
    r_clear_attrs(ep - 2, a_all);

    /* replace the array with a special kind of mark that has a_read access */
    esfile_check_cache();
    pop(2);
    return o_push_estack;
}
/* Get some double arguments. */
static int
double_params(os_ptr op, int count, double *pval)
{
    pval += count;
    while (--count >= 0) {
        switch (r_type(op)) {
            case t_real:
                *--pval = op->value.realval;
                break;
            case t_integer:
                *--pval = op->value.intval;
                break;
            case t_string:
                if (!r_has_attr(op, a_read) ||
                    r_size(op) != sizeof(double)
                    )
                    return_error(e_typecheck);
                --pval;
                memcpy(pval, op->value.bytes, sizeof(double));
                break;
            case t__invalid:
                return_error(e_stackunderflow);
            default:
                return_error(e_typecheck);
        }
        op--;
    }
    return 0;
}
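/*
 * A minimal standalone sketch (not part of the interpreter; the helper
 * names are illustrative only) of the byte-copy convention double_params
 * relies on for its t_string case: a double passed as a string must be
 * exactly sizeof(double) bytes and is copied bit-for-bit with memcpy, so
 * the bytes must have been produced with the same native double
 * representation.
 */
#if 0	/* illustrative sketch, not compiled */
#include <string.h>

static void
demo_pack_double(unsigned char *buf /* sizeof(double) bytes */, double v)
{
    memcpy(buf, &v, sizeof(double));	/* raw native bytes, no conversion */
}

static double
demo_unpack_double(const unsigned char *buf /* sizeof(double) bytes */)
{
    double v;

    memcpy(&v, buf, sizeof(double));
    return v;
}
#endif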
/* <file> flushfile - */
static int
zflushfile(i_ctx_t *i_ctx_p)
{
    os_ptr op = osp;
    stream *s;
    int status;

    check_type(*op, t_file);
    /*
     * We think flushfile is a no-op on closed input files, but causes an
     * error on closed output files.
     */
    if (file_is_invalid(s, op)) {
        if (r_has_attr(op, a_write))
            return_error(e_invalidaccess);
        pop(1);
        return 0;
    }
    status = sflush(s);
    if (status == 0 || status == EOFC) {
        pop(1);
        return 0;
    }
    return (s_is_writing(s) ?
            handle_write_status(i_ctx_p, status, op, NULL, zflushfile) :
            handle_read_status(i_ctx_p, status, op, NULL, zflushfile));
}
/* <obj> xcheck <bool> */
static int
zxcheck(i_ctx_t *i_ctx_p)
{
    os_ptr op = osp;

    check_op(1);
    make_bool(op, (r_has_attr(ACCESS_REF(op), a_executable) ? 1 : 0));
    return 0;
}
int
ztoken(i_ctx_t *i_ctx_p)
{
    os_ptr op = osp;

    switch (r_type(op)) {
        default:
            return_op_typecheck(op);
        case t_file: {
            stream *s;
            scanner_state state;

            check_read_file(i_ctx_p, s, op);
            check_ostack(1);
            gs_scanner_init(&state, op);
            return token_continue(i_ctx_p, &state, true);
        }
        case t_string: {
            ref token;
            /* -1 is to remove the string operand in case of error. */
            int orig_ostack_depth = ref_stack_count(&o_stack) - 1;
            int code;

            /* Don't pop the operand in case of invalidaccess. */
            if (!r_has_attr(op, a_read))
                return_error(e_invalidaccess);
            code = gs_scan_string_token(i_ctx_p, op, &token);
            switch (code) {
                case scan_EOF:	/* no tokens */
                    make_false(op);
                    return 0;
                default:
                    if (code < 0) {
                        /*
                         * Clear anything that may have been left on the
                         * ostack, including the string operand.
                         */
                        if (orig_ostack_depth < ref_stack_count(&o_stack))
                            pop(ref_stack_count(&o_stack) - orig_ostack_depth);
                        return code;
                    }
            }
            push(2);
            op[-1] = token;
            make_true(op);
            return 0;
        }
    }
}
/* The stack underflow check is harmless in the off-stack case. */
int
check_proc_failed(const ref * pref)
{
    if (r_is_array(pref)) {
        if (r_has_attr(pref, a_executable))
            return e_invalidaccess;
        else
            return e_typecheck;
    } else {
        if (r_has_type(pref, t__invalid))
            return e_stackunderflow;
        else
            return e_typecheck;
    }
}
/* Mark a ref. Return true if new mark. */
bool
ptr_ref_mark(enum_ptr_t *pep, gc_state_t * ignored)
{
    ref_packed *rpp = (void *)pep->ptr;

    if (r_is_packed(rpp)) {
        if (r_has_pmark(rpp))
            return false;
        r_set_pmark(rpp);
    } else {
        ref *const pref = (ref *)rpp;

        if (r_has_attr(pref, l_mark))
            return false;
        r_set_attrs(pref, l_mark);
    }
    return true;
}
static int
zsuperexec(i_ctx_t *i_ctx_p)
{
    os_ptr op = osp;
    es_ptr ep;

    check_op(1);
    if (!r_has_attr(op, a_executable))
        return 0;		/* literal object just gets pushed back */
    check_estack(3);		/* three e-stack slots are written below */
    ep = esp += 3;
    make_mark_estack(ep - 2, es_other, end_superexec); /* error case */
    make_op_estack(ep - 1, end_superexec);             /* normal case */
    ref_assign(ep, op);
    esfile_check_cache();
    pop(1);
    i_ctx_p->in_superexec++;
    return o_push_estack;
}
/* <obj> exec - */
int
zexec(i_ctx_t *i_ctx_p)
{
    os_ptr op = osp;
    int code;

    check_op(1);
    code = check_for_exec(op);
    if (code < 0) {
        return code;
    }
    if (!r_has_attr(op, a_executable)) {
        return 0;	/* shortcut, literal object just gets pushed back */
    }
    check_estack(1);
    ++esp;
    ref_assign(esp, op);
    esfile_check_cache();
    pop(1);
    return o_push_estack;
}
ref_packed *
igc_reloc_ref_ptr(const ref_packed * prp, gc_state_t *gcst)
{
    /*
     * Search forward for relocation. This algorithm is intrinsically very
     * inefficient; we hope eventually to replace it with a better one.
     */
    const ref_packed *rp = prp;

#ifdef ALIGNMENT_ALIASING_BUG
    const ref *rpref;
# define RP_REF(rp) (rpref = (const ref *)rp, rpref)
#else
# define RP_REF(rp) ((const ref *)rp)
#endif
    /*
     * Iff this pointer points into a space that wasn't traced,
     * the referent won't be marked. In this case, we shouldn't
     * do any relocation. Check for this first.
     */
    if (r_is_packed(rp)) {
        if (!r_has_pmark(rp))
            goto ret_rp;
    } else {
        if (!r_has_attr(RP_REF(rp), l_mark))
            goto ret_rp;
    }
    return igc_reloc_ref_ptr_nocheck(prp, gcst);
ret_rp:
    /* Use a severely deprecated pun to remove the const property. */
    {
        union { const ref_packed *r; ref_packed *w; } u;

        u.r = rp;
        return u.w;
    }
}
/* untraced space, so relocate all refs, not just marked ones. */ void igc_reloc_refs(ref_packed * from, ref_packed * to, gc_state_t * gcst) { int min_trace = gcst->min_collect; ref_packed *rp = from; bool do_all = gcst->relocating_untraced; vm_spaces spaces = gcst->spaces; const gs_memory_t *cmem = space_system->stable_memory; while (rp < to) { ref *pref; #ifdef DEBUG const void *before = 0; const void *after = 0; # define DO_RELOC(var, stat)\ BEGIN before = (var); stat; after = (var); END # define SET_RELOC(var, expr)\ BEGIN before = (var); after = (var) = (expr); END #else # define DO_RELOC(var, stat) stat # define SET_RELOC(var, expr) var = expr #endif if (r_is_packed(rp)) { rp++; continue; } /* The following assignment is logically unnecessary; */ /* we do it only for convenience in debugging. */ pref = (ref *) rp; if_debug3('8', " [8]relocating %s %d ref at 0x%lx\n", (r_has_attr(pref, l_mark) ? "marked" : "unmarked"), r_btype(pref), (ulong) pref); if ((r_has_attr(pref, l_mark) || do_all) && r_space(pref) >= min_trace ) { switch (r_type(pref)) { /* Struct cases */ case t_file: DO_RELOC(pref->value.pfile, RELOC_VAR(pref->value.pfile)); break; case t_device: DO_RELOC(pref->value.pdevice, RELOC_VAR(pref->value.pdevice)); break; case t_fontID: case t_struct: case t_astruct: DO_RELOC(pref->value.pstruct, RELOC_VAR(pref->value.pstruct)); break; /* Non-trivial non-struct cases */ case t_dictionary: rputc('d'); SET_RELOC(pref->value.pdict, (dict *)igc_reloc_ref_ptr((ref_packed *)pref->value.pdict, gcst)); break; case t_array: { uint size = r_size(pref); if (size != 0) { /* value.refs might be NULL */ /* * If the array is large, we allocated it in its * own object (at least originally -- this might * be a pointer to a subarray.) In this case, * we know it is the only object in its * containing st_refs object, so we know that * the mark containing the relocation appears * just after it. */ if (size < max_size_st_refs / sizeof(ref)) { rputc('a'); SET_RELOC(pref->value.refs, (ref *) igc_reloc_ref_ptr( (ref_packed *) pref->value.refs, gcst)); } else { rputc('A'); /* * See the t_shortarray case below for why we * decrement size. */ --size; SET_RELOC(pref->value.refs, (ref *) igc_reloc_ref_ptr( (ref_packed *) (pref->value.refs + size), gcst) - size); } } } break; case t_mixedarray: if (r_size(pref) != 0) { /* value.refs might be NULL */ rputc('m'); SET_RELOC(pref->value.packed, igc_reloc_ref_ptr(pref->value.packed, gcst)); } break; case t_shortarray: { uint size = r_size(pref); /* * Since we know that igc_reloc_ref_ptr works by * scanning forward, and we know that all the * elements of this array itself are marked, we can * save some scanning time by relocating the pointer * to the end of the array rather than the * beginning. */ if (size != 0) { /* value.refs might be NULL */ rputc('s'); /* * igc_reloc_ref_ptr has to be able to determine * whether the pointer points into a space that * isn't being collected. It does this by * checking whether the referent of the pointer * is marked. For this reason, we have to pass * a pointer to the last real element of the * array, rather than just beyond it. 
*/ --size; SET_RELOC(pref->value.packed, igc_reloc_ref_ptr(pref->value.packed + size, gcst) - size); } } break; case t_name: { void *psub = name_ref_sub_table(cmem, pref); void *rsub = RELOC_OBJ(psub); /* gcst implicit */ SET_RELOC(pref->value.pname, (name *) ((char *)rsub + ((char *)pref->value.pname - (char *)psub))); } break; case t_string: { gs_string str; str.data = pref->value.bytes; str.size = r_size(pref); DO_RELOC(str.data, RELOC_STRING_VAR(str)); pref->value.bytes = str.data; } break; case t_oparray: rputc('o'); SET_RELOC(pref->value.const_refs, (const ref *)igc_reloc_ref_ptr((const ref_packed *)pref->value.const_refs, gcst)); break; default: goto no_reloc; /* don't print trace message */ } if_debug2('8', " [8]relocated 0x%lx => 0x%lx\n", (ulong)before, (ulong)after); } no_reloc: rp += packed_per_ref; } }
/* Set the relocation for a ref object. */ static bool refs_set_reloc(obj_header_t * hdr, uint reloc, uint size) { ref_packed *rp = (ref_packed *) (hdr + 1); ref_packed *end = (ref_packed *) ((byte *) rp + size); uint freed = 0; /* * We have to be careful to keep refs aligned properly. * For the moment, we do this by either keeping or discarding * an entire (aligned) block of align_packed_per_ref packed elements * as a unit. We know that align_packed_per_ref <= packed_per_ref, * and we also know that packed refs are always allocated in blocks * of align_packed_per_ref, so this makes things relatively easy. */ while (rp < end) { if (r_is_packed(rp)) { #if align_packed_per_ref == 1 if (r_has_pmark(rp)) { if_debug1('8', " [8]packed ref 0x%lx is marked\n", (ulong) rp); rp++; } else { #else int i; /* * Note: align_packed_per_ref is typically * 2 or 4 for 32-bit processors. */ #define all_marked (align_packed_per_ref * lp_mark) # if align_packed_per_ref == 2 # if arch_sizeof_int == arch_sizeof_short * 2 # undef all_marked # define all_marked ( (lp_mark << (sizeof(short) * 8)) + lp_mark ) # define marked (*(int *)rp & all_marked) # else # define marked ((*rp & lp_mark) + (rp[1] & lp_mark)) # endif # else # if align_packed_per_ref == 4 # define marked ((*rp & lp_mark) + (rp[1] & lp_mark) +\ (rp[2] & lp_mark) + (rp[3] & lp_mark)) # else /* * The value of marked is logically a uint, not an int: * we declare it as int only to avoid a compiler warning * message about using a non-int value in a switch statement. */ int marked = *rp & lp_mark; for (i = 1; i < align_packed_per_ref; i++) marked += rp[i] & lp_mark; # endif # endif /* * Now marked is lp_mark * the number of marked * packed refs in the aligned block, except for * a couple of special cases above. */ switch (marked) { case all_marked: if_debug2('8', " [8]packed refs 0x%lx..0x%lx are marked\n", (ulong) rp, (ulong) (rp + (align_packed_per_ref - 1))); rp += align_packed_per_ref; break; default: /* At least one packed ref in the block */ /* is marked: Keep the whole block. */ for (i = align_packed_per_ref; i--; rp++) { r_set_pmark(rp); if_debug1('8', " [8]packed ref 0x%lx is marked\n", (ulong) rp); } break; case 0: #endif if_debug2('8', " [8]%d packed ref(s) at 0x%lx are unmarked\n", align_packed_per_ref, (ulong) rp); { uint rel = reloc + freed; /* Change this to an integer so we can */ /* store the relocation here. */ *rp = pt_tag(pt_integer) + min(rel, packed_max_value); } rp += align_packed_per_ref; freed += sizeof(ref_packed) * align_packed_per_ref; } } else { /* full-size ref */ uint rel = reloc + freed; /* The following assignment is logically */ /* unnecessary; we do it only for convenience */ /* in debugging. */ ref *pref = (ref *) rp; if (!r_has_attr(pref, l_mark)) { if_debug1('8', " [8]ref 0x%lx is unmarked\n", (ulong) pref); /* Change this to a mark so we can */ /* store the relocation. */ r_set_type(pref, t_mark); r_set_size(pref, rel); freed += sizeof(ref); } else { if_debug1('8', " [8]ref 0x%lx is marked\n", (ulong) pref); /* Store the relocation here if possible. 
*/ if (!ref_type_uses_size_or_null(r_type(pref))) { if_debug2('8', " [8]storing reloc %u at 0x%lx\n", rel, (ulong) pref); r_set_size(pref, rel); } } rp += packed_per_ref; } } if_debug3('7', " [7]at end of refs 0x%lx, size = %u, freed = %u\n", (ulong) (hdr + 1), size, freed); if (freed == size) return false; #if arch_sizeof_int > arch_sizeof_short /* * If the final relocation can't fit in the r_size field * (which can't happen if the object shares a chunk with * any other objects, so we know reloc = 0 in this case), * we have to keep the entire object unless there are no * references to any ref in it. */ if (freed <= max_ushort) return true; /* * We have to mark all surviving refs, but we also must * overwrite any non-surviving refs with something that * doesn't contain any pointers. */ rp = (ref_packed *) (hdr + 1); while (rp < end) { if (r_is_packed(rp)) { if (!r_has_pmark(rp)) *rp = pt_tag(pt_integer) | lp_mark; ++rp; } else { /* The following assignment is logically */ /* unnecessary; we do it only for convenience */ /* in debugging. */ ref *pref = (ref *) rp; if (!r_has_attr(pref, l_mark)) { r_set_type_attrs(pref, t_mark, l_mark); r_set_size(pref, reloc); } else { if (!ref_type_uses_size_or_null(r_type(pref))) r_set_size(pref, reloc); } rp += packed_per_ref; } } /* The last ref has to remain unmarked. */ r_clear_attrs((ref *) rp - 1, l_mark); #endif return true; }
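/*
 * Standalone sketch (hypothetical constants standing in for lp_mark and
 * align_packed_per_ref) of the counting trick refs_set_reloc uses above:
 * sum the low-order mark bit of every packed ref in an aligned block, so a
 * single comparison distinguishes "all marked", "none marked", and "partly
 * marked" without branching on each element.
 */
#if 0	/* illustrative sketch, not compiled */
#define DEMO_LP_MARK 1u		/* stands in for lp_mark */
#define DEMO_PER_BLOCK 4	/* stands in for align_packed_per_ref */

static int	/* 2 = all marked, 1 = partly marked, 0 = none marked */
demo_block_mark_state(const unsigned short *rp /* DEMO_PER_BLOCK cells */)
{
    unsigned marked = 0;
    int i;

    for (i = 0; i < DEMO_PER_BLOCK; i++)
        marked += rp[i] & DEMO_LP_MARK;
    if (marked == DEMO_PER_BLOCK * DEMO_LP_MARK)
        return 2;
    return marked == 0 ? 0 : 1;
}
#endif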
/* <proc> bind <proc> */
static inline bool
r_is_ex_oper(const ref *rp)
{
    return (r_has_attr(rp, a_executable) &&
            (r_btype(rp) == t_operator || r_type(rp) == t_oparray));
}
/* * Look up a name on the dictionary stack. * Return the pointer to the value if found, 0 if not. */ ref * dstack_find_name_by_index(dict_stack_t * pds, uint nidx) { ds_ptr pdref = pds->stack.p; /* Since we know the hash function is the identity function, */ /* there's no point in allocating a separate variable for it. */ #define hash dict_name_index_hash(nidx) ref_packed kpack = packed_name_key(nidx); do { dict *pdict = pdref->value.pdict; uint size = npairs(pdict); const gs_memory_t *mem = dict_mem(pdict); #ifdef DEBUG if (gs_debug_c('D')) { ref dnref; name_index_ref(mem, nidx, &dnref); dlputs("[D]lookup "); debug_print_name(mem, &dnref); dprintf3(" in 0x%lx(%u/%u)\n", (ulong) pdict, dict_length(pdref), dict_maxlength(pdref)); } #endif #define INCR_DEPTH(pdref)\ INCR(depth[min(MAX_STATS_DEPTH, pds->stack.p - pdref)]) if (dict_is_packed(pdict)) { packed_search_1(INCR_DEPTH(pdref), return packed_search_value_pointer, DO_NOTHING, goto miss); packed_search_2(INCR_DEPTH(pdref), return packed_search_value_pointer, DO_NOTHING, break); miss:; } else { ref *kbot = pdict->keys.value.refs; register ref *kp; int wrap = 0; /* Search the dictionary */ for (kp = kbot + dict_hash_mod(hash, size) + 2;;) { --kp; if (r_has_type(kp, t_name)) { if (name_index(mem, kp) == nidx) { INCR_DEPTH(pdref); return pdict->values.value.refs + (kp - kbot); } } else if (r_has_type(kp, t_null)) { /* Empty, deleted, or wraparound. */ /* Figure out which. */ if (!r_has_attr(kp, a_executable)) break; if (kp == kbot) { /* wrap */ if (wrap++) break; /* 2 wraps */ kp += size + 1; } } } } #undef INCR_DEPTH }
/* ensuring that refs in mixed arrays are properly aligned. */ #undef idmemory /****** NOTA BENE ******/ int make_packed_array(ref * parr, ref_stack_t * pstack, uint size, gs_dual_memory_t *idmemory, client_name_t cname) { uint i; const ref *pref; uint idest = 0, ishort = 0; ref_packed *pbody; ref_packed *pdest; ref_packed *pshort; /* points to start of */ /* last run of short elements */ gs_ref_memory_t *imem = idmemory->current; uint space = imemory_space(imem); int skip = 0, pad; ref rtemp; int code; /* Do a first pass to calculate the size of the array, */ /* and to detect local-into-global stores. */ for (i = size; i != 0; i--) { pref = ref_stack_index(pstack, i - 1); switch (r_btype(pref)) { /* not r_type, opers are special */ case t_name: if (name_index(imem, pref) >= packed_name_max_index) break; /* can't pack */ idest++; continue; case t_integer: if (pref->value.intval < packed_min_intval || pref->value.intval > packed_max_intval ) break; idest++; continue; case t_oparray: /* Check for local-into-global store. */ store_check_space(space, pref); /* falls through */ case t_operator: { uint oidx; if (!r_has_attr(pref, a_executable)) break; oidx = op_index(pref); if (oidx == 0 || oidx > packed_int_mask) break; } idest++; continue; default: /* Check for local-into-global store. */ store_check_space(space, pref); } /* Can't pack this element, use a full ref. */ /* We may have to unpack up to align_packed_per_ref - 1 */ /* preceding short elements. */ /* If we are at the beginning of the array, however, */ /* we can just move the elements up. */ { int i = (idest - ishort) & (align_packed_per_ref - 1); if (ishort == 0) /* first time */ idest += skip = -i & (align_packed_per_ref - 1); else idest += (packed_per_ref - 1) * i; } ishort = idest += packed_per_ref; } pad = -(int)idest & (packed_per_ref - 1); /* padding at end */ /* Now we can allocate the array. */ code = gs_alloc_ref_array(imem, &rtemp, 0, (idest + pad) / packed_per_ref, cname); if (code < 0) return code; pbody = (ref_packed *) rtemp.value.refs; /* Make sure any initial skipped elements contain legal packed */ /* refs, so that the garbage collector can scan storage. */ pshort = pbody; for (; skip; skip--) *pbody++ = pt_tag(pt_integer); pdest = pbody; for (i = size; i != 0; i--) { pref = ref_stack_index(pstack, i - 1); switch (r_btype(pref)) { /* not r_type, opers are special */ case t_name: { uint nidx = name_index(imem, pref); if (nidx >= packed_name_max_index) break; /* can't pack */ *pdest++ = nidx + (r_has_attr(pref, a_executable) ? pt_tag(pt_executable_name) : pt_tag(pt_literal_name)); } continue; case t_integer: if (pref->value.intval < packed_min_intval || pref->value.intval > packed_max_intval ) break; *pdest++ = pt_tag(pt_integer) + ((short)pref->value.intval - packed_min_intval); continue; case t_oparray: case t_operator: { uint oidx; if (!r_has_attr(pref, a_executable)) break; oidx = op_index(pref); if (oidx == 0 || oidx > packed_int_mask) break; *pdest++ = pt_tag(pt_executable_operator) + oidx; } continue; } /* Can't pack this element, use a full ref. */ /* We may have to unpack up to align_packed_per_ref - 1 */ /* preceding short elements. */ /* Note that if we are at the beginning of the array, */ /* 'skip' already ensures that we don't need to do this. 
*/ { int i = (pdest - pshort) & (align_packed_per_ref - 1); const ref_packed *psrc = pdest; ref *pmove = (ref *) (pdest += (packed_per_ref - 1) * i); ref_assign_new(pmove, pref); while (--i >= 0) { --psrc; --pmove; packed_get(imem->non_gc_memory, psrc, pmove); } } pshort = pdest += packed_per_ref; } { int atype = (pdest == pbody + size ? t_shortarray : t_mixedarray); /* Pad with legal packed refs so that the garbage collector */ /* can scan storage. */ for (; pad; pad--) *pdest++ = pt_tag(pt_integer); /* Finally, make the array. */ ref_stack_pop(pstack, size); make_tasv_new(parr, atype, a_readonly | space, size, packed, pbody + skip); } return 0; }
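/*
 * Standalone sketch of the small-integer packing performed by
 * make_packed_array above. The tag and bias values here are hypothetical
 * stand-ins (the real ones come from the packed-ref header): the integer is
 * offset by the minimum packable value so it becomes a non-negative field,
 * and the type tag occupies the high bits of one 16-bit packed cell.
 */
#if 0	/* illustrative sketch, not compiled */
#define DEMO_PT_INTEGER 0x8000u	/* stand-in for pt_tag(pt_integer) */
#define DEMO_MIN_INTVAL (-4000)	/* stand-in for packed_min_intval */

static unsigned short
demo_pack_int(long v)	/* caller has already range-checked v */
{
    return (unsigned short)(DEMO_PT_INTEGER + (v - DEMO_MIN_INTVAL));
}

static long
demo_unpack_int(unsigned short cell)
{
    return (long)(cell - DEMO_PT_INTEGER) + DEMO_MIN_INTVAL;
}
#endif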
/* Remove an element from a dictionary. */ int dict_undef(ref * pdref, const ref * pkey, dict_stack_t *pds) { gs_ref_memory_t *mem; ref *pvslot; dict *pdict; uint index; int code = dict_find(pdref, pkey, &pvslot); switch (code) { case 0: case gs_error_dictfull: return_error(gs_error_undefined); case 1: break; default: /* other error */ return code; } /* Remove the entry from the dictionary. */ pdict = pdref->value.pdict; index = pvslot - pdict->values.value.refs; mem = dict_memory(pdict); if (dict_is_packed(pdict)) { ref_packed *pkp = pdict->keys.value.writable_packed + index; bool must_save = ref_must_save_in(mem, &pdict->keys); if_debug3m('d', (const gs_memory_t *)mem, "[d]0x%lx: removing key at 0%lx: 0x%x\n", (ulong)pdict, (ulong)pkp, (uint)*pkp); /* See the initial comment for why it is safe not to save */ /* the change if the keys array itself is new. */ if (must_save) ref_do_save_in(mem, &pdict->keys, pkp, "dict_undef(key)"); /* * Accumulating deleted entries slows down lookup. * Detect the easy case where we can use an empty entry * rather than a deleted one, namely, when the next entry * in the probe order is empty. */ if (pkp[-1] == packed_key_empty) { /* * In this case we can replace any preceding deleted keys with * empty ones as well. */ uint end = nslots(pdict); *pkp = packed_key_empty; if (must_save) { while (++index < end && *++pkp == packed_key_deleted) { ref_do_save_in(mem, &pdict->keys, pkp, "dict_undef(key)"); *pkp = packed_key_empty; } } else { while (++index < end && *++pkp == packed_key_deleted) *pkp = packed_key_empty; } } else *pkp = packed_key_deleted; } else { /* not packed */ ref *kp = pdict->keys.value.refs + index; if_debug4m('d', (const gs_memory_t *)mem, "[d]0x%lx: removing key at 0%lx: 0x%lx 0x%lx\n", (ulong)pdict, (ulong)kp, ((ulong *)kp)[0], ((ulong *)kp)[1]); make_null_old_in(mem, &pdict->keys, kp, "dict_undef(key)"); /* * Accumulating deleted entries slows down lookup. * Detect the easy case where we can use an empty entry * rather than a deleted one, namely, when the next entry * in the probe order is empty. */ if (!r_has_type(kp - 1, t_null) || /* full entry */ r_has_attr(kp - 1, a_executable) /* deleted or wraparound */ ) r_set_attrs(kp, a_executable); /* mark as deleted */ } ref_save_in(mem, pdref, &pdict->count, "dict_undef(count)"); pdict->count.value.intval--; /* If the key is a name, update its 1-element cache. */ if (r_has_type(pkey, t_name)) { name *pname = pkey->value.pname; if (pv_valid(pname->pvalue)) { #ifdef DEBUG /* Check the the cache is correct. */ if (!(pds && dstack_dict_is_permanent(pds, pdref))) lprintf1("dict_undef: cached name value pointer 0x%lx is incorrect!\n", (ulong) pname->pvalue); #endif /* Clear the cache */ pname->pvalue = pv_no_defn; } } make_null_old_in(mem, &pdict->values, pvslot, "dict_undef(value)"); return 0; }
/* * Enter a key-value pair in a dictionary. * See idict.h for the possible return values. */ int dict_put(ref * pdref /* t_dictionary */ , const ref * pkey, const ref * pvalue, dict_stack_t *pds) { dict *pdict = pdref->value.pdict; gs_ref_memory_t *mem = dict_memory(pdict); gs_memory_t *pmem = dict_mem(pdict); int rcode = 0; int code; ref *pvslot, kname; /* Check the value. */ store_check_dest(pdref, pvalue); top:if ((code = dict_find(pdref, pkey, &pvslot)) <= 0) { /* not found *//* Check for overflow */ uint index; switch (code) { case 0: break; case gs_error_dictfull: if (!pmem->gs_lib_ctx->dict_auto_expand) return_error(gs_error_dictfull); code = dict_grow(pdref, pds); if (code < 0) return code; goto top; /* keep things simple */ default: /* gs_error_typecheck */ return code; } index = pvslot - pdict->values.value.refs; /* If the key is a string, convert it to a name. */ if (r_has_type(pkey, t_string)) { int code; if (!r_has_attr(pkey, a_read)) return_error(gs_error_invalidaccess); code = name_from_string(pmem, pkey, &kname); if (code < 0) return code; pkey = &kname; } if (dict_is_packed(pdict)) { ref_packed *kp; if (!r_has_type(pkey, t_name) || name_index(pmem, pkey) > packed_name_max_index ) { /* Change to unpacked representation. */ int code = dict_unpack(pdref, pds); if (code < 0) return code; goto top; } kp = pdict->keys.value.writable_packed + index; if (ref_must_save_in(mem, &pdict->keys)) { /* See initial comment for why it is safe */ /* not to save the change if the keys */ /* array itself is new. */ ref_do_save_in(mem, &pdict->keys, kp, "dict_put(key)"); } *kp = pt_tag(pt_literal_name) + name_index(pmem, pkey); } else { ref *kp = pdict->keys.value.refs + index; if_debug2m('d', (const gs_memory_t *)mem, "[d]0x%lx: fill key at 0x%lx\n", (ulong) pdict, (ulong) kp); store_check_dest(pdref, pkey); ref_assign_old_in(mem, &pdict->keys, kp, pkey, "dict_put(key)"); /* set key of pair */ } ref_save_in(mem, pdref, &pdict->count, "dict_put(count)"); pdict->count.value.intval++; /* If the key is a name, update its 1-element cache. */ if (r_has_type(pkey, t_name)) { name *pname = pkey->value.pname; if (pname->pvalue == pv_no_defn && CAN_SET_PVALUE_CACHE(pds, pdref, mem) ) { /* Set the cache. */ if_debug0m('d', (const gs_memory_t *)mem, "[d]set cache\n"); pname->pvalue = pvslot; } else { /* The cache can't be used. */ if_debug0m('d', (const gs_memory_t *)mem, "[d]no cache\n"); pname->pvalue = pv_other; } } rcode = 1; } if_debug8m('d', (const gs_memory_t *)mem, "[d]0x%lx: put key 0x%lx 0x%lx\n value at 0x%lx: old 0x%lx 0x%lx, new 0x%lx 0x%lx\n", (ulong) pdref->value.pdict, ((const ulong *)pkey)[0], ((const ulong *)pkey)[1], (ulong) pvslot, ((const ulong *)pvslot)[0], ((const ulong *)pvslot)[1], ((const ulong *)pvalue)[0], ((const ulong *)pvalue)[1]); ref_assign_old_in(mem, &pdref->value.pdict->values, pvslot, pvalue, "dict_put(value)"); return rcode; }
/* * Look up a key in a dictionary. Store a pointer to the value slot * where found, or to the (value) slot for inserting. * See idict.h for the possible return values. */ int dict_find(const ref * pdref, const ref * pkey, ref ** ppvalue /* result is stored here */ ) { dict *pdict = pdref->value.pdict; uint size = npairs(pdict); register int etype; uint nidx; ref_packed kpack; uint hash; int ktype; const gs_memory_t *mem = dict_mem(pdict); /* Compute hash. The only types we bother with are strings, */ /* names, and (unlikely, but worth checking for) integers. */ switch (r_type(pkey)) { case t_name: nidx = name_index(mem, pkey); nh: hash = dict_name_index_hash(nidx); kpack = packed_name_key(nidx); ktype = t_name; break; case t_string: /* convert to a name first */ { ref nref; int code; if (!r_has_attr(pkey, a_read)) return_error(gs_error_invalidaccess); code = name_ref(mem, pkey->value.bytes, r_size(pkey), &nref, 1); if (code < 0) return code; nidx = name_index(mem, &nref); } goto nh; case t_real: /* * Make sure that equal reals and integers hash the same. */ { int expt, i; double mant = frexp(pkey->value.realval, &expt); /* * The value is mant * 2^expt, where 0.5 <= mant < 1, * or else expt == mant == 0. */ if (expt < sizeof(long) * 8 || pkey->value.realval == min_long) i = (int)pkey->value.realval; else i = (int)(mant * min_long); /* MSVC 6.00.8168.0 cannot compile this */ hash = (uint)i * 30503; /* with -O2 as a single expression */ } goto ih; case t_integer: hash = (uint)pkey->value.intval * 30503; ih: kpack = packed_key_impossible; ktype = -1; nidx = 0; /* only to pacify gcc */ break; case t_null: /* not allowed as a key */ return_error(gs_error_typecheck); default: hash = r_btype(pkey) * 99; /* yech */ kpack = packed_key_impossible; ktype = -1; nidx = 0; /* only to pacify gcc */ } /* Search the dictionary */ if (dict_is_packed(pdict)) { const ref_packed *pslot = 0; # define found *ppvalue = packed_search_value_pointer; return 1 # define deleted if (pslot == 0) pslot = kp # define missing goto miss # include "idicttpl.h" # undef missing # undef deleted # undef found /* * Double wraparound, dict is full. * Note that even if there was an empty slot (pslot != 0), * we must return dictfull if length = maxlength. */ if (pslot == 0 || d_length(pdict) == d_maxlength(pdict)) return_error(gs_error_dictfull); *ppvalue = pdict->values.value.refs + (pslot - kbot); return 0; miss: /* Key is missing, not double wrap. See above re dictfull. */ if (d_length(pdict) == d_maxlength(pdict)) return_error(gs_error_dictfull); if (pslot == 0) pslot = kp; *ppvalue = pdict->values.value.refs + (pslot - kbot); return 0; } else { ref *kbot = pdict->keys.value.refs; register ref *kp; ref *pslot = 0; int wrap = 0; for (kp = kbot + dict_hash_mod(hash, size) + 2;;) { --kp; if ((etype = r_type(kp)) == ktype) { /* Fast comparison if both keys are names */ if (name_index(mem, kp) == nidx) { *ppvalue = pdict->values.value.refs + (kp - kbot); return 1; } } else if (etype == t_null) { /* Empty, deleted, or wraparound. */ /* Figure out which. */ if (kp == kbot) { /* wrap */ if (wrap++) { /* wrapped twice */ if (pslot == 0) return_error(gs_error_dictfull); break; } kp += size + 1; } else if (r_has_attr(kp, a_executable)) { /* Deleted entry, save the slot. 
*/ if (pslot == 0) pslot = kp; } else /* key not found */ break; } else { if (obj_eq(mem, kp, pkey)) { *ppvalue = pdict->values.value.refs + (kp - kbot); return 1; } } } if (d_length(pdict) == d_maxlength(pdict)) return_error(gs_error_dictfull); *ppvalue = pdict->values.value.refs + ((pslot != 0 ? pslot : kp) - kbot); return 0; } }
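/*
 * Two standalone sketches of techniques used by dict_find above; the types
 * and helper names are hypothetical, and the constant 30503 is simply the
 * one the code above uses.
 *
 * 1. Equal reals and integers must hash alike, so both are reduced to the
 *    same int before the multiplicative hash (only the easy case of values
 *    that fit in an int is shown here).
 * 2. The probe loop walks downward from the hashed slot, treats an empty
 *    slot as "stop", remembers the first deleted slot so a later insertion
 *    can reuse it, and wraps around the table at most twice.
 */
#if 0	/* illustrative sketch, not compiled */
static unsigned
demo_hash_int(long v)
{
    return (unsigned)v * 30503;
}

static unsigned
demo_hash_real(double v)	/* only for |v| small enough to fit in an int */
{
    return (unsigned)(int)v * 30503;	/* demo_hash_real(3.0) == demo_hash_int(3) */
}

typedef struct demo_slot_s {
    int state;			/* 0 = empty, 1 = deleted, 2 = occupied */
    unsigned key;
} demo_slot;

/* Return the occupied slot holding key, or 0 if absent; in the absent case
   *pinsert is set to a usable slot (a remembered deleted slot if any,
   otherwise the terminating empty slot), or to 0 if the table is full. */
static demo_slot *
demo_probe(demo_slot *tab, unsigned size, unsigned key, unsigned hash,
           demo_slot **pinsert)
{
    demo_slot *bot = tab, *p = bot + hash % size + 1;
    demo_slot *deleted = 0;
    int wrap = 0;

    for (;;) {
        --p;
        if (p->state == 2) {
            if (p->key == key)
                return p;			/* hit */
        } else if (p->state == 0) {
            *pinsert = deleted ? deleted : p;	/* empty slot stops the probe */
            return 0;
        } else if (deleted == 0) {
            deleted = p;			/* remember first deleted slot */
        }
        if (p == bot) {				/* wraparound */
            if (wrap++) {
                *pinsert = deleted;		/* wrapped twice: not present */
                return 0;
            }
            p += size;
        }
    }
}
#endif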
int obj_cvp(const ref * op, byte * str, uint len, uint * prlen, int full_print, uint start_pos, const gs_memory_t *mem, bool restart) { char buf[50]; /* big enough for any float, double, or struct name */ const byte *data = (const byte *)buf; uint size; int code; ref nref; if (full_print) { static const char * const type_strings[] = { REF_TYPE_PRINT_STRINGS }; switch (r_btype(op)) { case t_boolean: case t_integer: break; case t_real: { /* * To get fully accurate output results for IEEE * single-precision floats (24 bits of mantissa), the ANSI %g * default of 6 digits is not enough; 9 are needed. * Unfortunately, using %.9g for floats (as opposed to doubles) * produces unfortunate artifacts such as 0.01 5 mul printing as * 0.049999997. Therefore, we print using %g, and if the result * isn't accurate enough, print again using %.9g. * Unfortunately, a few PostScript programs 'know' that the * printed representation of floats fits into 6 digits (e.g., * with cvs). We resolve this by letting cvs, cvrs, and = do * what the Adobe interpreters appear to do (use %g), and only * produce accurate output for ==, for which there is no * analogue of cvs. What a hack! */ float value = op->value.realval; float scanned; sprintf(buf, "%g", value); sscanf(buf, "%f", &scanned); if (scanned != value) sprintf(buf, "%.9g", value); ensure_dot(buf); goto rs; } case t_operator: case t_oparray: code = obj_cvp(op, (byte *)buf + 2, sizeof(buf) - 4, &size, 0, 0, mem, restart); if (code < 0) return code; buf[0] = buf[1] = buf[size + 2] = buf[size + 3] = '-'; size += 4; goto nl; case t_name: if (r_has_attr(op, a_executable)) { code = obj_string_data(mem, op, &data, &size); if (code < 0) return code; goto nl; } if (start_pos > 0) return obj_cvp(op, str, len, prlen, 0, start_pos - 1, mem, restart); if (len < 1) return_error(e_rangecheck); code = obj_cvp(op, str + 1, len - 1, prlen, 0, 0, mem, restart); if (code < 0) return code; str[0] = '/'; ++*prlen; return code; case t_null: data = (const byte *)"null"; goto rs; case t_string: if (!r_has_attr(op, a_read)) goto other; size = r_size(op); { bool truncate = (full_print == 1 && size > CVP_MAX_STRING); stream_cursor_read r; stream_cursor_write w; uint skip; byte *wstr; uint len1; int status = 1; if (start_pos == 0) { if (len < 1) return_error(e_rangecheck); str[0] = '('; skip = 0; wstr = str + 1; } else { skip = start_pos - 1; wstr = str; } len1 = len + (str - wstr); r.ptr = op->value.const_bytes - 1; r.limit = r.ptr + (truncate ? CVP_MAX_STRING : size); while (skip && status == 1) { uint written; w.ptr = (byte *)buf - 1; w.limit = w.ptr + min(skip + len1, sizeof(buf)); status = s_PSSE_template.process(NULL, &r, &w, false); written = w.ptr - ((byte *)buf - 1); if (written > skip) { written -= skip; memcpy(wstr, buf + skip, written); wstr += written; skip = 0; break; } skip -= written; } /* * We can reach here with status == 0 (and skip != 0) if * start_pos lies within the trailing ")" or "...)". */ if (status == 0) { #ifdef DEBUG if (skip > (truncate ? 
4 : 1)) { return_error(e_Fatal); } #endif } w.ptr = wstr - 1; w.limit = str - 1 + len; if (status == 1) status = s_PSSE_template.process(NULL, &r, &w, false); *prlen = w.ptr - (str - 1); if (status != 0) return 1; if (truncate) { if (len - *prlen < 4 - skip) return 1; memcpy(w.ptr + 1, "...)" + skip, 4 - skip); *prlen += 4 - skip; } else { if (len - *prlen < 1 - skip) return 1; memcpy(w.ptr + 1, ")" + skip, 1 - skip); *prlen += 1 - skip; } } return 0; case t_astruct: case t_struct: if (r_is_foreign(op)) { /* gs_object_type may not work. */ data = (const byte *)"-foreign-struct-"; goto rs; } if (!mem) { data = (const byte *)"-(struct)-"; goto rs; } data = (const byte *) gs_struct_type_name_string( gs_object_type(mem, (const obj_header_t *)op->value.pstruct)); size = strlen((const char *)data); if (size > 4 && !memcmp(data + size - 4, "type", 4)) size -= 4; if (size > sizeof(buf) - 2) return_error(e_rangecheck); buf[0] = '-'; memcpy(buf + 1, data, size); buf[size + 1] = '-'; size += 2; data = (const byte *)buf; goto nl; default: other: { int rtype = r_btype(op); if (rtype > countof(type_strings)) return_error(e_rangecheck); data = (const byte *)type_strings[rtype]; if (data == 0) return_error(e_rangecheck); } goto rs; } } /* full_print = 0 */ switch (r_btype(op)) { case t_boolean: data = (const byte *)(op->value.boolval ? "true" : "false"); break; case t_integer: sprintf(buf, "%ld", op->value.intval); break; case t_string: check_read(*op); /* falls through */ case t_name: code = obj_string_data(mem, op, &data, &size); if (code < 0) return code; goto nl; case t_oparray: { uint index = op_index(op); const op_array_table *opt = op_index_op_array_table(index); name_index_ref(mem, opt->nx_table[index - opt->base_index], &nref); name_string_ref(mem, &nref, &nref); code = obj_string_data(mem, &nref, &data, &size); if (code < 0) return code; goto nl; } case t_operator: { /* Recover the name from the initialization table. */ uint index = op_index(op); /* * Check the validity of the index. (An out-of-bounds index * is only possible when examining an invalid object using * the debugger.) */ if (index > 0 && index < op_def_count) { data = (const byte *)(op_index_def(index)->oname + 1); break; } /* Internal operator, no name. */ sprintf(buf, "@0x%lx", (ulong) op->value.opproc); break; } case t_real: /* * The value 0.0001 is a boundary case that the Adobe interpreters * print in f-format but at least some gs versions print in * e-format, presumably because of differences in the underlying C * library implementation. Work around this here. */ if (op->value.realval == (float)0.0001) { strcpy(buf, "0.0001"); } else { sprintf(buf, "%g", op->value.realval); } ensure_dot(buf); break; default: data = (const byte *)"--nostringval--"; } rs: size = strlen((const char *)data); nl: if (size < start_pos) return_error(e_rangecheck); if (!restart && size > len) return_error(e_rangecheck); size -= start_pos; *prlen = min(size, len); memmove(str, data + start_pos, *prlen); return (size > len); }
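/*
 * Standalone sketch of the float-printing fallback described in the t_real
 * case of the full_print branch above: emit %g first, and only fall back to
 * the fully accurate %.9g when the short form does not scan back to the
 * identical float.
 */
#if 0	/* illustrative sketch, not compiled */
#include <stdio.h>

static void
demo_print_float(char *buf /* at least 32 bytes */, float value)
{
    float scanned;

    sprintf(buf, "%g", value);
    sscanf(buf, "%f", &scanned);
    if (scanned != value)	/* %g lost precision: use 9 significant digits */
        sprintf(buf, "%.9g", value);
}
#endif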
/* Remove the marks at the same time. */ static void refs_compact(const gs_memory_t *mem, obj_header_t * pre, obj_header_t * dpre, uint size) { ref_packed *dest; ref_packed *src; ref_packed *end; uint new_size; /* The next switch controls an optimization for the loop termination condition. It was useful during the development, when some assumptions were temporary wrong. We keep it for records. */ src = (ref_packed *) (pre + 1); end = (ref_packed *) ((byte *) src + size); /* * We know that a block of refs always ends with a * full-size ref, so we only need to check for reaching the end * of the block when we see one of those. */ if (dpre == pre) /* Loop while we don't need to copy. */ for (;;) { if (r_is_packed(src)) { if (!r_has_pmark(src)) break; if_debug1('8', " [8]packed ref 0x%lx \"copied\"\n", (ulong) src); *src &= ~lp_mark; src++; } else { /* full-size ref */ ref *const pref = (ref *)src; if (!r_has_attr(pref, l_mark)) break; if_debug1('8', " [8]ref 0x%lx \"copied\"\n", (ulong) src); r_clear_attrs(pref, l_mark); src += packed_per_ref; } } else *dpre = *pre; dest = (ref_packed *) ((char *)dpre + ((char *)src - (char *)pre)); for (;;) { if (r_is_packed(src)) { if (r_has_pmark(src)) { if_debug2('8', " [8]packed ref 0x%lx copied to 0x%lx\n", (ulong) src, (ulong) dest); *dest++ = *src & ~lp_mark; } src++; } else { /* full-size ref */ if (r_has_attr((ref *) src, l_mark)) { ref rtemp; if_debug2('8', " [8]ref 0x%lx copied to 0x%lx\n", (ulong) src, (ulong) dest); /* We can't just use ref_assign_inline, */ /* because the source and destination */ /* might overlap! */ ref_assign_inline(&rtemp, (ref *) src); r_clear_attrs(&rtemp, l_mark); ref_assign_inline((ref *) dest, &rtemp); src += packed_per_ref; dest += packed_per_ref; } else { /* check for end of block */ src += packed_per_ref; if (src >= end) break; } } } new_size = (byte *) dest - (byte *) (dpre + 1) + sizeof(ref); #ifdef DEBUG /* Check that the relocation came out OK. */ /* NOTE: this check only works within a single chunk. */ if ((byte *) src - (byte *) dest != r_size((ref *) src - 1) + sizeof(ref)) { lprintf3("Reloc error for refs 0x%lx: reloc = %lu, stored = %u\n", (ulong) dpre, (ulong) ((byte *) src - (byte *) dest), (uint) r_size((ref *) src - 1)); gs_abort(mem); } #endif /* Pad to a multiple of sizeof(ref). */ while (new_size & (sizeof(ref) - 1)) *dest++ = pt_tag(pt_integer), new_size += sizeof(ref_packed); /* We want to make the newly freed space into a free block, */ /* but we can only do this if we have enough room. */ if (size - new_size < sizeof(obj_header_t)) { /* Not enough room. Pad to original size. */ while (new_size < size) *dest++ = pt_tag(pt_integer), new_size += sizeof(ref_packed); } else { obj_header_t *pfree = (obj_header_t *) ((ref *) dest + 1); pfree->o_alone = 0; pfree->o_size = size - new_size - sizeof(obj_header_t); pfree->o_type = &st_bytes; } /* Re-create the final ref. */ r_set_type((ref *) dest, t_integer); dpre->o_size = new_size; }
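/*
 * Standalone sketch (plain array of unsigned cells, hypothetical mark bit)
 * of the compaction pattern refs_compact applies above: scan the block
 * once, copy each surviving (marked) cell down over the dead ones, and
 * clear the mark as the survivor is written out.
 */
#if 0	/* illustrative sketch, not compiled */
#define DEMO_MARK 1u

static unsigned		/* returns the new number of live cells */
demo_compact(unsigned *cells, unsigned count)
{
    unsigned src, dst = 0;

    for (src = 0; src < count; src++)
        if (cells[src] & DEMO_MARK)
            cells[dst++] = cells[src] & ~DEMO_MARK;	/* keep and unmark */
    return dst;
}
#endif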
static int zmatchmedia(i_ctx_t *i_ctx_p) { os_ptr op = osp; os_ptr preq = op - 3; os_ptr pattr = op - 2; os_ptr ppol = op - 1; os_ptr pkeys = op; /* *const */ int policy_default; float best_mismatch = (float)max_long; /* adhoc */ float mepos_penalty; float mbest = best_mismatch; match_record_t match; ref no_priority; ref *ppriority; int mepos, orient; bool roll; int code; int ai; struct mkd_ { ref key, dict; } aelt; if (r_has_type(pattr, t_null)) { check_op(4); make_null(op - 3); make_true(op - 2); pop(2); return 0; } check_type(*preq, t_dictionary); check_dict_read(*preq); check_type(*pattr, t_dictionary); check_dict_read(*pattr); check_type(*ppol, t_dictionary); check_dict_read(*ppol); check_array(*pkeys); check_read(*pkeys); switch (code = dict_int_null_param(preq, "MediaPosition", 0, 0x7fff, 0, &mepos)) { default: return code; case 2: case 1: mepos = -1; case 0:; } switch (code = dict_int_null_param(preq, "Orientation", 0, 3, 0, &orient)) { default: return code; case 2: case 1: orient = -1; case 0:; } code = dict_bool_param(preq, "RollFedMedia", false, &roll); if (code < 0) return code; code = dict_int_param(ppol, "PolicyNotFound", 0, 7, 0, &policy_default); if (code < 0) return code; if (dict_find_string(pattr, "Priority", &ppriority) > 0) { check_array_only(*ppriority); check_read(*ppriority); } else { make_empty_array(&no_priority, a_readonly); ppriority = &no_priority; } match.no_match_priority = r_size(ppriority); reset_match(&match); for (ai = dict_first(pattr); (ai = dict_next(pattr, ai, (ref * /*[2]*/)&aelt)) >= 0; ) { if (r_has_type(&aelt.dict, t_dictionary) && r_has_attr(dict_access_ref(&aelt.dict), a_read) && r_has_type(&aelt.key, t_integer) ) { bool match_all; uint ki, pi; code = dict_bool_param(&aelt.dict, "MatchAll", false, &match_all); if (code < 0) return code; for (ki = 0; ki < r_size(pkeys); ki++) { ref key; ref kstr; ref *prvalue; ref *pmvalue; ref *ppvalue; int policy; array_get(imemory, pkeys, ki, &key); if (dict_find(&aelt.dict, &key, &pmvalue) <= 0) continue; if (dict_find(preq, &key, &prvalue) <= 0 || r_has_type(prvalue, t_null) ) { if (match_all) goto no; else continue; } /* Look for the Policies entry for this key. */ if (dict_find(ppol, &key, &ppvalue) > 0) { check_type_only(*ppvalue, t_integer); policy = ppvalue->value.intval; } else policy = policy_default; /* * Match a requested attribute value with the attribute value in the * description of a medium. For all attributes except PageSize, * matching means equality. PageSize is special; see match_page_size * below. */ if (r_has_type(&key, t_name) && (name_string_ref(imemory, &key, &kstr), r_size(&kstr) == 8 && !memcmp(kstr.value.bytes, "PageSize", 8)) ) { gs_matrix ignore_mat; gs_point ignore_msize; if (zmatch_page_size(imemory, prvalue, pmvalue, policy, orient, roll, &best_mismatch, &ignore_mat, &ignore_msize) <= 0) goto no; } else if (!obj_eq(imemory, prvalue, pmvalue)) goto no; } mepos_penalty = (mepos < 0 || aelt.key.value.intval == mepos) ? 0 : .001; /* We have a match. Save the match in case no better match is found */ if (r_has_type(&match.match_key, t_null)) match.match_key = aelt.key; /* * If it is a better match than the current best it supersedes it * regardless of priority. If the match is the same, then update * to the current only if the key value is lower. 
*/ if (best_mismatch + mepos_penalty <= mbest) { if (best_mismatch + mepos_penalty < mbest || (r_has_type(&match.match_key, t_integer) && match.match_key.value.intval > aelt.key.value.intval)) { reset_match(&match); match.match_key = aelt.key; mbest = best_mismatch + mepos_penalty; } } /* In case of a tie, see if the new match has priority. */ for (pi = match.priority; pi > 0;) { ref pri; pi--; array_get(imemory, ppriority, pi, &pri); if (obj_eq(imemory, &aelt.key, &pri)) { /* Yes, higher priority. */ match.best_key = aelt.key; match.priority = pi; break; } } no:; } } if (r_has_type(&match.match_key, t_null)) { make_false(op - 3); pop(3); } else { if (r_has_type(&match.best_key, t_null)) op[-3] = match.match_key; else op[-3] = match.best_key; make_true(op - 2); pop(2); } return 0; }
/* Append a user path to the current path. */ static inline int upath_append_aux(os_ptr oppath, i_ctx_t *i_ctx_p, int *pnargs, bool upath_compat) { upath_state ups = UPS_INITIAL; ref opcodes; if (r_has_type(oppath, t__invalid)) return_error(e_stackunderflow); if (!r_is_array(oppath)) return_error(e_typecheck); check_read(*oppath); gs_newpath(igs); /****** ROUND tx AND ty ******/ if ( r_size(oppath) == 2 && array_get(imemory, oppath, 1, &opcodes) >= 0 && r_has_type(&opcodes, t_string) ) { /* 1st element is operands, 2nd is operators */ ref operands; int code, format; int repcount = 1; const byte *opp; uint ocount, i = 0; array_get(imemory, oppath, 0, &operands); code = num_array_format(&operands); if (code < 0) return code; format = code; check_read(opcodes); opp = opcodes.value.bytes; ocount = r_size(&opcodes); while (ocount--) { byte opx = *opp++; if (opx > UPATH_REPEAT) repcount = opx - UPATH_REPEAT; else if (opx > UPATH_MAX_OP) return_error(e_rangecheck); else { /* operator */ const up_data_t data = up_data[opx]; *pnargs = 0; /* in case of error */ if (upath_compat && opx == upath_op_ucache) { /* CPSI does not complain about incorrect ucache placement, even though PLRM3 says it's illegal. */ ups = ups > UPS_UCACHE ? ups : data.state_after; } else { if (!(ups & data.states_before)) return_error(e_typecheck); ups = data.state_after; } do { os_ptr op = osp; byte opargs = data.num_args; while (opargs--) { push(1); (*pnargs)++; /* in case of error */ code = num_array_get(imemory, &operands, format, i++, op); switch (code) { case t_integer: r_set_type_attrs(op, t_integer, 0); break; case t_real: r_set_type_attrs(op, t_real, 0); break; default: return_error(e_typecheck); } } code = (*up_ops[opx])(i_ctx_p); if (code < 0) return code; } while (--repcount); repcount = 1; } } } else { /* Ordinary executable array. */ const ref *arp = oppath; uint ocount = r_size(oppath); long index = 0; int argcount = 0; op_proc_t oproc; int opx, code; for (; index < ocount; index++) { ref rup; ref *defp; os_ptr op = osp; up_data_t data; *pnargs = argcount; array_get(imemory, arp, index, &rup); switch (r_type(&rup)) { case t_integer: case t_real: argcount++; push(1); *op = rup; break; case t_name: if (!r_has_attr(&rup, a_executable) || dict_find(systemdict, &rup, &defp) <= 0 || r_btype(defp) != t_operator) return_error(e_typecheck); /* all errors = typecheck */ goto xop; case t_operator: defp = &rup; xop:if (!r_has_attr(defp, a_executable)) return_error(e_typecheck); oproc = real_opproc(defp); for (opx = 0; opx <= UPATH_MAX_OP; opx++) if (oproc == up_ops[opx]) break; if (opx > UPATH_MAX_OP) return_error(e_typecheck); data = up_data[opx]; if (argcount != data.num_args) return_error(e_typecheck); if (upath_compat && opx == upath_op_ucache) { /* CPSI does not complain about incorrect ucache placement, even though PLRM3 says it's illegal. */ ups = ups > UPS_UCACHE ? ups : data.state_after; } else { if (!(ups & data.states_before)) return_error(e_typecheck); ups = data.state_after; } code = (*up_ops[opx])(i_ctx_p); if (code < 0) { if (code == e_nocurrentpoint) return_error(e_rangecheck); /* CET 11-22 */ return code; } argcount = 0; break; default: return_error(e_typecheck); } } if (argcount) { *pnargs = argcount; return_error(e_typecheck); /* leftover args */ } } if (ups < UPS_SETBBOX) return_error(e_typecheck); /* no setbbox */ if (ups == UPS_SETBBOX && upath_compat) { /* * In CPSI compatibility mode, an empty path with a setbbox also * does a moveto (but only if the path is empty). 
Since setbbox * was the last operator, its operands are still on the o-stack. */ osp += 2; return zmoveto(i_ctx_p); } return 0; }
static int zbind(i_ctx_t *i_ctx_p) { os_ptr op = osp; uint depth = 1; ref defn; register os_ptr bsp; switch (r_type(op)) { case t_array: if (!r_has_attr(op, a_write)) { return 0; /* per PLRM3 */ } case t_mixedarray: case t_shortarray: defn = *op; break; case t_oparray: defn = *op->value.const_refs; break; default: return_op_typecheck(op); } push(1); *op = defn; bsp = op; /* * We must not make the top-level procedure read-only, * but we must bind it even if it is read-only already. * * Here are the invariants for the following loop: * `depth' elements have been pushed on the ostack; * For i < depth, p = ref_stack_index(&o_stack, i): * *p is an array (or packedarray) ref. */ while (depth) { while (r_size(bsp)) { ref_packed *const tpp = (ref_packed *)bsp->value.packed; /* break const */ r_dec_size(bsp, 1); if (r_is_packed(tpp)) { /* Check for a packed executable name */ ushort elt = *tpp; if (r_packed_is_exec_name(&elt)) { ref nref; ref *pvalue; name_index_ref(imemory, packed_name_index(&elt), &nref); if ((pvalue = dict_find_name(&nref)) != 0 && r_is_ex_oper(pvalue) ) { store_check_dest(bsp, pvalue); /* * Always save the change, since this can only * happen once. */ ref_do_save(bsp, tpp, "bind"); *tpp = pt_tag(pt_executable_operator) + op_index(pvalue); } } bsp->value.packed = tpp + 1; } else { ref *const tp = bsp->value.refs++; switch (r_type(tp)) { case t_name: /* bind the name if an operator */ if (r_has_attr(tp, a_executable)) { ref *pvalue; if ((pvalue = dict_find_name(tp)) != 0 && r_is_ex_oper(pvalue) ) { store_check_dest(bsp, pvalue); ref_assign_old(bsp, tp, pvalue, "bind"); } } break; case t_array: /* push into array if writable */ if (!r_has_attr(tp, a_write)) break; case t_mixedarray: case t_shortarray: if (r_has_attr(tp, a_executable)) { /* Make reference read-only */ r_clear_attrs(tp, a_write); if (bsp >= ostop) { /* Push a new stack block. */ ref temp; int code; temp = *tp; osp = bsp; code = ref_stack_push(&o_stack, 1); if (code < 0) { ref_stack_pop(&o_stack, depth); return_error(code); } bsp = osp; *bsp = temp; } else *++bsp = *tp; depth++; } } } } bsp--; depth--; if (bsp < osbot) { /* Pop back to the previous stack block. */ osp = bsp; ref_stack_pop_block(&o_stack); bsp = osp; } } osp = bsp; return 0; }
/* Check a stack to make sure all its elements are older than a save. */ static int restore_check_stack(const i_ctx_t *i_ctx_p, const ref_stack_t * pstack, const alloc_save_t * asave, bool is_estack) { ref_stack_enum_t rsenum; ref_stack_enum_begin(&rsenum, pstack); do { const ref *stkp = rsenum.ptr; uint size = rsenum.size; for (; size; stkp++, size--) { const void *ptr; switch (r_type(stkp)) { case t_array: /* * Zero-length arrays are a special case: see the * t_*array case (label rr:) in igc.c:gc_trace. */ if (r_size(stkp) == 0) { /*stkp->value.refs = (void *)0;*/ continue; } ptr = stkp->value.refs; break; case t_dictionary: ptr = stkp->value.pdict; break; case t_file: /* Don't check executable or closed literal */ /* files on the e-stack. */ { stream *s; if (is_estack && (r_has_attr(stkp, a_executable) || file_is_invalid(s, stkp)) ) continue; } ptr = stkp->value.pfile; break; case t_name: /* Names are special because of how they are allocated. */ if (alloc_name_is_since_save((const gs_memory_t *)pstack->memory, stkp, asave)) return_error(e_invalidrestore); continue; case t_string: /* Don't check empty executable strings */ /* on the e-stack. */ if (r_size(stkp) == 0 && r_has_attr(stkp, a_executable) && is_estack ) continue; ptr = stkp->value.bytes; break; case t_mixedarray: case t_shortarray: /* See the t_array case above. */ if (r_size(stkp) == 0) { /*stkp->value.packed = (void *)0;*/ continue; } ptr = stkp->value.packed; break; case t_device: ptr = stkp->value.pdevice; break; case t_fontID: case t_struct: case t_astruct: ptr = stkp->value.pstruct; break; case t_save: /* See the comment in isave.h regarding the following. */ if (i_ctx_p->language_level <= 2) continue; ptr = alloc_find_save(&gs_imemory, stkp->value.saveid); /* * Invalid save objects aren't supposed to be possible * in LL3, but just in case.... */ if (ptr == 0) return_error(e_invalidrestore); if (ptr == asave) continue; break; default: continue; } if (alloc_is_since_save(ptr, asave)) return_error(e_invalidrestore); } } while (ref_stack_enum_next(&rsenum)); return 0; /* OK */ }
static int zeqproc(i_ctx_t *i_ctx_p) { os_ptr op = osp; ref2_t stack[MAX_DEPTH + 1]; ref2_t *top = stack; make_array(&stack[0].proc1, 0, 1, op - 1); make_array(&stack[0].proc2, 0, 1, op); for (;;) { long i; if (r_size(&top->proc1) == 0) { /* Finished these arrays, go up to next level. */ if (top == stack) { /* We're done matching: it succeeded. */ make_true(op - 1); pop(1); return 0; } --top; continue; } /* Look at the next elements of the arrays. */ i = r_size(&top->proc1) - 1; array_get(imemory, &top->proc1, i, &top[1].proc1); array_get(imemory, &top->proc2, i, &top[1].proc2); r_dec_size(&top->proc1, 1); ++top; /* * Amazingly enough, the objects' executable attributes are not * required to match. This means { x load } will match { /x load }, * even though this is clearly wrong. */ #if 0 if (r_has_attr(&top->proc1, a_executable) != r_has_attr(&top->proc2, a_executable) ) break; #endif if (obj_eq(imemory, &top->proc1, &top->proc2)) { /* Names don't match strings. */ if (r_type(&top->proc1) != r_type(&top->proc2) && (r_type(&top->proc1) == t_name || r_type(&top->proc2) == t_name) ) break; --top; /* no recursion */ continue; } if (r_is_array(&top->proc1) && r_is_array(&top->proc2) && r_size(&top->proc1) == r_size(&top->proc2) && top < stack + (MAX_DEPTH - 1) ) { /* Descend into the arrays. */ continue; } break; } /* An exit from the loop indicates that matching failed. */ make_false(op - 1); pop(1); return 0; }
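/*
 * Standalone sketch (generic node type, fixed depth limit) of the
 * non-recursive comparison strategy zeqproc uses above: keep an explicit
 * stack of array pairs, consume elements from the end of the current pair,
 * and descend into nested arrays by pushing a new pair instead of
 * recursing. Wrapping the two roots in a one-element frame mirrors the
 * make_array calls at the top of zeqproc.
 */
#if 0	/* illustrative sketch, not compiled */
#define DEMO_MAX_DEPTH 100

typedef struct demo_node_s {
    int is_leaf;
    int leaf_value;			/* meaningful when is_leaf */
    const struct demo_node_s *kids;	/* meaningful when !is_leaf */
    int size;				/* number of kids */
} demo_node;

static int	/* 1 if structurally equal, 0 otherwise (or too deep) */
demo_equal(const demo_node *a, const demo_node *b)
{
    struct { const demo_node *a, *b; int left; } stack[DEMO_MAX_DEPTH];
    int top = 0;

    stack[0].a = a, stack[0].b = b, stack[0].left = 1;	/* wrap the roots */
    for (;;) {
        const demo_node *na, *nb;

        if (stack[top].left == 0) {	/* this level is exhausted */
            if (top == 0)
                return 1;		/* every level matched */
            --top;
            continue;
        }
        --stack[top].left;
        na = &stack[top].a[stack[top].left];
        nb = &stack[top].b[stack[top].left];
        if (na->is_leaf && nb->is_leaf) {
            if (na->leaf_value != nb->leaf_value)
                return 0;
            continue;
        }
        if (na->is_leaf || nb->is_leaf || na->size != nb->size)
            return 0;
        if (top == DEMO_MAX_DEPTH - 1)
            return 0;			/* too deep: give up, report unequal */
        ++top;				/* descend into the two subarrays */
        stack[top].a = na->kids;
        stack[top].b = nb->kids;
        stack[top].left = na->size;
    }
}
#endif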
/* or a negative error code. */ int build_gs_font(i_ctx_t *i_ctx_p, os_ptr op, gs_font ** ppfont, font_type ftype, gs_memory_type_ptr_t pstype, const build_proc_refs * pbuild, build_font_options_t options) { ref kname; /* t_string */ ref *pftype; ref *pencoding = 0; bool bitmapwidths; int exactsize, inbetweensize, transformedchar; int wmode; int code; gs_font *pfont; ref *pfid; ref *aop = dict_access_ref(op); bool cpsi_mode = gs_currentcpsimode(imemory); get_font_name(imemory, &kname, op - 1); if (dict_find_string(op, "FontType", &pftype) <= 0 || !r_has_type(pftype, t_integer) || pftype->value.intval != (int)ftype ) return_error(e_invalidfont); if (dict_find_string(op, "Encoding", &pencoding) <= 0) { if (!(options & bf_Encoding_optional)) return_error(e_invalidfont); pencoding = 0; } else { if (!r_is_array(pencoding)) return_error(e_invalidfont); } if (pencoding) { /* observed Adobe behavior */ int count = r_size(pencoding); int type = ftype ? t_name : t_integer; bool fixit = false; while (count--) { ref r; if ((code = array_get(imemory, pencoding, count, &r)) < 0 || !(r_has_type(&r, type) || r_has_type(&r, t_null))) { if (!cpsi_mode && ftype == ft_user_defined) { if (code < 0 || r_has_type(&r, t_null)) { return_error(e_typecheck); } fixit = true; break; } else { return_error(e_typecheck); } } } /* For at least Type 3 fonts, Adobe Distiller will "fix" an Encoding array, as in, for example * Bug 692681 where the arrays contain integers rather than names. Once the font is instantiated * the integers have been converted to names. * It is preferable to to this manipulation here, rather than in Postscript, because we are less * restricted by read-only attributes and VM save levels. */ if (fixit) { ref penc; uint size = 0; char buf[32], *bptr; avm_space curglob = ialloc_space(idmemory); avm_space useglob = r_is_local(pencoding) ? avm_local : avm_global; ialloc_set_space(idmemory, useglob); count = r_size(pencoding); if ((code = ialloc_ref_array(&penc, (r_type_attrs(pencoding) & a_readonly), count, "build_gs_font")) < 0) return code; while (count--) { ref r; if (array_get(imemory, pencoding, count, &r) < 0){ return_error(e_typecheck); } /* For type 3, we know the Encoding entries must be names */ if (r_has_type(&r, t_name)){ ref_assign(&(penc.value.refs[count]), &r); } else { if ((code = obj_cvs(imemory, &r, (byte *)buf, 32, &size, (const byte **)(&bptr))) < 0) { return(code); } if ((code = name_ref(imemory, (const byte *)bptr, size, &r, true)) < 0) return code; ref_assign(&(penc.value.refs[count]), &r); } } if ((code = dict_put_string(osp, "Encoding", &penc, NULL)) < 0) return code; ialloc_set_space(idmemory, curglob); } } if ((code = dict_int_param(op, "WMode", 0, 1, 0, &wmode)) < 0 || (code = dict_bool_param(op, "BitmapWidths", false, &bitmapwidths)) < 0 || (code = dict_int_param(op, "ExactSize", 0, 2, fbit_use_bitmaps, &exactsize)) < 0 || (code = dict_int_param(op, "InBetweenSize", 0, 2, fbit_use_outlines, &inbetweensize)) < 0 || (code = dict_int_param(op, "TransformedChar", 0, 2, fbit_use_outlines, &transformedchar)) < 0 ) return code; code = dict_find_string(op, "FID", &pfid); if (code > 0 && r_has_type(pfid, t_fontID)) { /* silently ignore invalid FID per CET 13-05.ps */ /* * If this font has a FID entry already, it might be a scaled font * made by makefont or scalefont; in a Level 2 environment, it might * be an existing font being registered under a second name, or a * re-encoded font (which was invalid in Level 1, but dvips did it * anyway). 
*/ pfont = r_ptr(pfid, gs_font); /* * If the following condition is false this is a re-encoded font, * or some other questionable situation in which the FID * was preserved. Pretend the FID wasn't there. */ if (obj_eq(pfont->memory, pfont_dict(pfont), op)) { if (pfont->base == pfont) { /* original font */ if (!level2_enabled) return_error(e_invalidfont); *ppfont = pfont; return 1; } else { /* This was made by makefont or scalefont. */ /* Just insert the new name. */ gs_matrix mat; ref fname; /* t_string */ code = sub_font_params(imemory, op, &mat, NULL, &fname); if (code < 0) return code; code = 1; copy_font_name(&pfont->font_name, &fname); goto set_name; } } } /* This is a new font. */ if (!r_has_attr(aop, a_write)) return_error(e_invalidaccess); { ref encoding; /* * Since add_FID may resize the dictionary and cause * pencoding to become invalid, save the Encoding. */ if (pencoding) { encoding = *pencoding; pencoding = &encoding; } code = build_gs_sub_font(i_ctx_p, op, &pfont, ftype, pstype, pbuild, pencoding, op); if (code < 0) return code; } pfont->BitmapWidths = bitmapwidths; pfont->ExactSize = (fbit_type)exactsize; pfont->InBetweenSize = (fbit_type)inbetweensize; pfont->TransformedChar = (fbit_type)transformedchar; pfont->WMode = wmode; pfont->procs.font_info = zfont_info; code = 0; set_name: copy_font_name(&pfont->key_name, &kname); *ppfont = pfont; return code; }