/* This is the Level 2 >> operator. */
/*
 * Build a dictionary from the mark/key/value pairs on the operand stack:
 *   <mark> <key1> <value1> ... <keyn> <valuen>  >>  <dict>
 * Requires an even number of objects above the mark (key/value pairs).
 */
static int
zdicttomark(i_ctx_t *i_ctx_p)
{
    uint count2 = ref_stack_counttomark(&o_stack);
    ref rdict;
    int code;
    uint idx;

    if (count2 == 0)            /* no mark anywhere on the o-stack */
        return_error(e_unmatchedmark);
    count2--;                   /* exclude the mark itself */
    if ((count2 & 1) != 0)      /* keys and values must pair up */
        return_error(e_rangecheck);
    /* Size the dictionary for count2/2 entries. */
    code = dict_create(count2 >> 1, &rdict);
    if (code < 0)
        return code;
    /* << /a 1 /a 2 >> => << /a 1 >>, i.e., */
    /* we must enter the keys in top-to-bottom order. */
    for (idx = 0; idx < count2; idx += 2) {
        /* Index idx is the value, idx+1 the key (index 0 = stack top). */
        code = idict_put(&rdict,
                         ref_stack_index(&o_stack, idx + 1),
                         ref_stack_index(&o_stack, idx));
        if (code < 0) {         /* There's no way to free the dictionary -- too bad. */
            return code;
        }
    }
    /* Pop the pairs, then overwrite the mark slot with the new dict. */
    ref_stack_pop(&o_stack, count2);
    ref_assign(osp, &rdict);
    return code;
}
/* <obj1> ... <objn> <int> copy <obj1> ... <objn> <obj1> ... <objn> */
/*
 * Duplicate the top n objects below the count operand.  The fast case
 * handles everything within the current o-stack block with one memcpy;
 * otherwise elements may span stack blocks and must be copied one ref
 * at a time through ref_stack_index.
 */
static int
zcopy_integer(i_ctx_t *i_ctx_p)
{
    os_ptr op = osp;
    os_ptr op1 = op - 1;
    int count, i;
    int code;

    /*
     * NOTE(review): op->value.intval is read before check_type confirms
     * *op is an integer; if it is not, the garbage value only steers us
     * into this branch, where check_type then rejects the operand.
     */
    if ((uint) op->value.intval > (uint)(op - osbot)) {
        /* There might be enough elements in other blocks. */
        check_type(*op, t_integer);
        if (op->value.intval >= (int)ref_stack_count(&o_stack))
            return_error(e_stackunderflow);
        if (op->value.intval < 0)
            return_error(e_rangecheck);
        /* NOTE(review): this re-checks what the two tests above already
           established; it appears redundant but is kept for safety. */
        check_int_ltu(*op, ref_stack_count(&o_stack));
        count = op->value.intval;
    } else if (op1 + (count = op->value.intval) <= ostop) {
        /* Fast case: source and destination fit in the top block. */
        memcpy((char *)op, (char *)(op - count), count * sizeof(ref));
        push(count - 1);        /* count refs written, minus the consumed int */
        return 0;
    }
    /* Do it the slow, general way. */
    code = ref_stack_push(&o_stack, count - 1);
    if (code < 0)
        return code;
    /* After the push, the originals sit count slots below their copies. */
    for (i = 0; i < count; i++)
        *ref_stack_index(&o_stack, i) =
            *ref_stack_index(&o_stack, i + count);
    return 0;
}
/* Common functionality of zgethardwareparms & zgetdeviceparams */
/*
 * <device> <key_dict|null> ... => <mark> <name1> <value1> ...
 * Reads parameters from dev onto the operand stack as name/value pairs.
 * On error, the o-stack is restored so the keys operand is back on top.
 */
static int
zget_device_params(i_ctx_t *i_ctx_p, bool is_hardware)
{
    os_ptr op = osp;
    ref rkeys;
    gx_device *dev;
    stack_param_list list;
    int code;
    ref *pmark;

    check_read_type(op[-1], t_device);
    rkeys = *op;                /* save the key-selection operand for error recovery */
    dev = op[-1].value.pdevice;
    pop(1);
    stack_param_list_write(&list, &o_stack, &rkeys, iimemory);
    code = gs_get_device_or_hardware_params(dev, (gs_param_list *) & list,
                                            is_hardware);
    if (code < 0) {
        /* We have to put back the top argument. */
        if (list.count > 0)
            /* Pop the partial name/value output, keeping one slot for rkeys. */
            ref_stack_pop(&o_stack, list.count * 2 - 1);
        else
            ref_stack_push(&o_stack, 1);
        *osp = rkeys;
        return code;
    }
    /* Turn the slot where the device operand was into the mark. */
    pmark = ref_stack_index(&o_stack, list.count * 2);
    make_mark(pmark);
    return 0;
}
/* <key> load <value> */
/*
 * Look up a key through the dictionary stack and replace it with its
 * value.  Names use the name cache fast path; other key types search
 * the d-stack explicitly from the top down.
 */
static int
zload(i_ctx_t *i_ctx_p)
{
    os_ptr op = osp;
    ref *pvalue;

    switch (r_type(op)) {
        case t_name:
            /* Use the fast lookup. */
            if ((pvalue = dict_find_name(op)) == 0)
                return_error(e_undefined);
            ref_assign(op, pvalue);
            return 0;
        case t_null:
            /* null is never a valid dictionary key */
            return_error(e_typecheck);
        case t__invalid:
            /* empty stack slot => nothing was supplied */
            return_error(e_stackunderflow);
        default: {
                /* Use an explicit loop. */
                uint size = ref_stack_count(&d_stack);
                uint i;

                /* index 0 is the top of the d-stack */
                for (i = 0; i < size; i++) {
                    ref *dp = ref_stack_index(&d_stack, i);

                    check_dict_read(*dp);
                    if (dict_find(dp, op, &pvalue) > 0) {
                        ref_assign(op, pvalue);
                        return 0;
                    }
                }
                return_error(e_undefined);
            }
    }
}
/* width for an xfont character. */
/*
 * Report the character width to the text enumerator and restore the
 * operand and dictionary stacks to the depths recorded in the show
 * enumerator's e-stack frame, then pop that frame (except for the mark
 * and the continuation procedure).
 */
static int
op_show_return_width(i_ctx_t *i_ctx_p, uint npop, double *pwidth)
{
    uint index = op_show_find_index(i_ctx_p);
    /* ep addresses the enumerator slot within the show frame */
    es_ptr ep = (es_ptr) ref_stack_index(&e_stack, index - (snumpush - 1));
    int code = gs_text_setcharwidth(esenum(ep), pwidth);
    uint ocount, dsaved, dcount;

    if (code < 0)
        return code;
    /* Restore the operand and dictionary stacks. */
    ocount = ref_stack_count(&o_stack) - (uint) esodepth(ep).value.intval;
    if (ocount < npop)
        return_error(e_stackunderflow);
    dsaved = (uint) esddepth(ep).value.intval;
    dcount = ref_stack_count(&d_stack);
    if (dcount < dsaved)
        return_error(e_dictstackunderflow);
    /* Pop dictionaries pushed since the show started, one `end` at a time. */
    while (dcount > dsaved) {
        code = zend(i_ctx_p);
        if (code < 0)
            return code;
        dcount--;
    }
    ref_stack_pop(&o_stack, ocount);
    /* We don't want to pop the mark or the continuation */
    /* procedure (op_show_continue or cshow_continue). */
    pop_estack(i_ctx_p, index - snumpush);
    return o_pop_estack;
}
/* <obj1> ... <objn> <n> .execn - */
/*
 * Push n objects from the operand stack onto the execution stack
 * (deepest first, so obj1 executes first), after verifying each is
 * legal to execute.  On an access violation the e-stack is rolled back.
 */
static int
zexecn(i_ctx_t *i_ctx_p)
{
    os_ptr op = osp;
    uint n, i;
    es_ptr esp_orig;

    check_int_leu(*op, max_uint - 1);
    n = (uint) op->value.intval;
    check_op(n + 1);            /* n objects plus the count itself */
    check_estack(n);
    esp_orig = esp;             /* for rollback on error below */
    for (i = 0; i < n; ++i) {
        /* index i+1 skips the count operand on top of the o-stack */
        const ref *rp = ref_stack_index(&o_stack, (long)(i + 1));

        /* Make sure this object is legal to execute. */
        if (ref_type_uses_access(r_type(rp))) {
            if (!r_has_attr(rp, a_execute) &&
                r_has_attr(rp, a_executable)
                ) {
                esp = esp_orig; /* undo any pushes made so far */
                return_error(e_invalidaccess);
            }
        }
        /* Executable nulls have a special meaning on the e-stack, */
        /* so since they are no-ops, don't push them. */
        if (!r_has_type_attrs(rp, t_null, a_executable)) {
            ++esp;
            ref_assign(esp, rp);
        }
    }
    esfile_check_cache();
    pop(n + 1);
    return o_push_estack;
}
/* Find the current show enumerator on the e-stack. */
/* Returns 0 if there is no show frame (no mark) on the e-stack. */
gs_text_enum_t *
op_show_find(i_ctx_t *i_ctx_p)
{
    uint depth = op_show_find_index(i_ctx_p);
    ref *slot;

    if (depth == 0)
        return 0;               /* no mark */
    /* The enumerator lives snumpush-1 slots above the frame base. */
    slot = ref_stack_index(&e_stack, depth - (snumpush - 1));
    return r_ptr(slot, gs_text_enum_t);
}
/* Implementation for putting parameters from a stack. */
/*
 * Locate the value for pkey among the name/value pairs on the stack.
 * Returns 0 and fills *ploc if found, 1 if not found.
 */
static int
stack_param_read(iparam_list * plist, const ref * pkey, iparam_loc * ploc)
{
    stack_param_list *const splist = (stack_param_list *) plist;
    ref_stack_t *pstack = splist->pstack;

    /* This implementation is slow, but it probably doesn't matter. */
    uint index = splist->skip + 1;
    uint count = splist->count;

    for (; count; count--, index += 2) {
        const ref *p = ref_stack_index(pstack, index);

        /*
         * ref_stack_index returns NULL for an out-of-range index (see
         * the guard in stack_param_enumerate); treat that as "not
         * found" rather than dereferencing a null pointer.
         */
        if (p == NULL)
            return 1;
        if (r_has_type(p, t_name) && name_eq(p, pkey)) {
            /* index-1 (one slot closer to the top) holds the value */
            ploc->pvalue = ref_stack_index(pstack, index - 1);
            ploc->presult = &plist->results[count - 1];
            *ploc->presult = 1;
            return 0;
        }
    }
    return 1;
}
/* r_size(op1) was set just above. */
/*
 * Copy the contents of the exec stack into the array at op1,
 * optionally including the invisible executable-null marks.
 * The array is filled from the end (rq) while reading the e-stack
 * from the top (index 0) so the result is in bottom-to-top order.
 */
static int
do_execstack(i_ctx_t *i_ctx_p, bool include_marks, os_ptr op1)
{
    os_ptr op = osp;
    ref *arefs = op1->value.refs;
    uint asize = r_size(op1);
    uint i;
    ref *rq;

    /*
     * Copy elements from the stack to the array,
     * optionally skipping executable nulls.
     * Clear the executable bit in any internal operators, and
     * convert t_structs and t_astructs (which can only appear
     * in connection with stack marks, which means that they will
     * probably be freed when unwinding) to something harmless.
     */
    for (i = 0, rq = arefs + asize; rq != arefs; ++i) {
        const ref *rp = ref_stack_index(&e_stack, (long)i);

        if (r_has_type_attrs(rp, t_null, a_executable) && !include_marks)
            continue;           /* invisible mark: skip without consuming a slot */
        --rq;
        ref_assign_old(op1, rq, rp, "execstack");
        switch (r_type(rq)) {
            case t_operator: {
                uint opidx = op_index(rq);

                /* Hide internal operators from PostScript code. */
                if (opidx == 0 || op_def_is_internal(op_index_def(opidx)))
                    r_clear_attrs(rq, a_executable);
                break;
            }
            case t_struct:
            case t_astruct: {
                /* Replace the struct pointer with its type name string. */
                const char *tname = rq->value.pstruct ?
                    gs_struct_type_name_string(
                        gs_object_type(imemory, rq->value.pstruct))
                    : "NULL";

                make_const_string(rq, a_readonly | avm_foreign,
                                  strlen(tname), (const byte *)tname);
                break;
            }
            default:
                ;
        }
    }
    pop(op - op1);              /* leave only the array on the o-stack */
    return 0;
}
/*
 * Count the number of elements on the exec stack, with or without
 * the normally invisible elements (*op is a Boolean that indicates this).
 */
static uint
count_exec_stack(i_ctx_t *i_ctx_p, bool include_marks)
{
    uint total = ref_stack_count(&e_stack);
    uint visible;
    uint i;

    if (include_marks)
        return total;
    /* Subtract the invisible executable-null marks from the total. */
    visible = total;
    for (i = 0; i < total; ++i) {
        const ref *ep = ref_stack_index(&e_stack, (long)i);

        if (r_has_type_attrs(ep, t_null, a_executable))
            --visible;
    }
    return visible;
}
/* <mask> .instopped <result> true */
/*
 * ... or just <false> if no stopped context matching <mask> is found.
 * On success the default result saved in the stopped frame (two slots
 * below the frame top) is pushed under the boolean.
 */
static int
zinstopped(i_ctx_t *i_ctx_p)
{
    os_ptr op = osp;
    uint count;

    check_type(*op, t_integer);
    count = count_to_stopped(i_ctx_p, op->value.intval);
    if (count) {
        push(1);                /* op now addresses the new top slot */
        op[-1] = *ref_stack_index(&e_stack, count - 2);		/* default result */
        make_true(op);
    } else
        make_false(op);         /* overwrites the mask operand */
    return 0;
}
/*
 * Pop the e-stack, executing cleanup procedures as needed.
 * We could make this more efficient using ref_stack_enum_*,
 * but it isn't used enough to make this worthwhile.
 */
void
pop_estack(i_ctx_t *i_ctx_p, uint count)
{
    uint idx = 0;
    uint popped = 0;            /* elements already removed from the e-stack */

    esfile_clear_cache();
    for (; idx < count; idx++) {
        /* idx counts original positions; adjust by what was popped. */
        ref *ep = ref_stack_index(&e_stack, idx - popped);

        if (r_is_estack_mark(ep)) {
            /* Pop through the mark, then run its cleanup procedure. */
            ref_stack_pop(&e_stack, idx + 1 - popped);
            popped = idx + 1;
            /*
             * NOTE(review): ep addresses a slot that was just popped;
             * this relies on ref_stack_pop not invalidating the
             * underlying storage -- confirm against ref_stack_pop.
             */
            (*real_opproc(ep)) (i_ctx_p);
        }
    }
    ref_stack_pop(&e_stack, count - popped);
}
/* Check whether a dictionary is one of the permanent ones on the d-stack. */ bool dstack_dict_is_permanent(const dict_stack_t * pds, const ref * pdref) { dict *pdict = pdref->value.pdict; int i; if (pds->stack.extension_size == 0) { /* Only one block of d-stack. */ for (i = 0; i < pds->min_size; ++i) if (pds->stack.bot[i].value.pdict == pdict) return true; } else { /* More than one block of d-stack. */ uint count = ref_stack_count(&pds->stack); for (i = count - pds->min_size; i < count; ++i) if (ref_stack_index(&pds->stack, i)->value.pdict == pdict) return true; } return false; }
/* Implementation for enumerating parameters on a stack */
/*
 * Advance penum to the next name key among the stack's name/value
 * pairs and return its key and type.  Returns 0 on success, 1 at the
 * end of the list (out-of-range stack index), or a negative error.
 */
static int		/* ret 0 ok, 1 if EOF, or -ve err */
stack_param_enumerate(iparam_list * plist, gs_param_enumerator_t * penum,
                      gs_param_key_t * key, ref_type * type)
{
    int code;
    stack_param_list *const splist = (stack_param_list *) plist;
    int index = penum->intval;  /* enumeration cursor, persisted in penum */
    ref *stack_element;

    do {
        stack_element =
            ref_stack_index(splist->pstack, index + 1 + splist->skip);
        if (!stack_element)     /* ran off the end of the stack: EOF */
            return 1;
    } while (index += 2, !r_has_type(stack_element, t_name));
    *type = r_type(stack_element);
    code = ref_to_key(stack_element, key, plist);
    penum->intval = index;      /* remember position for the next call */
    return code;
}
/* Implementation for getting parameters to a stack. */
/*
 * Append a name/value pair to the stack: key below, value on top.
 * If the current block lacks two free slots, ref_stack_push is used
 * (which may extend into a new block), and the key must be stored
 * through ref_stack_index since the two slots may not be contiguous.
 */
static int
stack_param_write(iparam_list * plist, const ref * pkey, const ref * pvalue)
{
    stack_param_list *const splist = (stack_param_list *) plist;
    ref_stack_t *pstack = splist->pstack;
    s_ptr p = pstack->p;

    if (pstack->top - p < 2) {
        /* Not enough room in this block: push 2 slots the general way. */
        int code = ref_stack_push(pstack, 2);

        if (code < 0)
            return code;
        *ref_stack_index(pstack, 1) = *pkey;    /* key goes below the top */
        p = pstack->p;          /* re-fetch: push may have moved the pointer */
    } else {
        /* Fast case: bump the stack pointer within the block. */
        pstack->p = p += 2;
        p[-1] = *pkey;
    }
    *p = *pvalue;               /* value on top */
    splist->count++;
    return 0;
}
/* <array> aload <obj_0> ... <obj_n-1> <array> */
/*
 * Push all elements of an array onto the operand stack, followed by
 * the array itself.  Handles ordinary and packed arrays; the slow
 * path is used when the elements would overflow the current o-stack
 * block.
 */
static int
zaload(i_ctx_t *i_ctx_p)
{
    os_ptr op = osp;
    ref aref;
    uint asize;

    ref_assign(&aref, op);      /* save the array operand; op gets overwritten */
    if (!r_is_array(&aref))
        return_op_typecheck(op);
    check_read(aref);
    asize = r_size(&aref);
    if (asize > ostop - op) {
        /* Use the slow, general algorithm. */
        int code = ref_stack_push(&o_stack, asize);
        uint i;
        const ref_packed *packed = aref.value.packed;

        if (code < 0)
            return code;
        /*
         * Fill indices asize..1 (deepest slot, which held the array
         * operand, down to just under the new top) with the elements;
         * index 0 (the new top) receives the array ref below.
         */
        for (i = asize; i > 0; i--, packed = packed_next(packed))
            packed_get(imemory, packed, ref_stack_index(&o_stack, i));
        *osp = aref;
        return 0;
    }
    if (r_has_type(&aref, t_array))
        /* Ordinary refs can be block-copied. */
        memcpy(op, aref.value.refs, asize * sizeof(ref));
    else {
        /* Packed arrays must be expanded element by element. */
        uint i;
        const ref_packed *packed = aref.value.packed;
        os_ptr pdest = op;

        for (i = 0; i < asize; i++, pdest++,
             packed = packed_next(packed))
            packed_get(imemory, packed, pdest);
    }
    push(asize);
    ref_assign(op, &aref);      /* array goes back on top */
    return 0;
}
/* <iodevice> .getdevparams <mark> <name> <value> ... */
/*
 * Look up the IODevice named by the string operand and push all of
 * its parameters as name/value pairs above a mark.
 */
static int
zgetdevparams(i_ctx_t *i_ctx_p)
{
    os_ptr op = osp;
    gx_io_device *iodev;
    stack_param_list list;
    gs_param_list *const plist = (gs_param_list *) & list;
    int code;
    ref *pmark;

    check_read_type(*op, t_string);
    iodev = gs_findiodevice(imemory, op->value.bytes, r_size(op));
    if (iodev == 0)
        return_error(e_undefined);
    /* NULL key list => enumerate all parameters. */
    stack_param_list_write(&list, &o_stack, NULL, iimemory);
    if ((code = gs_getdevparams(iodev, plist)) < 0) {
        /* Discard any pairs already pushed; the string operand remains. */
        ref_stack_pop(&o_stack, list.count * 2);
        return code;
    }
    /* Turn the slot where the iodevice string was into the mark. */
    pmark = ref_stack_index(&o_stack, list.count * 2);
    make_mark(pmark);
    return 0;
}
/* <obj_0> ... <obj_n-1> <array> astore <array> */
/*
 * Store the top size elements of the operand stack into the array,
 * leaving only the array on the stack.  Uses a fast in-block copy
 * when all the elements are in the current o-stack block.
 * NOTE(review): this function uses gs_error_* codes while neighboring
 * operators use the e_* spellings -- presumably aliases; confirm.
 */
static int
zastore(i_ctx_t *i_ctx_p)
{
    os_ptr op = osp;
    uint size;
    int code;

    if (!r_is_array(op))
        return_op_typecheck(op);
    size = r_size(op);
    /* Amazingly, the following is valid: 0 array noaccess astore */
    if (size == 0)
        return 0;
    if (!r_has_type_attrs(op, t_array, a_write))
        return_error(gs_error_invalidaccess);
    if (size > op - osbot) {
        /* The store operation might involve other stack segments. */
        ref arr;

        if (size >= ref_stack_count(&o_stack))
            return_error(gs_error_stackunderflow);
        arr = *op;              /* copy the array ref before popping */
        code = ref_stack_store(&o_stack, &arr, size, 1, 0, true,
                               idmemory, "astore");
        if (code < 0)
            return code;
        ref_stack_pop(&o_stack, size);
        *ref_stack_index(&o_stack, 0) = arr;    /* array replaces the top */
    } else {
        /* Fast case: everything lives in the current block. */
        code = refcpy_to_old(op, 0, op - size, size, idmemory, "astore");
        if (code < 0)
            return code;
        op[-(int)size] = *op;   /* move the array ref below the elements */
        pop(size);
    }
    return 0;
}
/* ensuring that refs in mixed arrays are properly aligned. */ #undef idmemory /****** NOTA BENE ******/ int make_packed_array(ref * parr, ref_stack_t * pstack, uint size, gs_dual_memory_t *idmemory, client_name_t cname) { uint i; const ref *pref; uint idest = 0, ishort = 0; ref_packed *pbody; ref_packed *pdest; ref_packed *pshort; /* points to start of */ /* last run of short elements */ gs_ref_memory_t *imem = idmemory->current; uint space = imemory_space(imem); int skip = 0, pad; ref rtemp; int code; /* Do a first pass to calculate the size of the array, */ /* and to detect local-into-global stores. */ for (i = size; i != 0; i--) { pref = ref_stack_index(pstack, i - 1); switch (r_btype(pref)) { /* not r_type, opers are special */ case t_name: if (name_index(imem, pref) >= packed_name_max_index) break; /* can't pack */ idest++; continue; case t_integer: if (pref->value.intval < packed_min_intval || pref->value.intval > packed_max_intval ) break; idest++; continue; case t_oparray: /* Check for local-into-global store. */ store_check_space(space, pref); /* falls through */ case t_operator: { uint oidx; if (!r_has_attr(pref, a_executable)) break; oidx = op_index(pref); if (oidx == 0 || oidx > packed_int_mask) break; } idest++; continue; default: /* Check for local-into-global store. */ store_check_space(space, pref); } /* Can't pack this element, use a full ref. */ /* We may have to unpack up to align_packed_per_ref - 1 */ /* preceding short elements. */ /* If we are at the beginning of the array, however, */ /* we can just move the elements up. */ { int i = (idest - ishort) & (align_packed_per_ref - 1); if (ishort == 0) /* first time */ idest += skip = -i & (align_packed_per_ref - 1); else idest += (packed_per_ref - 1) * i; } ishort = idest += packed_per_ref; } pad = -(int)idest & (packed_per_ref - 1); /* padding at end */ /* Now we can allocate the array. 
*/ code = gs_alloc_ref_array(imem, &rtemp, 0, (idest + pad) / packed_per_ref, cname); if (code < 0) return code; pbody = (ref_packed *) rtemp.value.refs; /* Make sure any initial skipped elements contain legal packed */ /* refs, so that the garbage collector can scan storage. */ pshort = pbody; for (; skip; skip--) *pbody++ = pt_tag(pt_integer); pdest = pbody; for (i = size; i != 0; i--) { pref = ref_stack_index(pstack, i - 1); switch (r_btype(pref)) { /* not r_type, opers are special */ case t_name: { uint nidx = name_index(imem, pref); if (nidx >= packed_name_max_index) break; /* can't pack */ *pdest++ = nidx + (r_has_attr(pref, a_executable) ? pt_tag(pt_executable_name) : pt_tag(pt_literal_name)); } continue; case t_integer: if (pref->value.intval < packed_min_intval || pref->value.intval > packed_max_intval ) break; *pdest++ = pt_tag(pt_integer) + ((short)pref->value.intval - packed_min_intval); continue; case t_oparray: case t_operator: { uint oidx; if (!r_has_attr(pref, a_executable)) break; oidx = op_index(pref); if (oidx == 0 || oidx > packed_int_mask) break; *pdest++ = pt_tag(pt_executable_operator) + oidx; } continue; } /* Can't pack this element, use a full ref. */ /* We may have to unpack up to align_packed_per_ref - 1 */ /* preceding short elements. */ /* Note that if we are at the beginning of the array, */ /* 'skip' already ensures that we don't need to do this. */ { int i = (pdest - pshort) & (align_packed_per_ref - 1); const ref_packed *psrc = pdest; ref *pmove = (ref *) (pdest += (packed_per_ref - 1) * i); ref_assign_new(pmove, pref); while (--i >= 0) { --psrc; --pmove; packed_get(imem->non_gc_memory, psrc, pmove); } } pshort = pdest += packed_per_ref; } { int atype = (pdest == pbody + size ? t_shortarray : t_mixedarray); /* Pad with legal packed refs so that the garbage collector */ /* can scan storage. */ for (; pad; pad--) *pdest++ = pt_tag(pt_integer); /* Finally, make the array. 
*/ ref_stack_pop(pstack, size); make_tasv_new(parr, atype, a_readonly | space, size, packed, pbody + skip); } return 0; }
/* Note that .putdeviceparams clears the current pagedevice. */
/*
 * <mark> <name1> <value1> ... <policy_dict|null> <require_all> <device>
 *   .putdeviceparams
 * Writes the name/value pairs into the device.  Failed keys are
 * compacted onto the stack with their error names; on success the
 * whole argument list is popped and replaced by a boolean.
 */
static int
zputdeviceparams(i_ctx_t *i_ctx_p)
{
    uint count = ref_stack_counttomark(&o_stack);
    ref *prequire_all;
    ref *ppolicy;
    ref *pdev;
    gx_device *dev;
    stack_param_list list;
    int code;
    int old_width, old_height;
    int i, dest;

    if (count == 0)
        return_error(e_unmatchedmark);
    /* The three operands sit just below the mark. */
    prequire_all = ref_stack_index(&o_stack, count);
    ppolicy = ref_stack_index(&o_stack, count + 1);
    pdev = ref_stack_index(&o_stack, count + 2);
    if (pdev == 0)		/* count+2 out of range => too few operands */
        return_error(e_stackunderflow);
    check_type_only(*prequire_all, t_boolean);
    check_write_type_only(*pdev, t_device);
    dev = pdev->value.pdevice;
    code = stack_param_list_read(&list, &o_stack, 0, ppolicy,
                                 prequire_all->value.boolval, iimemory);
    if (code < 0)
        return code;
    old_width = dev->width;
    old_height = dev->height;
    code = gs_putdeviceparams(dev, (gs_param_list *) & list);
    /* Check for names that were undefined or caused errors. */
    /* Compact the failing name/error-name pairs toward the mark. */
    for (dest = count - 2, i = 0; i < count >> 1; i++)
        if (list.results[i] < 0) {
            *ref_stack_index(&o_stack, dest) =
                *ref_stack_index(&o_stack, count - (i << 1) - 2);
            gs_errorname(i_ctx_p, list.results[i],
                         ref_stack_index(&o_stack, dest - 1));
            dest -= 2;
        }
    iparam_list_release(&list);
    if (code < 0) {		/* There were errors reported. */
        /* Leave only the failing pairs (above the mark) on the stack. */
        ref_stack_pop(&o_stack, dest + 1);
        return 0;
    }
    if (code > 0 ||
        (code == 0 && (dev->width != old_width || dev->height != old_height))
        ) {
        /*
         * The device was open and is now closed, or its dimensions have
         * changed.  If it was the current device, call setdevice to
         * reinstall it and erase the page.
         */
        /****** DOESN'T FIND ALL THE GSTATES THAT REFERENCE THE DEVICE. ******/
        if (gs_currentdevice(igs) == dev) {
            bool was_open = dev->is_open;

            code = gs_setdevice_no_erase(igs, dev);
            /* If the device wasn't closed, setdevice won't erase the page. */
            if (was_open && code >= 0)
                code = 1;
        }
    }
    if (code < 0)
        return code;
    /* Pop everything through the mark, push the "reopened" boolean. */
    ref_stack_pop(&o_stack, count + 1);
    make_bool(osp, code);
    clear_pagedevice(istate);
    return 0;
}
/*
 * Switch the interpreter between language levels 1, 2 and 3 by
 * swapping the level-specific dictionaries into/out of the d-stack
 * and updating the globaldict slot and the auto-expand flag.
 * Transitions are stepped one level at a time through level 2.
 */
static int
set_language_level(i_ctx_t *i_ctx_p, int new_level)
{
    int old_level = LANGUAGE_LEVEL;
    ref *pgdict =		/* globaldict, if present */
        ref_stack_index(&d_stack, ref_stack_count(&d_stack) - 2);
    ref *level2dict;
    int code = 0;

    /* Level 3 is only available if ll3dict exists in systemdict. */
    if (new_level < 1 ||
        new_level >
        (dict_find_string(systemdict, "ll3dict", &level2dict) > 0 ? 3 : 2)
        )
        return_error(e_rangecheck);
    if (dict_find_string(systemdict, "level2dict", &level2dict) <= 0)
        return_error(e_undefined);
    /*
     * As noted in dstack.h, we allocate the extra d-stack entry for
     * globaldict even in Level 1 mode; in Level 1 mode, this entry
     * holds an extra copy of systemdict, and [count]dictstack omit the
     * very bottommost entry.
     */
    while (new_level != old_level) {
        switch (old_level) {
            case 1: {		/* 1 => 2 or 3 */
                /* Put globaldict in the dictionary stack. */
                ref *pdict;

                /*
                 * This might be called so early in initialization that
                 * globaldict hasn't been defined yet.  If so, just skip
                 * this step.
                 */
                code = dict_find_string(level2dict, "globaldict", &pdict);
                if (code > 0) {
                    if (!r_has_type(pdict, t_dictionary))
                        return_error(e_typecheck);
                    *pgdict = *pdict;
                }
                /* Set other flags for Level 2 operation. */
                imemory->gs_lib_ctx->dict_auto_expand = true;
            }
                code = swap_level_dict(i_ctx_p, "level2dict");
                if (code < 0)
                    return code;
                ++old_level;
                continue;
            case 3:		/* 3 => 1 or 2 */
                code = swap_level_dict(i_ctx_p, "ll3dict");
                if (code < 0)
                    return code;
                --old_level;
                continue;
            default:		/* 2 => 1 or 3 */
                break;
        }
        switch (new_level) {
            case 1: {		/* 2 => 1 */
                /*
                 * Clear the cached definition pointers of all names defined
                 * in globaldict.  This will slow down future lookups, but
                 * we don't care.
                 */
                int index = dict_first(pgdict);
                ref elt[2];

                while ((index = dict_next(pgdict, index, &elt[0])) >= 0)
                    if (r_has_type(&elt[0], t_name))
                        name_invalidate_value_cache(imemory, &elt[0]);
                /* Overwrite globaldict in the dictionary stack. */
                *pgdict = *systemdict;
                /* Set other flags for Level 1 operation. */
                imemory->gs_lib_ctx->dict_auto_expand = false;
            }
                code = swap_level_dict(i_ctx_p, "level2dict");
                break;
            case 3:		/* 2 => 3 */
                code = swap_level_dict(i_ctx_p, "ll3dict");
                break;
            default:		/* not possible */
                return_error(e_Fatal);
        }
        break;
    }
    dict_set_top();		/* reload dict stack cache */
    return code;
}