/* Common functionality of zgethardwareparms & zgetdeviceparams */
/*
 * Expects <device> <key-dict-or-null> on the operand stack.  Pops the key
 * ref, then writes the requested device (or hardware) parameters onto the
 * operand stack as key/value pairs via a stack_param_list, finally
 * replacing the device operand with a mark below the pairs.
 */
static int
zget_device_params(i_ctx_t *i_ctx_p, bool is_hardware)
{
    os_ptr op = osp;
    ref rkeys;                  /* saved copy of the keys operand (top) */
    gx_device *dev;
    stack_param_list list;
    int code;
    ref *pmark;

    /* op[-1] must be a readable device; check_read_type returns on error. */
    check_read_type(op[-1], t_device);
    rkeys = *op;
    dev = op[-1].value.pdevice;
    pop(1);                     /* remove the keys operand; device is now on top */
    /* Parameters will be pushed onto the o-stack as key/value pairs. */
    stack_param_list_write(&list, &o_stack, &rkeys, iimemory);
    code = gs_get_device_or_hardware_params(dev, (gs_param_list *) & list,
                                            is_hardware);
    if (code < 0) {
        /* We have to put back the top argument. */
        if (list.count > 0)
            /* Pop all but one slot of the 2*count pushed refs... */
            ref_stack_pop(&o_stack, list.count * 2 - 1);
        else
            /* ...or make room for it if nothing was pushed. */
            ref_stack_push(&o_stack, 1);
        *osp = rkeys;           /* restore the keys operand */
        return code;
    }
    /*
     * Success: 2*count refs were pushed above the device operand.
     * The slot count*2 down from the top is the old device slot;
     * overwrite it with a mark delimiting the result pairs.
     */
    pmark = ref_stack_index(&o_stack, list.count * 2);
    make_mark(pmark);
    return 0;
}
/* <obj1> ... <objn> <int> copy <obj1> ... <objn> <obj1> ... <objn> */
/*
 * Integer case of the PostScript `copy' operator: duplicate the top n
 * operands (not counting the count itself).  Uses a fast memcpy path when
 * both source and destination fit inside the current stack block, and a
 * general ref_stack path otherwise.
 */
static int
zcopy_integer(i_ctx_t *i_ctx_p)
{
    os_ptr op = osp;
    os_ptr op1 = op - 1;
    int count, i;
    int code;

    /*
     * Note: value.intval is read before the type check; the value is only
     * used for a range comparison here, and the type is validated before
     * it is trusted.  If the count exceeds the refs available in the
     * current block, elements may still exist in earlier blocks.
     */
    if ((uint) op->value.intval > (uint)(op - osbot)) {
        /* There might be enough elements in other blocks. */
        check_type(*op, t_integer);
        if (op->value.intval >= (int)ref_stack_count(&o_stack))
            return_error(e_stackunderflow);
        if (op->value.intval < 0)
            return_error(e_rangecheck);
        check_int_ltu(*op, ref_stack_count(&o_stack));
        count = op->value.intval;
    } else if (op1 + (count = op->value.intval) <= ostop) {
        /* Fast case: source and destination both in this block. */
        memcpy((char *)op, (char *)(op - count), count * sizeof(ref));
        push(count - 1);
        return 0;
    }
    /* Do it the slow, general way. */
    /* Overwrite the count operand and extend the stack by count-1 refs. */
    code = ref_stack_push(&o_stack, count - 1);
    if (code < 0)
        return code;
    /* Copy each source ref (count deeper) into its new top-of-stack slot. */
    for (i = 0; i < count; i++)
        *ref_stack_index(&o_stack, i) = *ref_stack_index(&o_stack, i + count);
    return 0;
}
/* Implementation for getting parameters to a stack. */
/*
 * param_list `write' callback: append one key/value pair onto the target
 * ref stack and bump the pair count.  Handles the case where the pair
 * straddles a stack-block boundary.
 */
static int
stack_param_write(iparam_list * plist, const ref * pkey, const ref * pvalue)
{
    stack_param_list *const splist = (stack_param_list *) plist;
    ref_stack_t *pstack = splist->pstack;
    s_ptr p = pstack->p;

    if (pstack->top - p < 2) {
        /*
         * Not enough room in the current block: push 2 slots (which may
         * allocate a new block), then address the key slot through
         * ref_stack_index since p-1 might lie in the previous block.
         */
        int code = ref_stack_push(pstack, 2);

        if (code < 0)
            return code;
        *ref_stack_index(pstack, 1) = *pkey;
        p = pstack->p;          /* re-fetch top pointer after the push */
    } else {
        /* Fast path: advance the top pointer and store the key directly. */
        pstack->p = p += 2;
        p[-1] = *pkey;
    }
    *p = *pvalue;               /* value goes in the new top slot */
    splist->count++;            /* one more key/value pair recorded */
    return 0;
}
/* <array> aload <obj_0> ... <obj_n-1> <array> */
/*
 * Push all elements of an array (or packed array) onto the operand stack,
 * leaving the array itself on top.  A fast memcpy path is used for plain
 * arrays that fit in the current stack block; packed arrays and oversize
 * cases are unpacked element by element.
 */
static int
zaload(i_ctx_t *i_ctx_p)
{
    os_ptr op = osp;
    ref aref;
    uint asize;

    ref_assign(&aref, op);
    if (!r_is_array(&aref))
        return_op_typecheck(op);
    check_read(aref);
    asize = r_size(&aref);
    if (asize > ostop - op) {   /* doesn't fit in the current block */
        /* Use the slow, general algorithm. */
        int code = ref_stack_push(&o_stack, asize);
        uint i;
        const ref_packed *packed = aref.value.packed;

        if (code < 0)
            return code;
        /*
         * Fill from the deepest slot upward: the first element lands
         * asize down from the top (overwriting the original array slot),
         * and the top slot is reserved for the array ref itself.
         */
        for (i = asize; i > 0; i--, packed = packed_next(packed))
            packed_get(imemory, packed, ref_stack_index(&o_stack, i));
        *osp = aref;
        return 0;
    }
    if (r_has_type(&aref, t_array))
        /* Plain array: elements are refs, copy them in one shot
           (first element overwrites the array operand's slot). */
        memcpy(op, aref.value.refs, asize * sizeof(ref));
    else {
        /* Packed array: unpack each element into successive slots. */
        uint i;
        const ref_packed *packed = aref.value.packed;
        os_ptr pdest = op;

        for (i = 0; i < asize; i++, pdest++, packed = packed_next(packed))
            packed_get(imemory, packed, pdest);
    }
    push(asize);                /* op now points at the new top slot */
    ref_assign(op, &aref);      /* put the array back on top */
    return 0;
}
/*
 * <proc> bind <proc> : replace executable operator names in a procedure
 * (and, recursively, in its writable sub-procedures) with the operators
 * themselves, and make executable sub-arrays read-only.  Uses the operand
 * stack itself as the work list of arrays still being scanned.
 */
static int
zbind(i_ctx_t *i_ctx_p)
{
    os_ptr op = osp;
    uint depth = 1;
    ref defn;
    register os_ptr bsp;

    switch (r_type(op)) {
        case t_array:
            if (!r_has_attr(op, a_write)) {
                return 0;	/* per PLRM3 */
            }
            /* fall through */
        case t_mixedarray:
        case t_shortarray:
            defn = *op;
            break;
        case t_oparray:
            /* Operator-array: bind the underlying procedure. */
            defn = *op->value.const_refs;
            break;
        default:
            return_op_typecheck(op);
    }
    /* Push a working copy of the procedure; it is consumed by the scan. */
    push(1);
    *op = defn;
    bsp = op;
    /*
     * We must not make the top-level procedure read-only,
     * but we must bind it even if it is read-only already.
     *
     * Here are the invariants for the following loop:
     *      `depth' elements have been pushed on the ostack;
     *      For i < depth, p = ref_stack_index(&o_stack, i):
     *        *p is an array (or packedarray) ref.
     */
    while (depth) {
        /* Scan the array on top of the work list one element at a time;
           the working ref's size and pointer are advanced as we go. */
        while (r_size(bsp)) {
            ref_packed *const tpp = (ref_packed *)bsp->value.packed; /* break const */

            r_dec_size(bsp, 1);
            if (r_is_packed(tpp)) {
                /* Check for a packed executable name */
                ushort elt = *tpp;

                if (r_packed_is_exec_name(&elt)) {
                    ref nref;
                    ref *pvalue;

                    name_index_ref(imemory, packed_name_index(&elt), &nref);
                    if ((pvalue = dict_find_name(&nref)) != 0 &&
                        r_is_ex_oper(pvalue)
                        ) {
                        store_check_dest(bsp, pvalue);
                        /*
                         * Always save the change, since this can only
                         * happen once.
                         */
                        ref_do_save(bsp, tpp, "bind");
                        /* Rewrite the packed name as a packed operator. */
                        *tpp = pt_tag(pt_executable_operator) +
                            op_index(pvalue);
                    }
                }
                bsp->value.packed = tpp + 1;
            } else {
                ref *const tp = bsp->value.refs++;

                switch (r_type(tp)) {
                    case t_name:	/* bind the name if an operator */
                        if (r_has_attr(tp, a_executable)) {
                            ref *pvalue;

                            if ((pvalue = dict_find_name(tp)) != 0 &&
                                r_is_ex_oper(pvalue)
                                ) {
                                store_check_dest(bsp, pvalue);
                                /* Saved assignment: replace the name with
                                   the operator in the procedure body. */
                                ref_assign_old(bsp, tp, pvalue, "bind");
                            }
                        }
                        break;
                    case t_array:	/* push into array if writable */
                        if (!r_has_attr(tp, a_write))
                            break;
                        /* fall through */
                    case t_mixedarray:
                    case t_shortarray:
                        if (r_has_attr(tp, a_executable)) {
                            /* Make reference read-only */
                            r_clear_attrs(tp, a_write);
                            if (bsp >= ostop) {
                                /* Push a new stack block. */
                                ref temp;
                                int code;

                                temp = *tp;
                                osp = bsp;
                                code = ref_stack_push(&o_stack, 1);
                                if (code < 0) {
                                    /* Unwind the work list before failing. */
                                    ref_stack_pop(&o_stack, depth);
                                    return_error(code);
                                }
                                bsp = osp;
                                *bsp = temp;
                            } else
                                *++bsp = *tp;
                            depth++;    /* one more array on the work list */
                        }
                }
            }
        }
        /* Current array fully scanned: pop it from the work list. */
        bsp--;
        depth--;
        if (bsp < osbot) {
            /* Pop back to the previous stack block. */
            osp = bsp;
            ref_stack_pop_block(&o_stack);
            bsp = osp;
        }
    }
    osp = bsp;
    return 0;
}