int
dict_find(const ref * pdref, const ref * pkey, ref ** ppvalue)
{
    dict *pdict = pdref->value.pdict;
    int code = real_dict_find(pdref, pkey, ppvalue);

    stats_dict.lookups++;
    if (r_has_type(pkey, t_name) && dict_is_packed(pdict)) {
        uint nidx = name_index(dict_mem(pdict), pkey);
        uint hash =
            dict_hash_mod(dict_name_index_hash(nidx), npairs(pdict)) + 1;

        if (pdict->keys.value.packed[hash] == pt_tag(pt_literal_name) + nidx)
            stats_dict.probe1++;
        else if (pdict->keys.value.packed[hash - 1] ==
                 pt_tag(pt_literal_name) + nidx)
            stats_dict.probe2++;
    }
    /* Do the cheap flag test before the expensive remainder test. */
    if (gs_debug_c('d') && !(stats_dict.lookups % 1000))
        dlprintf3("[d]lookups=%ld probe1=%ld probe2=%ld\n",
                  stats_dict.lookups, stats_dict.probe1, stats_dict.probe2);
    return code;
}
ref *
dstack_find_name_by_index(dict_stack_t * pds, uint nidx)
{
    ref *pvalue = real_dstack_find_name_by_index(pds, nidx);
    dict *pdict = pds->stack.p->value.pdict;

    INCR(lookups);
    if (dict_is_packed(pdict)) {
        uint hash =
            dict_hash_mod(dict_name_index_hash(nidx), npairs(pdict)) + 1;

        if (pdict->keys.value.packed[hash] == pt_tag(pt_literal_name) + nidx)
            INCR(probes[0]);
        else if (pdict->keys.value.packed[hash - 1] ==
                 pt_tag(pt_literal_name) + nidx)
            INCR(probes[1]);
    }
    if (gs_debug_c('d') && !(stats_dstack.lookups % 1000))
        dlprintf3("[d]lookups=%ld probe1=%ld probe2=%ld\n",
                  stats_dstack.lookups, stats_dstack.probes[0],
                  stats_dstack.probes[1]);
    return pvalue;
}
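/*
 * Aside (not part of the original source): the probe1/probe2 counters above
 * distinguish lookups that hit the primary hash slot from those that hit the
 * slot immediately below it.  The following is a minimal, self-contained
 * sketch of that two-slot probing idea.  The TABLE_SLOTS and EMPTY_SLOT
 * names, the plain unsigned keys, and probe_kind() are simplified stand-ins,
 * not Ghostscript's packed-key representation.
 */
#include <stdio.h>

#define TABLE_SLOTS 16          /* hypothetical number of key slots */
#define EMPTY_SLOT  0u          /* hypothetical "no key here" marker */

/* Return 1 for a hit in the primary slot, 2 for a hit one slot below it,
 * 0 if the key would need a longer search (simplified model). */
static int
probe_kind(const unsigned *keys, unsigned key, unsigned hash)
{
    unsigned slot = hash % TABLE_SLOTS;

    if (keys[slot] == key)
        return 1;               /* corresponds to probe1 */
    if (slot > 0 && keys[slot - 1] == key)
        return 2;               /* corresponds to probe2 */
    return 0;
}

int
main(void)
{
    unsigned keys[TABLE_SLOTS] = { EMPTY_SLOT };

    keys[5] = 42;               /* pretend key 42 hashed to slot 5 */
    keys[4] = 7;                /* pretend key 7 collided and moved down */
    printf("42 -> probe%d\n", probe_kind(keys, 42, 5));
    printf("7  -> probe%d\n", probe_kind(keys, 7, 5));
    return 0;
}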
/* ensuring that refs in mixed arrays are properly aligned. */
#undef idmemory			/****** NOTA BENE ******/
int
make_packed_array(ref * parr, ref_stack_t * pstack, uint size,
                  gs_dual_memory_t *idmemory, client_name_t cname)
{
    uint i;
    const ref *pref;
    uint idest = 0, ishort = 0;
    ref_packed *pbody;
    ref_packed *pdest;
    ref_packed *pshort;         /* points to start of */
                                /* last run of short elements */
    gs_ref_memory_t *imem = idmemory->current;
    uint space = imemory_space(imem);
    int skip = 0, pad;
    ref rtemp;
    int code;

    /* Do a first pass to calculate the size of the array, */
    /* and to detect local-into-global stores. */

    for (i = size; i != 0; i--) {
        pref = ref_stack_index(pstack, i - 1);
        switch (r_btype(pref)) {        /* not r_type, opers are special */
            case t_name:
                if (name_index(imem, pref) >= packed_name_max_index)
                    break;      /* can't pack */
                idest++;
                continue;
            case t_integer:
                if (pref->value.intval < packed_min_intval ||
                    pref->value.intval > packed_max_intval
                    )
                    break;
                idest++;
                continue;
            case t_oparray:
                /* Check for local-into-global store. */
                store_check_space(space, pref);
                /* falls through */
            case t_operator:
                {
                    uint oidx;

                    if (!r_has_attr(pref, a_executable))
                        break;
                    oidx = op_index(pref);
                    if (oidx == 0 || oidx > packed_int_mask)
                        break;
                }
                idest++;
                continue;
            default:
                /* Check for local-into-global store. */
                store_check_space(space, pref);
        }
        /* Can't pack this element, use a full ref. */
        /* We may have to unpack up to align_packed_per_ref - 1 */
        /* preceding short elements. */
        /* If we are at the beginning of the array, however, */
        /* we can just move the elements up. */
        {
            int i = (idest - ishort) & (align_packed_per_ref - 1);

            if (ishort == 0)    /* first time */
                idest += skip = -i & (align_packed_per_ref - 1);
            else
                idest += (packed_per_ref - 1) * i;
        }
        ishort = idest += packed_per_ref;
    }
    pad = -(int)idest & (packed_per_ref - 1);   /* padding at end */

    /* Now we can allocate the array. */

    code = gs_alloc_ref_array(imem, &rtemp, 0, (idest + pad) / packed_per_ref,
                              cname);
    if (code < 0)
        return code;
    pbody = (ref_packed *) rtemp.value.refs;

    /* Make sure any initial skipped elements contain legal packed */
    /* refs, so that the garbage collector can scan storage. */

    pshort = pbody;
    for (; skip; skip--)
        *pbody++ = pt_tag(pt_integer);
    pdest = pbody;

    for (i = size; i != 0; i--) {
        pref = ref_stack_index(pstack, i - 1);
        switch (r_btype(pref)) {        /* not r_type, opers are special */
            case t_name:
                {
                    uint nidx = name_index(imem, pref);

                    if (nidx >= packed_name_max_index)
                        break;  /* can't pack */
                    *pdest++ = nidx +
                        (r_has_attr(pref, a_executable) ?
                         pt_tag(pt_executable_name) :
                         pt_tag(pt_literal_name));
                }
                continue;
            case t_integer:
                if (pref->value.intval < packed_min_intval ||
                    pref->value.intval > packed_max_intval
                    )
                    break;
                *pdest++ = pt_tag(pt_integer) +
                    ((short)pref->value.intval - packed_min_intval);
                continue;
            case t_oparray:
            case t_operator:
                {
                    uint oidx;

                    if (!r_has_attr(pref, a_executable))
                        break;
                    oidx = op_index(pref);
                    if (oidx == 0 || oidx > packed_int_mask)
                        break;
                    *pdest++ = pt_tag(pt_executable_operator) + oidx;
                }
                continue;
        }
        /* Can't pack this element, use a full ref. */
        /* We may have to unpack up to align_packed_per_ref - 1 */
        /* preceding short elements. */
        /* Note that if we are at the beginning of the array, */
        /* 'skip' already ensures that we don't need to do this. */
        {
            int i = (pdest - pshort) & (align_packed_per_ref - 1);
            const ref_packed *psrc = pdest;
            ref *pmove =
                (ref *) (pdest += (packed_per_ref - 1) * i);

            ref_assign_new(pmove, pref);
            while (--i >= 0) {
                --psrc;
                --pmove;
                packed_get(imem->non_gc_memory, psrc, pmove);
            }
        }
        pshort = pdest += packed_per_ref;
    }
    {
        int atype =
            (pdest == pbody + size ? t_shortarray : t_mixedarray);

        /* Pad with legal packed refs so that the garbage collector */
        /* can scan storage. */

        for (; pad; pad--)
            *pdest++ = pt_tag(pt_integer);

        /* Finally, make the array. */

        ref_stack_pop(pstack, size);
        make_tasv_new(parr, atype, a_readonly | space, size, packed,
                      pbody + skip);
    }
    return 0;
}
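/*
 * Aside (not part of the original source): the packing above stores a small
 * integer in a single ref_packed cell by biasing it with the minimum
 * packable value and adding the integer tag.  A standalone sketch of that
 * encode/decode round trip follows; the SKETCH_* names and values are
 * illustrative assumptions, not the real constants from the packed-ref
 * headers.
 */
#include <stdio.h>

#define SKETCH_TAG_INTEGER  0x8000u   /* assumed tag bits for a packed integer */
#define SKETCH_MIN_INTVAL  (-16384)   /* assumed smallest packable value */
#define SKETCH_MAX_INTVAL    16383    /* assumed largest packable value */

/* Encode v if it fits in the packed range; otherwise report failure,
 * meaning the caller would have to fall back to a full-size ref. */
static int
sketch_pack_int(long v, unsigned short *out)
{
    if (v < SKETCH_MIN_INTVAL || v > SKETCH_MAX_INTVAL)
        return 0;
    *out = (unsigned short)(SKETCH_TAG_INTEGER + (v - SKETCH_MIN_INTVAL));
    return 1;
}

/* Recover the original value from the packed cell. */
static long
sketch_unpack_int(unsigned short cell)
{
    return (long)(cell - SKETCH_TAG_INTEGER) + SKETCH_MIN_INTVAL;
}

int
main(void)
{
    unsigned short cell;

    if (sketch_pack_int(-3, &cell))
        printf("-3 packs to 0x%04x, unpacks to %ld\n",
               cell, sketch_unpack_int(cell));
    if (!sketch_pack_int(100000, &cell))
        printf("100000 does not fit; a full-size ref would be needed\n");
    return 0;
}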
static int
zbind(i_ctx_t *i_ctx_p)
{
    os_ptr op = osp;
    uint depth = 1;
    ref defn;
    register os_ptr bsp;

    switch (r_type(op)) {
        case t_array:
            if (!r_has_attr(op, a_write)) {
                return 0;       /* per PLRM3 */
            }
        case t_mixedarray:
        case t_shortarray:
            defn = *op;
            break;
        case t_oparray:
            defn = *op->value.const_refs;
            break;
        default:
            return_op_typecheck(op);
    }
    push(1);
    *op = defn;
    bsp = op;
    /*
     * We must not make the top-level procedure read-only,
     * but we must bind it even if it is read-only already.
     *
     * Here are the invariants for the following loop:
     *      `depth' elements have been pushed on the ostack;
     *      For i < depth, p = ref_stack_index(&o_stack, i):
     *        *p is an array (or packedarray) ref.
     */
    while (depth) {
        while (r_size(bsp)) {
            ref_packed *const tpp = (ref_packed *)bsp->value.packed; /* break const */

            r_dec_size(bsp, 1);
            if (r_is_packed(tpp)) {
                /* Check for a packed executable name */
                ushort elt = *tpp;

                if (r_packed_is_exec_name(&elt)) {
                    ref nref;
                    ref *pvalue;

                    name_index_ref(imemory, packed_name_index(&elt), &nref);
                    if ((pvalue = dict_find_name(&nref)) != 0 &&
                        r_is_ex_oper(pvalue)
                        ) {
                        store_check_dest(bsp, pvalue);
                        /*
                         * Always save the change, since this can only
                         * happen once.
                         */
                        ref_do_save(bsp, tpp, "bind");
                        *tpp = pt_tag(pt_executable_operator) +
                            op_index(pvalue);
                    }
                }
                bsp->value.packed = tpp + 1;
            } else {
                ref *const tp = bsp->value.refs++;

                switch (r_type(tp)) {
                    case t_name:        /* bind the name if an operator */
                        if (r_has_attr(tp, a_executable)) {
                            ref *pvalue;

                            if ((pvalue = dict_find_name(tp)) != 0 &&
                                r_is_ex_oper(pvalue)
                                ) {
                                store_check_dest(bsp, pvalue);
                                ref_assign_old(bsp, tp, pvalue, "bind");
                            }
                        }
                        break;
                    case t_array:       /* push into array if writable */
                        if (!r_has_attr(tp, a_write))
                            break;
                    case t_mixedarray:
                    case t_shortarray:
                        if (r_has_attr(tp, a_executable)) {
                            /* Make reference read-only */
                            r_clear_attrs(tp, a_write);
                            if (bsp >= ostop) {
                                /* Push a new stack block. */
                                ref temp;
                                int code;

                                temp = *tp;
                                osp = bsp;
                                code = ref_stack_push(&o_stack, 1);
                                if (code < 0) {
                                    ref_stack_pop(&o_stack, depth);
                                    return_error(code);
                                }
                                bsp = osp;
                                *bsp = temp;
                            } else
                                *++bsp = *tp;
                            depth++;
                        }
                }
            }
        }
        bsp--;
        depth--;
        if (bsp < osbot) {      /* Pop back to the previous stack block. */
            osp = bsp;
            ref_stack_pop_block(&o_stack);
            bsp = osp;
        }
    }
    osp = bsp;
    return 0;
}
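/*
 * Aside (not part of the original source): zbind avoids recursion by pushing
 * each nested procedure onto the operand stack and tracking `depth'.  The
 * standalone sketch below models only that traversal pattern with an
 * explicit work stack; the elem type, MAX_DEPTH bound, and walk() function
 * are hypothetical and unrelated to refs or the real o_stack.
 */
#include <stdio.h>

typedef struct elem_s {
    const struct elem_s *kids;  /* non-NULL: a nested "procedure" */
    int nkids;
    int value;                  /* meaningful only when kids == NULL */
} elem;

#define MAX_DEPTH 64            /* assumed nesting bound for the sketch */

/* Visit every leaf without recursion, descending into nested arrays the way
 * zbind pushes sub-procedures and bumps depth. */
static void
walk(const elem *proc, int n)
{
    struct { const elem *p; int left; } stack[MAX_DEPTH];
    int depth = 0;

    stack[depth].p = proc;
    stack[depth].left = n;
    while (depth >= 0) {
        if (stack[depth].left == 0) {
            depth--;            /* finished this procedure: pop back */
            continue;
        }
        const elem *e = stack[depth].p++;

        stack[depth].left--;
        if (e->kids != NULL && depth + 1 < MAX_DEPTH) {
            depth++;            /* descend into the nested procedure */
            stack[depth].p = e->kids;
            stack[depth].left = e->nkids;
        } else if (e->kids == NULL) {
            printf("leaf %d\n", e->value);
        }
    }
}

int
main(void)
{
    static const elem inner[] = { { 0, 0, 2 }, { 0, 0, 3 } };
    static const elem outer[] = { { 0, 0, 1 }, { inner, 2, 0 } };

    walk(outer, 2);             /* prints leaves 1, 2, 3 in order */
    return 0;
}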
/* Remove the marks at the same time. */
static void
refs_compact(const gs_memory_t *mem, obj_header_t * pre, obj_header_t * dpre,
             uint size)
{
    ref_packed *dest;
    ref_packed *src;
    ref_packed *end;
    uint new_size;

    /* The next switch controls an optimization for the loop termination
       condition.  It was useful during development, when some assumptions
       were temporarily wrong.  We keep it for the record. */

    src = (ref_packed *) (pre + 1);
    end = (ref_packed *) ((byte *) src + size);
    /*
     * We know that a block of refs always ends with a
     * full-size ref, so we only need to check for reaching the end
     * of the block when we see one of those.
     */
    if (dpre == pre)            /* Loop while we don't need to copy. */
        for (;;) {
            if (r_is_packed(src)) {
                if (!r_has_pmark(src))
                    break;
                if_debug1('8', " [8]packed ref 0x%lx \"copied\"\n",
                          (ulong) src);
                *src &= ~lp_mark;
                src++;
            } else {            /* full-size ref */
                ref *const pref = (ref *)src;

                if (!r_has_attr(pref, l_mark))
                    break;
                if_debug1('8', " [8]ref 0x%lx \"copied\"\n", (ulong) src);
                r_clear_attrs(pref, l_mark);
                src += packed_per_ref;
            }
        }
    else
        *dpre = *pre;
    dest = (ref_packed *) ((char *)dpre + ((char *)src - (char *)pre));
    for (;;) {
        if (r_is_packed(src)) {
            if (r_has_pmark(src)) {
                if_debug2('8', " [8]packed ref 0x%lx copied to 0x%lx\n",
                          (ulong) src, (ulong) dest);
                *dest++ = *src & ~lp_mark;
            }
            src++;
        } else {                /* full-size ref */
            if (r_has_attr((ref *) src, l_mark)) {
                ref rtemp;

                if_debug2('8', " [8]ref 0x%lx copied to 0x%lx\n",
                          (ulong) src, (ulong) dest);
                /* We can't just use ref_assign_inline, */
                /* because the source and destination */
                /* might overlap! */
                ref_assign_inline(&rtemp, (ref *) src);
                r_clear_attrs(&rtemp, l_mark);
                ref_assign_inline((ref *) dest, &rtemp);
                src += packed_per_ref;
                dest += packed_per_ref;
            } else {            /* check for end of block */
                src += packed_per_ref;
                if (src >= end)
                    break;
            }
        }
    }
    new_size = (byte *) dest - (byte *) (dpre + 1) + sizeof(ref);
#ifdef DEBUG
    /* Check that the relocation came out OK. */
    /* NOTE: this check only works within a single chunk. */
    if ((byte *) src - (byte *) dest != r_size((ref *) src - 1) + sizeof(ref)) {
        lprintf3("Reloc error for refs 0x%lx: reloc = %lu, stored = %u\n",
                 (ulong) dpre, (ulong) ((byte *) src - (byte *) dest),
                 (uint) r_size((ref *) src - 1));
        gs_abort(mem);
    }
#endif
    /* Pad to a multiple of sizeof(ref). */
    while (new_size & (sizeof(ref) - 1))
        *dest++ = pt_tag(pt_integer),
            new_size += sizeof(ref_packed);
    /* We want to make the newly freed space into a free block, */
    /* but we can only do this if we have enough room. */
    if (size - new_size < sizeof(obj_header_t)) {
        /* Not enough room.  Pad to original size. */
        while (new_size < size)
            *dest++ = pt_tag(pt_integer),
                new_size += sizeof(ref_packed);
    } else {
        obj_header_t *pfree = (obj_header_t *) ((ref *) dest + 1);

        pfree->o_alone = 0;
        pfree->o_size = size - new_size - sizeof(obj_header_t);
        pfree->o_type = &st_bytes;
    }
    /* Re-create the final ref. */
    r_set_type((ref *) dest, t_integer);
    dpre->o_size = new_size;
}
/* See gsmemory.h for why the argument is const and the result is not. */
ref_packed *
igc_reloc_ref_ptr_nocheck(const ref_packed * prp, gc_state_t *gcst)
{
    /*
     * Search forward for relocation.  This algorithm is intrinsically very
     * inefficient; we hope eventually to replace it with a better one.
     */
    const ref_packed *rp = prp;
    uint dec = 0;
#ifdef ALIGNMENT_ALIASING_BUG
    const ref *rpref;
# define RP_REF(rp) (rpref = (const ref *)rp, rpref)
#else
# define RP_REF(rp) ((const ref *)rp)
#endif
    for (;;) {
        if (r_is_packed(rp)) {
            /*
             * Normally, an unmarked packed ref will be an
             * integer whose value is the amount of relocation.
             * However, the relocation value might have been
             * too large to fit.  If this is the case, for
             * each such unmarked packed ref we pass over,
             * we have to decrement the final relocation.
             */
            rputc((*rp & lp_mark ? '1' : '0'));
            if (!(*rp & lp_mark)) {
                if (*rp != pt_tag(pt_integer) + packed_max_value) {
                    /* This is a stored relocation value. */
                    rputc('\n');
                    rp = print_reloc(prp, "ref",
                                     (const ref_packed *)
                                     ((const char *)prp -
                                      (*rp & packed_value_mask) + dec));
                    break;
                }
                /*
                 * We know this is the first of an aligned block
                 * of packed refs.  Skip over the entire block,
                 * decrementing the final relocation.
                 */
                dec += sizeof(ref_packed) * align_packed_per_ref;
                rp += align_packed_per_ref;
            } else
                rp++;
            continue;
        }
        if (!ref_type_uses_size_or_null(r_type(RP_REF(rp)))) {
            /* reloc is in r_size */
            rputc('\n');
            rp = print_reloc(prp, "ref",
                             (const ref_packed *)
                             (r_size(RP_REF(rp)) == 0 ? prp :
                              (const ref_packed *)((const char *)prp -
                                                   r_size(RP_REF(rp)) + dec)));
            break;
        }
        rputc('u');
        rp += packed_per_ref;
    }
    /* Use a severely deprecated pun to remove the const property. */
    {
        union { const ref_packed *r; ref_packed *w; } u;

        u.r = rp;
        return u.w;
    }
#undef RP_REF
}
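/*
 * Aside (not part of the original source): the forward scan above finds the
 * nearest slot that still holds a stored relocation, and `dec' compensates
 * for unmarked packed blocks passed over whose relocation overflowed.  The
 * standalone sketch below models that idea with plain ints; SLOT_LIVE,
 * SLOT_OVERFLOW, and reloc_at() are illustrative names, not the real
 * encoding, and the adjustment is counted in slots rather than bytes.
 */
#include <stdio.h>

#define SLOT_LIVE     (-1)     /* marked slot: no relocation stored here */
#define SLOT_OVERFLOW (-2)     /* unmarked slot whose relocation did not fit */

/* Scan forward from pos until a stored relocation is found, decrementing it
 * by the number of overflowed (soon-to-be-freed) slots passed over. */
static int
reloc_at(const int *slots, int nslots, int pos)
{
    int dec = 0;
    int i;

    for (i = pos; i < nslots; i++) {
        if (slots[i] == SLOT_LIVE)
            continue;
        if (slots[i] == SLOT_OVERFLOW) {
            dec++;
            continue;
        }
        return slots[i] - dec;          /* stored value, corrected */
    }
    return 0;                           /* no stored relocation found */
}

int
main(void)
{
    /* live, overflowed, live, then a stored relocation of 5 */
    int slots[] = { SLOT_LIVE, SLOT_OVERFLOW, SLOT_LIVE, 5 };

    printf("reloc at slot 0 = %d\n", reloc_at(slots, 4, 0));    /* 5 - 1 */
    return 0;
}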
/* Set the relocation for a ref object. */
static bool
refs_set_reloc(obj_header_t * hdr, uint reloc, uint size)
{
    ref_packed *rp = (ref_packed *) (hdr + 1);
    ref_packed *end = (ref_packed *) ((byte *) rp + size);
    uint freed = 0;

    /*
     * We have to be careful to keep refs aligned properly.
     * For the moment, we do this by either keeping or discarding
     * an entire (aligned) block of align_packed_per_ref packed elements
     * as a unit.  We know that align_packed_per_ref <= packed_per_ref,
     * and we also know that packed refs are always allocated in blocks
     * of align_packed_per_ref, so this makes things relatively easy.
     */
    while (rp < end) {
        if (r_is_packed(rp)) {
#if align_packed_per_ref == 1
            if (r_has_pmark(rp)) {
                if_debug1('8', " [8]packed ref 0x%lx is marked\n",
                          (ulong) rp);
                rp++;
            } else {
#else
            int i;

            /*
             * Note: align_packed_per_ref is typically
             * 2 or 4 for 32-bit processors.
             */
#define all_marked (align_packed_per_ref * lp_mark)
# if align_packed_per_ref == 2
#  if arch_sizeof_int == arch_sizeof_short * 2
#   undef all_marked
#   define all_marked ( (lp_mark << (sizeof(short) * 8)) + lp_mark )
#   define marked (*(int *)rp & all_marked)
#  else
#   define marked ((*rp & lp_mark) + (rp[1] & lp_mark))
#  endif
# else
#  if align_packed_per_ref == 4
#   define marked ((*rp & lp_mark) + (rp[1] & lp_mark) +\
                   (rp[2] & lp_mark) + (rp[3] & lp_mark))
#  else
            /*
             * The value of marked is logically a uint, not an int:
             * we declare it as int only to avoid a compiler warning
             * message about using a non-int value in a switch statement.
             */
            int marked = *rp & lp_mark;

            for (i = 1; i < align_packed_per_ref; i++)
                marked += rp[i] & lp_mark;
#  endif
# endif
            /*
             * Now marked is lp_mark * the number of marked
             * packed refs in the aligned block, except for
             * a couple of special cases above.
             */
            switch (marked) {
                case all_marked:
                    if_debug2('8',
                              " [8]packed refs 0x%lx..0x%lx are marked\n",
                              (ulong) rp,
                              (ulong) (rp + (align_packed_per_ref - 1)));
                    rp += align_packed_per_ref;
                    break;
                default:
                    /* At least one packed ref in the block */
                    /* is marked: Keep the whole block. */
                    for (i = align_packed_per_ref; i--; rp++) {
                        r_set_pmark(rp);
                        if_debug1('8',
                                  " [8]packed ref 0x%lx is marked\n",
                                  (ulong) rp);
                    }
                    break;
                case 0:
#endif
                    if_debug2('8',
                              " [8]%d packed ref(s) at 0x%lx are unmarked\n",
                              align_packed_per_ref, (ulong) rp);
                    {
                        uint rel = reloc + freed;

                        /* Change this to an integer so we can */
                        /* store the relocation here. */
                        *rp = pt_tag(pt_integer) +
                            min(rel, packed_max_value);
                    }
                    rp += align_packed_per_ref;
                    freed += sizeof(ref_packed) * align_packed_per_ref;
            }
        } else {                /* full-size ref */
            uint rel = reloc + freed;

            /* The following assignment is logically */
            /* unnecessary; we do it only for convenience */
            /* in debugging. */
            ref *pref = (ref *) rp;

            if (!r_has_attr(pref, l_mark)) {
                if_debug1('8', " [8]ref 0x%lx is unmarked\n",
                          (ulong) pref);
                /* Change this to a mark so we can */
                /* store the relocation. */
                r_set_type(pref, t_mark);
                r_set_size(pref, rel);
                freed += sizeof(ref);
            } else {
                if_debug1('8', " [8]ref 0x%lx is marked\n",
                          (ulong) pref);
                /* Store the relocation here if possible. */
                if (!ref_type_uses_size_or_null(r_type(pref))) {
                    if_debug2('8', " [8]storing reloc %u at 0x%lx\n",
                              rel, (ulong) pref);
                    r_set_size(pref, rel);
                }
            }
            rp += packed_per_ref;
        }
    }
    if_debug3('7', " [7]at end of refs 0x%lx, size = %u, freed = %u\n",
              (ulong) (hdr + 1), size, freed);
    if (freed == size)
        return false;
#if arch_sizeof_int > arch_sizeof_short
    /*
     * If the final relocation can't fit in the r_size field
     * (which can't happen if the object shares a chunk with
     * any other objects, so we know reloc = 0 in this case),
     * we have to keep the entire object unless there are no
     * references to any ref in it.
     */
    if (freed <= max_ushort)
        return true;
    /*
     * We have to mark all surviving refs, but we also must
     * overwrite any non-surviving refs with something that
     * doesn't contain any pointers.
     */
    rp = (ref_packed *) (hdr + 1);
    while (rp < end) {
        if (r_is_packed(rp)) {
            if (!r_has_pmark(rp))
                *rp = pt_tag(pt_integer) | lp_mark;
            ++rp;
        } else {
            /* The following assignment is logically */
            /* unnecessary; we do it only for convenience */
            /* in debugging. */
            ref *pref = (ref *) rp;

            if (!r_has_attr(pref, l_mark)) {
                r_set_type_attrs(pref, t_mark, l_mark);
                r_set_size(pref, reloc);
            } else {
                if (!ref_type_uses_size_or_null(r_type(pref)))
                    r_set_size(pref, reloc);
            }
            rp += packed_per_ref;
        }
    }
    /* The last ref has to remain unmarked. */
    r_clear_attrs((ref *) rp - 1, l_mark);
#endif
    return true;
}
/*
 * Enter a key-value pair in a dictionary.
 * See idict.h for the possible return values.
 */
int
dict_put(ref * pdref /* t_dictionary */ , const ref * pkey, const ref * pvalue,
         dict_stack_t *pds)
{
    dict *pdict = pdref->value.pdict;
    gs_ref_memory_t *mem = dict_memory(pdict);
    gs_memory_t *pmem = dict_mem(pdict);
    int rcode = 0;
    int code;
    ref *pvslot, kname;

    /* Check the value. */
    store_check_dest(pdref, pvalue);
  top:if ((code = dict_find(pdref, pkey, &pvslot)) <= 0) {
        /* not found */ /* Check for overflow */
        uint index;

        switch (code) {
            case 0:
                break;
            case gs_error_dictfull:
                if (!pmem->gs_lib_ctx->dict_auto_expand)
                    return_error(gs_error_dictfull);
                code = dict_grow(pdref, pds);
                if (code < 0)
                    return code;
                goto top;       /* keep things simple */
            default:            /* gs_error_typecheck */
                return code;
        }
        index = pvslot - pdict->values.value.refs;
        /* If the key is a string, convert it to a name. */
        if (r_has_type(pkey, t_string)) {
            int code;

            if (!r_has_attr(pkey, a_read))
                return_error(gs_error_invalidaccess);
            code = name_from_string(pmem, pkey, &kname);
            if (code < 0)
                return code;
            pkey = &kname;
        }
        if (dict_is_packed(pdict)) {
            ref_packed *kp;

            if (!r_has_type(pkey, t_name) ||
                name_index(pmem, pkey) > packed_name_max_index
                ) {             /* Change to unpacked representation. */
                int code = dict_unpack(pdref, pds);

                if (code < 0)
                    return code;
                goto top;
            }
            kp = pdict->keys.value.writable_packed + index;
            if (ref_must_save_in(mem, &pdict->keys)) {
                /* See initial comment for why it is safe */
                /* not to save the change if the keys */
                /* array itself is new. */
                ref_do_save_in(mem, &pdict->keys, kp, "dict_put(key)");
            }
            *kp = pt_tag(pt_literal_name) + name_index(pmem, pkey);
        } else {
            ref *kp = pdict->keys.value.refs + index;

            if_debug2m('d', (const gs_memory_t *)mem,
                       "[d]0x%lx: fill key at 0x%lx\n",
                       (ulong) pdict, (ulong) kp);
            store_check_dest(pdref, pkey);
            ref_assign_old_in(mem, &pdict->keys, kp, pkey,
                              "dict_put(key)");        /* set key of pair */
        }
        ref_save_in(mem, pdref, &pdict->count, "dict_put(count)");
        pdict->count.value.intval++;
        /* If the key is a name, update its 1-element cache. */
        if (r_has_type(pkey, t_name)) {
            name *pname = pkey->value.pname;

            if (pname->pvalue == pv_no_defn &&
                CAN_SET_PVALUE_CACHE(pds, pdref, mem)
                ) {             /* Set the cache. */
                if_debug0m('d', (const gs_memory_t *)mem, "[d]set cache\n");
                pname->pvalue = pvslot;
            } else {            /* The cache can't be used. */
                if_debug0m('d', (const gs_memory_t *)mem, "[d]no cache\n");
                pname->pvalue = pv_other;
            }
        }
        rcode = 1;
    }
    if_debug8m('d', (const gs_memory_t *)mem,
               "[d]0x%lx: put key 0x%lx 0x%lx\n value at 0x%lx: old 0x%lx 0x%lx, new 0x%lx 0x%lx\n",
               (ulong) pdref->value.pdict,
               ((const ulong *)pkey)[0], ((const ulong *)pkey)[1],
               (ulong) pvslot,
               ((const ulong *)pvslot)[0], ((const ulong *)pvslot)[1],
               ((const ulong *)pvalue)[0], ((const ulong *)pvalue)[1]);
    ref_assign_old_in(mem, &pdref->value.pdict->values, pvslot, pvalue,
                      "dict_put(value)");
    return rcode;
}
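/*
 * Aside (not part of the original source): when dict_find reports
 * gs_error_dictfull and auto-expansion is enabled, dict_put grows the
 * dictionary and retries from `top:'.  The standalone sketch below shows
 * only that grow-and-retry control flow on a toy fixed-capacity table; the
 * toy_* names are hypothetical and have nothing to do with the real dict
 * representation.
 */
#include <stdio.h>
#include <stdlib.h>

typedef struct {
    int *vals;
    size_t used, cap;
} toy_table;

enum { TOY_OK = 0, TOY_FULL = -1 };

/* Insert without growing; report TOY_FULL when there is no room,
 * analogous to dict_find returning gs_error_dictfull. */
static int
toy_try_insert(toy_table *t, int v)
{
    if (t->used == t->cap)
        return TOY_FULL;
    t->vals[t->used++] = v;
    return TOY_OK;
}

/* Double the capacity (grow from 0 to 4 the first time). */
static int
toy_grow(toy_table *t)
{
    size_t ncap = t->cap ? t->cap * 2 : 4;
    int *nv = realloc(t->vals, ncap * sizeof(*nv));

    if (nv == NULL)
        return -1;
    t->vals = nv;
    t->cap = ncap;
    return 0;
}

/* Grow-and-retry loop, mirroring dict_put's goto top on dictfull. */
static int
toy_put(toy_table *t, int v)
{
top:
    if (toy_try_insert(t, v) == TOY_FULL) {
        if (toy_grow(t) < 0)
            return -1;
        goto top;               /* keep things simple: retry the insert */
    }
    return 0;
}

int
main(void)
{
    toy_table t = { NULL, 0, 0 };
    int i;

    for (i = 0; i < 10; i++)
        toy_put(&t, i);
    printf("stored %u of capacity %u\n", (unsigned)t.used, (unsigned)t.cap);
    free(t.vals);
    return 0;
}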