/*
 * index_truncate_tuple
 *
 * Build a palloc'd copy of "source" that retains only its first
 * leavenatts attributes.
 *
 * The truncated tuple is guaranteed to be no larger than the original.
 * Although the result may be used together with the original tuple
 * descriptor, callers must never actually fetch any of the attributes
 * that were cut off -- in particular, index_getattr() against the result
 * requires special care, and the result should only be handed to code
 * under the caller's direct control.
 *
 * No external table access happens here, so calling this while holding a
 * buffer lock is fine.  Revisit that assumption if index tuples ever
 * become able to carry EXTERNAL TOAST datums.
 */
IndexTuple
index_truncate_tuple(TupleDesc sourceDescriptor, IndexTuple source,
					 int leavenatts)
{
	TupleDesc	scratchdesc;
	Datum		values[INDEX_MAX_KEYS];
	bool		isnull[INDEX_MAX_KEYS];
	IndexTuple	result;

	Assert(leavenatts < sourceDescriptor->natts);

	/* Make a throwaway descriptor we can safely modify */
	scratchdesc = palloc(TupleDescSize(sourceDescriptor));
	TupleDescCopy(scratchdesc, sourceDescriptor);
	scratchdesc->natts = leavenatts;

	/* Deform the source, then re-form it with the reduced attribute count */
	index_deform_tuple(source, scratchdesc, values, isnull);
	result = index_form_tuple(scratchdesc, values, isnull);
	result->t_tid = source->t_tid;
	Assert(IndexTupleSize(result) <= IndexTupleSize(source));

	/*
	 * No leak possible here: TupleDescCopy() performs no nested
	 * allocations, so one plain pfree() releases the scratch descriptor
	 * entirely.
	 */
	pfree(scratchdesc);

	return result;
}
/*
 * Report a uniqueness violation against the tuple currently at the heap top.
 *
 * See NOTE ON UNIQUENESS CHECKING in the comment at the top of the file
 * for information about why duplicates are detected here during the sort.
 *
 * Previously this report was duplicated inline in two places inside
 * mkheap_putAndGet_impl(), and the second copy omitted
 * errtableconstraint(); both callers now report identically.  This
 * function does not return.
 */
static void
mkheap_report_unique_violation(MKHeap *mkheap)
{
	Datum		values[INDEX_MAX_KEYS];
	bool		isnull[INDEX_MAX_KEYS];

	index_deform_tuple((IndexTuple) mkheap->p->ptr,
					   mkheap->mkctxt->tupdesc, values, isnull);
	ereport(ERROR,
			(errcode(ERRCODE_UNIQUE_VIOLATION),
			 errmsg("could not create unique index \"%s\"",
					RelationGetRelationName(mkheap->mkctxt->indexRel)),
			 errdetail("Key %s is duplicated.",
					   BuildIndexValueDescription(mkheap->mkctxt->indexRel,
												  values, isnull)),
			 errtableconstraint(mkheap->mkctxt->heapRel,
								RelationGetRelationName(mkheap->mkctxt->indexRel))));
}

/*
 * Insert an entry and perhaps return the top element of the heap in *e
 *
 * Comparison happens from the specified level to the end of levels, as needed:
 *   Return < 0 if smaller than heap top; *e is unchanged
 *   Return = 0 if eq to heap top; *e is unchanged (but will have value equal
 *              to the heap top)
 *   Return > 0 if successfully inserted; *e is populated with the removed
 *              heap top
 *
 * If 0 would be returned but the heap is marked as needing uniqueness
 * enforcement, an error is raised instead.
 */
static int
mkheap_putAndGet_impl(MKHeap *mkheap, MKEntry *e)
{
	int			c = 0;
	int			toplv;
	MKEntry		tmp;

	/* can't put+get from an empty heap */
	Assert(mkheap->count > 0);

	if (mkheap->mkctxt->enforceUnique && mke_has_duplicates_with_root(mkheap))
		mkheap_report_unique_violation(mkheap);

	if (mke_is_empty(e))
	{
		/*
		 * adding an empty (sentinel): just remove from count and fall
		 * through to where top is removed
		 */
		--mkheap->count;
	}
	else if (mke_get_run(e) != mke_get_run(mkheap->p))
	{
		/*
		 * this code assumes that the new one, with lower run, is LARGER
		 * than the top -- so it must be a larger run
		 */
		Assert(mke_get_run(e) > mke_get_run(mkheap->p));

		/*
		 * when the runs differ it is because we attempted once with the
		 * runs equal.  So if level is zero then: the level was zero AND
		 * validly prepared for the previous run -- and there is no need
		 * to prep again
		 */
		if (mke_get_lv(e) != 0)
		{
			/* Not same run, at least prepare lv 0 */
			if (mkheap->mkctxt->fetchForPrep)
				tupsort_prepare(e, mkheap->mkctxt, 0);
			mke_set_lv(e, 0);
		}

		/*
		 * now fall through and let top be returned; new one is also
		 * inserted so no change to count
		 */
	}
	else
	{
		/* same run so figure out where it fits in relation to the heap top */
		int			lv = 0;

		toplv = mke_get_lv(mkheap->p);
		mke_set_lv(e, lv);

		/* populate level until we differ from the top element of the heap */
		while (lv < toplv)
		{
			if (mkheap->mkctxt->fetchForPrep)
				tupsort_prepare(e, mkheap->mkctxt, lv);
			c = mkheap_compare(mkheap, e, mkheap->lvtops + lv);
			if (c != 0)
				break;
			mke_set_lv(e, ++lv);
		}

		/* smaller than top */
		if (c < 0)
			return -1;

		/*
		 * we have not done e->lv == toplv yet since we increment at the
		 * end of the previous loop.  Do it now.
		 */
		Assert(mke_get_lv(e) == lv);
		if (lv == toplv)
		{
			if (mkheap->mkctxt->fetchForPrep)
				tupsort_prepare(e, mkheap->mkctxt, lv);
			c = mkheap_compare(mkheap, e, mkheap->p);
			if (c < 0)
				return -1;
		}

		if (c == 0)
		{
			/*
			 * Equal and at top level.
			 *
			 * This means that e is less-than/equal to all entries except
			 * the heap top.
			 */
			Assert(mke_get_lv(e) == lv);
			Assert(lv == mke_get_lv(mkheap->p));

			/*
			 * Expand more levels of lvtop in the current top and the new
			 * one until we detect a difference.
			 */
			while (lv < mkheap->mkctxt->total_lv - 1)
			{
				mkheap_save_lvtop(mkheap);

				++lv;

				/* expand top */
				if (mkheap->mkctxt->fetchForPrep)
					tupsort_prepare(mkheap->p, mkheap->mkctxt, lv);

				/* expand new element */
				if (mkheap->mkctxt->fetchForPrep)
					tupsort_prepare(e, mkheap->mkctxt, lv);

				mke_set_lv(mkheap->p, lv);
				mke_set_lv(e, lv);

				c = mkheap_compare(mkheap, e, mkheap->p);
				if (c != 0)
					break;
			}

			if (c <= 0)
			{
				/*
				 * if new one is less than current top then we just return
				 * that negative comparison
				 *
				 * if new one equals the current top then we could do an
				 * insert and immediate removal -- but it won't matter so
				 * we simply return right away, leaving *e untouched
				 */

				/* enforce uniqueness first */
				if (c == 0 && mkheap->mkctxt->enforceUnique)
					mkheap_report_unique_violation(mkheap);

				return c;
			}
		}
	}

	/*
	 * Now, I am bigger than top but not definitely smaller/equal to all
	 * other entries
	 *
	 * So we will: return top as *e, then do heap shuffling to restore
	 * heap ordering
	 */
	tmp = *e;
	*e = mkheap->p[0];

	/* Sift down a hole to bottom of (current or next) run, depends on tmp.run */
	mkheap_siftdown(mkheap, 0, &tmp);

	if (mkheap_need_heapify(mkheap))
		mkheap_heapify(mkheap, false);

	if (mkheap->count > 0)
	{
		mkheap_update_lvtops(mkheap);
	}

#ifdef USE_ASSERT_CHECKING
	if (gp_mk_sort_check)
		mkheap_verify_heap(mkheap, 0);
#endif

	return 1;
}