Beispiel #1
0
rt_public EIF_BOOLEAN eequal(register EIF_REFERENCE target, register EIF_REFERENCE source)
{
	/* Implementation of `standard_is_equal' from class ANY.
	 * Assumes the dynamic type of the object referred by `source' conforms
	 * to the dynamic type of the object referred by `target'; neither may
	 * be NULL. Objects of the same non-composite dynamic type are compared
	 * with a single block comparison, composite ones field by field.
	 * Returns a boolean.
	 */

	REQUIRE ("source_not_null", source);
	REQUIRE ("target_not_null", target);

	if (source == target) {
			/* Identical references necessarily denote equal objects. */
		return EIF_TRUE;
	}

	if (Dftype(source) != Dftype(target)) {
			/* Different dynamic types: objects cannot be standard-equal. */
		return EIF_FALSE;
	}

	if (HEADER(source)->ov_flags & EO_SPEC) {
			/* SPECIAL or TUPLE object: equality requires identical count and
			 * element size, then a raw comparison of the visible area. Since
			 * source and target have the same dynamic type, checking the
			 * source's flags is sufficient. */
		if ((RT_SPECIAL_COUNT(source) == RT_SPECIAL_COUNT(target)) &&
			(RT_SPECIAL_ELEM_SIZE(source) == RT_SPECIAL_ELEM_SIZE(target)))
		{
			return EIF_TEST(memcmp (source, target, RT_SPECIAL_VISIBLE_SIZE(source)) == 0);
		}
		return EIF_FALSE;
	}

	if (HEADER(source)->ov_flags & EO_COMP) {
			/* Composite object (has expanded attributes): intra-expanded
			 * references forbid a raw block comparison, so compare field
			 * by field. */
		return e_field_equal(target, source);
	}

		/* Plain object without expanded attributes: block comparison over
		 * the object's size is enough. */
	return EIF_TEST(memcmp (source, target, EIF_Size(Dtype(source))) == 0);
}
Beispiel #2
0
rt_private void spcopy(register EIF_REFERENCE source, register EIF_REFERENCE target)
{
    /* Copy the content of special object `source' into special object
     * `target'. Both references must be non-NULL specials with identical
     * element sizes. The copy length is bounded by the smaller of the two
     * counts so that differing sizes never corrupt memory.
     */

    rt_uint_ptr l_copied_bytes;		/* Number of bytes to transfer */
    rt_uint_ptr l_target_count, l_source_count;
#ifdef ISE_GC
    uint16 l_flags;					/* Target header flags */
#endif

    REQUIRE ("source not null", source);
    REQUIRE ("target not null", target);
    REQUIRE ("source is special", HEADER(source)->ov_flags & EO_SPEC);
    REQUIRE ("target is special", HEADER(target)->ov_flags & EO_SPEC);
    REQUIRE ("target_elem_size_identical", RT_SPECIAL_ELEM_SIZE(target) == RT_SPECIAL_ELEM_SIZE(source));

    /* `copy' may be invoked on specials/tuples of different sizes, so the
     * amount copied is the minimum of both counts; anything more would be a
     * memory corruption. When users call `copy' directly (not via `twin')
     * the postcondition of `copy' may be violated, but that is far better
     * than trashing memory. */

    /* FIXME: Once `copy' is only exported to TUPLE/SPECIAL (not ANY), a
     * precondition RT_SPECIAL_COUNT(target) == RT_SPECIAL_COUNT(source)
     * can be added to `spcopy'. */

    l_target_count = RT_SPECIAL_COUNT(target);
    l_source_count = RT_SPECIAL_COUNT(source);
    l_copied_bytes = (l_target_count > l_source_count ? l_source_count : l_target_count)
        * (rt_uint_ptr) RT_SPECIAL_ELEM_SIZE (source);
    memmove(target, source, l_copied_bytes);		/* Block copy; areas may overlap */

#ifdef ISE_GC
    /* A full age scan of a (possibly huge) special, including traversal of
     * embedded expanded objects, is the GC's job, not ours. Instead, when
     * the target is an old object holding references, it is conservatively
     * put in the remembered set; the GC will drop it at the next cycle if
     * it turns out not to be needed. --RAM
     */

    l_flags = HEADER(target)->ov_flags;
    CHECK ("Not forwarded", !(HEADER (target)->ov_flags & B_FWD));
    if ((l_flags & (EO_REF | EO_OLD | EO_REM)) == (EO_OLD | EO_REF)) {
            /* Old, reference-holding and not yet remembered: it may now
             * hold new references. Remember it unconditionally to keep the
             * copying process fast. */
        eremb(target);
    }
#endif /* ISE_GC */
}
Beispiel #3
0
rt_private EIF_REFERENCE spclone(EIF_REFERENCE source)
{
    /* Clone the Eiffel object `source'. Assumes that `source'
     * is a special object. Returns the freshly allocated clone,
     * or a void reference when `source' is itself void.
     * NOTE: the caller is expected to copy the actual content;
     * this routine only allocates and sets up the header/counts.
     */

    EIF_GET_CONTEXT
    EIF_REFERENCE result;		/* Clone pointer */
    union overhead *zone;		/* Pointer on source header */
    uint16 flags;				/* Source object flags */
    EIF_TYPE_INDEX dtype, dftype;

    if ((EIF_REFERENCE) 0 == source)
        return (EIF_REFERENCE) 0;				/* Void source */

    RT_GC_PROTECT(source);			/* Protection against GC: `spmalloc' below may trigger a collection */

    zone = HEADER(source);				/* Allocation of a new object */
    flags = zone->ov_flags;
    dtype = zone->ov_dtype;
    dftype = zone->ov_dftype;
        /* Third argument: allocate an atomic (non-reference) special when
         * the source holds no references. */
    result = spmalloc(RT_SPECIAL_COUNT(source), RT_SPECIAL_ELEM_SIZE(source), EIF_TEST(!(flags & EO_REF)));

    /* Keep the reference flag and the composite one and the type */
    HEADER(result)->ov_flags |= flags & (EO_REF | EO_COMP);
    HEADER(result)->ov_dtype = dtype;
    HEADER(result)->ov_dftype = dftype;
    /* Keep the count and the element size; capacity is set to the count,
     * not the source's capacity, since only `count' items are cloned. */
    RT_SPECIAL_COUNT(result) = RT_SPECIAL_COUNT(source);
    RT_SPECIAL_ELEM_SIZE(result) = RT_SPECIAL_ELEM_SIZE(source);
    RT_SPECIAL_CAPACITY(result) = RT_SPECIAL_COUNT(source);

    if (!egc_has_old_special_semantic) {
        /* If by default allocation does not clear the data of a SPECIAL,
         * we actually need to clear it; otherwise we would end up with a
         * SPECIAL object that is susceptible to be manipulated by the GC
         * while waiting to be filled. */
        memset(result, 0, RT_SPECIAL_VISIBLE_SIZE(result));
    }

    RT_GC_WEAN(source);				/* Remove GC protection */

    return result;
}
Beispiel #4
0
rt_public void sp_copy_data (EIF_REFERENCE Current, EIF_REFERENCE source, EIF_INTEGER source_index, EIF_INTEGER destination_index, EIF_INTEGER n)
{
    /* Copy `n' elements of `source', starting at index `source_index',
     * into `Current' starting at index `destination_index'. Indexes are
     * zero-based and the destination area is assumed to be already
     * allocated with sufficient capacity. `Current' and `source' may
     * denote the same object (overlap is handled by `memmove').
     */

    rt_uint_ptr l_elem_size;		/* Size in bytes of one element */

    REQUIRE ("Current not null", Current);
    REQUIRE ("source not null", source);
    REQUIRE ("Special object", HEADER (source)->ov_flags & EO_SPEC);
    REQUIRE ("Special object", HEADER (Current)->ov_flags & EO_SPEC);
    REQUIRE ("Not tuple object", !(HEADER (source)->ov_flags & EO_TUPLE));
    REQUIRE ("Not tuple object", !(HEADER (Current)->ov_flags & EO_TUPLE));
    REQUIRE ("Not a special of expanded", !(HEADER(Current)->ov_flags & EO_COMP));
    REQUIRE ("source_index non_negative", source_index >= 0);
    REQUIRE ("destination_index non_negative", destination_index >= 0);
    REQUIRE ("n non_negative", n >= 0);
    REQUIRE ("source_index_valid", source_index + n <= RT_SPECIAL_COUNT(source));
    REQUIRE ("source_index valid for destination", destination_index + n <= RT_SPECIAL_CAPACITY(Current));

    l_elem_size = RT_SPECIAL_ELEM_SIZE(source);
    memmove(
        Current + ((rt_uint_ptr) destination_index * l_elem_size),
        source + ((rt_uint_ptr) source_index * l_elem_size),
        (rt_uint_ptr) n * l_elem_size);

#ifdef ISE_GC
    /* Scanning a possibly huge special (and any embedded expanded objects)
     * for new references is the GC's job. So when the destination is an old
     * object holding references, conservatively insert it in the remembered
     * set; the GC removes it at the next cycle if unnecessary. --RAM
     * This is only needed when `source' and `Current' are distinct objects. -- Manu
     */
    if (Current != source) {
        if ((HEADER(Current)->ov_flags & (EO_REF | EO_OLD | EO_REM)) == (EO_OLD | EO_REF)) {
                /* It may now hold new references: remember it, even if not
                 * strictly needed, to keep the copying process fast. */
            eremb(Current);
        }
    }
#endif
}
Beispiel #5
0
rt_private uint32 pst_store(struct rt_store_context *a_context, EIF_REFERENCE object, uint32 a_object_count)
{
	/* Second pass of the store mechanism: writing on the disk.
	 * Recursively traverses `object', writing each not-yet-stored object
	 * via `a_context->object_write_function'. `a_object_count' is the
	 * running number of objects written so far; the updated count is
	 * returned. Objects are unmarked (EO_STORE cleared) as they are
	 * written so sharing/cycles are handled.
	 * NOTE(review): relies on file-static state (`need_index', `make_index',
	 * `server', `file_position', `parsing_position') not visible here. */
	EIF_REFERENCE o_ref;
	EIF_REFERENCE o_ptr;
	long i, nb_references;
	union overhead *zone = HEADER(object);
	uint16 flags;
	int is_expanded, has_volatile_attributes = 0;
	EIF_BOOLEAN object_needs_index;
	long saved_file_pos = 0;
	long saved_object_count = a_object_count;

	REQUIRE ("valid need_index and make_index", (need_index && make_index) || (!need_index && !make_index));

	if (need_index) {
			/* Ask the client whether this object should be indexed. */
		object_needs_index = (EIF_BOOLEAN) ((EIF_BOOLEAN (*)(EIF_REFERENCE, EIF_REFERENCE))need_index)
			(server,object);
		if (object_needs_index) {
				/* If the object needs an index, the buffer is flushed so that
				 * a new compression header is stored just before the object
				 * thus the decompression will work when starting the retrieve
				 * there */
			a_context->flush_buffer_function();
			saved_file_pos = file_position + parsing_position;
		}
	} else {
		object_needs_index = 0;
	}


	flags = zone->ov_flags;
	is_expanded = eif_is_nested_expanded(flags);
	if (!(is_expanded || (flags & EO_STORE)))
		return a_object_count;		/* Unmarked means already stored */
	else if (!is_expanded)
		a_object_count++;			/* Expanded objects are stored within their enclosing object */
	
	zone->ov_flags &= ~EO_STORE;	/* Unmark it */

#ifdef DEBUG
	printf("object 0x%" EIF_POINTER_DISPLAY " [%s %" EIF_POINTER_DISPLAY "]\n", (rt_uint_ptr) object, System(zone->ov_dtype).cn_generator, (rt_uint_ptr) zone->ov_flags);
#endif
	/* Recursive traversal of the references held by the object, so that
	 * referenced objects are stored (depth first) before this one's record
	 * is emitted. */
	if (flags & EO_SPEC) {					/* Special object */
		if (flags & EO_REF) {				/* Special of reference/composite types */
			EIF_INTEGER count, elem_size;
			EIF_REFERENCE ref;

			count = RT_SPECIAL_COUNT(object);
			if (flags & EO_TUPLE) {
				EIF_TYPED_VALUE * l_item = (EIF_TYPED_VALUE *) object;
					/* Don't forget that first element of TUPLE is the BOOLEAN
					 * `object_comparison' attribute. */
				l_item++;
				count--;
				for (; count > 0; count--, l_item++) {
						/* Only reference items need recursion; basic-typed
						 * items are written with the tuple itself. */
					if (eif_is_reference_tuple_item(l_item)) {
						o_ref = eif_reference_tuple_item(l_item);
						if (o_ref) {
							a_object_count = pst_store (a_context, o_ref, a_object_count);
						}
					}
				}
			} else if (!(flags & EO_COMP)) {		/* Special of references */
				for (ref = object; count > 0; count--,
						ref = (EIF_REFERENCE) ((EIF_REFERENCE *) ref + 1)) {
					o_ref = *(EIF_REFERENCE *) ref;
					if (o_ref != (EIF_REFERENCE) 0)
						a_object_count = pst_store (a_context, o_ref,a_object_count);
				}
			} else {						/* Special of composites */
					/* Each item is an expanded object preceded by its own
					 * header: step over items, recursing on each. */
				elem_size = RT_SPECIAL_ELEM_SIZE(object);
				for (ref = object + OVERHEAD; count > 0;
					count --, ref += elem_size) {
					a_object_count = pst_store (a_context, ref,a_object_count);
				}
			}
		}
	} else {								/* Normal object */
		nb_references = References(zone->ov_dtype);

		/* Traversal of references of `object' */
		for (
			o_ptr = object, i = 0;
			i < nb_references;
			i++, o_ptr = (EIF_REFERENCE) (((EIF_REFERENCE *) o_ptr) +1)
		) {
			o_ref = *(EIF_REFERENCE *)o_ptr;
			if (o_ref) {
					/* Transient (volatile) attributes are not stored; only
					 * record that some were skipped. */
				if (!EIF_IS_TRANSIENT_ATTRIBUTE(System(zone->ov_dtype), i)) {
					a_object_count = pst_store (a_context, o_ref, a_object_count);
				} else {
					has_volatile_attributes = 1;
				}
			}
		}
	}

	if (!is_expanded) {
		a_context->object_write_function(object, has_volatile_attributes);		/* write the object */
	}

	/* Call `make_index' on `server' with `object': the index records the
	 * file position saved before writing and the number of objects the
	 * subtree produced. */
    if (object_needs_index) {
		(make_index)(server, object, saved_file_pos, a_object_count - saved_object_count);
	}

	return a_object_count;
}
Beispiel #6
0
rt_private EIF_BOOLEAN rdeepiso(EIF_REFERENCE target,EIF_REFERENCE source)
{
	/* Recursive isomorphism test between the object graphs rooted at
	 * `target' and `source'. Already-visited targets are recorded in the
	 * global `eif_equality_table' so cycles and sharing terminate.
	 * Return a boolean.
	 */

	RT_GET_CONTEXT
	union overhead *zone = HEADER(target);	/* Target header */
	uint32 flags;							/* Target flags */
	EIF_REFERENCE s_ref, t_ref, t_field, s_field;
	EIF_INTEGER count, elem_size;

	flags = zone->ov_flags;

	/* Check if the object has already been inspected: a conflict in the
	 * search table means it was, so it is (tentatively) isomorphic. */
	if (s_put(eif_equality_table, target) == EIF_SEARCH_CONFLICT)
		return EIF_TRUE;

	/* Isomorphism test between `source' and `target'.
	 * Two cases: either a normal object or a special object.
	 */
	if (flags & EO_SPEC) {
		/* Special or tuple objects: first a shallow structural test. */
		if (!spiso(target, source))
			return EIF_FALSE;

		if (!(flags & EO_REF))
			/* No reference to inspect */
			return EIF_TRUE;

		/* Evaluation of the count of the target special object */
		count = RT_SPECIAL_COUNT(target);

		if (flags & EO_TUPLE) {
			EIF_TYPED_VALUE * l_source = (EIF_TYPED_VALUE *) source;
			EIF_TYPED_VALUE * l_target = (EIF_TYPED_VALUE *) target;
				/* Don't forget that first element of TUPLE is the BOOLEAN
				 * `object_comparison' attribute. */
			for (; count > 0; count--, l_source++, l_target++) {
				if
					(eif_is_reference_tuple_item(l_source) &&
					eif_is_reference_tuple_item(l_target))
				{
					s_field = eif_reference_tuple_item (l_source);
					t_field = eif_reference_tuple_item (l_target);
					if ((s_field == NULL) && (t_field == NULL)) {
							/* Both void: isomorphic, go on. */
						continue;
					} else if ((s_field) && (t_field)) {
							/* Both attached: recurse into the items. */
						if (!rdeepiso(t_field, s_field)) {
							return EIF_FALSE;
						}
					} else {
							/* One void, one attached: not isomorphic. */
						return EIF_FALSE;
					}
				}
			}
			return EIF_TRUE;
		} else if (!(flags & EO_COMP)) {
			CHECK("Special of reference", flags & EO_REF);
			/* Specials filled with references: we have to iterate on fields
			* two by two.
			*/
			/* Evaluation of the count of the target special object */
			for(
				s_ref = (EIF_REFERENCE)source, t_ref = (EIF_REFERENCE) target;
				count > 0;
				count --,
					s_ref = (EIF_REFERENCE) ((EIF_REFERENCE *) s_ref + 1),
					t_ref = (EIF_REFERENCE) ((EIF_REFERENCE *) t_ref + 1)
			) {
				/* Evaluation of two references */
				s_field = *(EIF_REFERENCE *) s_ref;
				t_field = *(EIF_REFERENCE *) t_ref;
				if ((((EIF_REFERENCE) 0) == s_field) && (((EIF_REFERENCE) 0) == t_field))
					/* Two void references */
					continue;
				else if ((EIF_REFERENCE) 0 != s_field && (EIF_REFERENCE) 0 != t_field) {
					/* Recursion on references of the special object */
					if (!rdeepiso(t_field, s_field))
						return EIF_FALSE;
				} else
					/* One void reference and one attached: failure. */
					return EIF_FALSE;
			}
			return EIF_TRUE;
		} else {
			CHECK("Special of expanded with references", flags & EO_REF);
			/* Special objects filled with (non-special) expanded objects.
			 * we call then standard isomorphism test on normal objects.
			 */
			elem_size = RT_SPECIAL_ELEM_SIZE(target);
			for (
				s_ref = source+OVERHEAD, t_ref = target+OVERHEAD;
				count > 0;
				count--, s_ref += elem_size, t_ref += elem_size
			) {
				/* Iteration on expanded elements which cannot be special
				 * objects
				 */
				if (!(rdeepiter(t_ref, s_ref)))
					return EIF_FALSE;
			}
			return EIF_TRUE;
		}
	} else {
		/* Normal object: shallow isomorphism first, then recursion on
		 * its references via `rdeepiter'. */
		if (!eiso(target, source))
			return EIF_FALSE;

		/* Iteration on references */
		return rdeepiter(target, source);
	}
	/* NOTREACHED */
	return EIF_FALSE;
}
Beispiel #7
0
rt_public EIF_BOOLEAN spiso(register EIF_REFERENCE target, register EIF_REFERENCE source)
{
	/* Compare two special objects in term of their structures. `source'
	 * and `target' are refering two special objects. There are three cases:
	 * 1- either the elements are direct instances: block comparison.
	 * 2- either the elements are references: comparison of referenced
	 *	dynamic type.
	 * 3- or the elements are expanded: call `eiso' (special objects
	 *	cannot be expanded).
	 * Returns EIF_TRUE when the two specials are isomorphic (same count,
	 * same element size, matching element structure).
	 */

	union overhead *s_zone;				/* Source header */
	uint32 s_flags;						/* Source flags */
	/*uint32 t_flags;*/					/* Target flags */
	EIF_REFERENCE s_ref;
	EIF_REFERENCE t_ref;
	EIF_INTEGER count;				/* Common count */
	EIF_INTEGER elem_size;			/* Common element size */
	EIF_REFERENCE s_field, t_field;

	REQUIRE("special objects", (HEADER(target)->ov_flags & EO_SPEC) && (HEADER(source)->ov_flags & EO_SPEC));

	if (source == target)
		return EIF_TRUE;	/* Same object: trivially isomorphic. */

	s_zone = HEADER(source);

#ifdef DEBUG
	dprintf(2)("spiso: source = 0x%lx [%d] target = 0x%lx [%d]\n",
		source, RT_SPECIAL_COUNT(source),
		target, RT_SPECIAL_COUNT(target));
#endif

	/* First condition: same count */
	count = RT_SPECIAL_COUNT(source);
	if (count != RT_SPECIAL_COUNT(target))
		return EIF_FALSE;

	/* Second condition: same element size */
	elem_size = RT_SPECIAL_ELEM_SIZE(source);
	if (elem_size != RT_SPECIAL_ELEM_SIZE(target))
		return EIF_FALSE;

	s_flags = s_zone->ov_flags;

		/* In final mode, we can do block comparison on special of basic types
		 * or on special of expanded which have no references since they have no header.
		 * In workbench mode, block comparison is only possible on special of basic types. */
#ifdef WORKBENCH
	if (!(s_flags & EO_REF) && !(s_flags & EO_COMP)) {
#else
	if (!(s_flags & EO_REF)) {
#endif
		/* Case 1: specials filled with direct instances: block comparison */
		return EIF_TEST(!memcmp (source, target, (rt_uint_ptr) count * (rt_uint_ptr) elem_size));
	}

	if (s_flags & EO_TUPLE) {
		EIF_TYPED_VALUE * l_source = (EIF_TYPED_VALUE *) source;
		EIF_TYPED_VALUE * l_target = (EIF_TYPED_VALUE *) target;
			/* Don't forget that first element of TUPLE is the BOOLEAN
			 * `object_comparison' attribute. */
		for (; count > 0; count--, l_source++, l_target++) {
			if
				(eif_is_reference_tuple_item(l_source) &&
				eif_is_reference_tuple_item(l_target))
			{
				s_field = eif_reference_tuple_item (l_source);
				t_field = eif_reference_tuple_item (l_target);
				if ((s_field == NULL) && (t_field == NULL)) {
						/* Both items void: isomorphic. */
					continue;
				} else if ((s_field) && (t_field) && (Dtype(s_field) == Dtype(t_field))) {
						/* Both attached with same dynamic type: isomorphic. */
					continue;
				} else {
					return EIF_FALSE;
				}
			}
		}
		return EIF_TRUE;
	} else if ((s_flags & EO_REF) && !(s_flags & EO_COMP)) {
		/* Case 2: specials filled with references: we have to check fields
		 * one by one.
		 */
		for(
			s_ref = (EIF_REFERENCE)source, t_ref = (EIF_REFERENCE) target;
			count > 0;
			count --,
				s_ref = (EIF_REFERENCE) ((EIF_REFERENCE *) s_ref + 1),
				t_ref = (EIF_REFERENCE) ((EIF_REFERENCE *) t_ref + 1)
		) {
			/* Evaluation of two references */
			s_field = *(EIF_REFERENCE *) s_ref;
			t_field = *(EIF_REFERENCE *) t_ref;
			if ((!s_field) && (!t_field))
				/* Two void references */
				continue;
			else if (		(((EIF_REFERENCE) 0) != s_field) &&
							(((EIF_REFERENCE) 0) != t_field) &&
							(Dtype(s_field) == Dtype(t_field))
					)
				/* Two non-void references on objects of same dynamic type */
				continue;
			else
				/* No ismorphism */
				return EIF_FALSE;
		}
		return EIF_TRUE;
	}

	/* Case 3: special objects filled with (non-special) expanded objects.
	 * we call then standard isomorphism test on normal objects.
	 * Each item is preceded by its own header, hence the OVERHEAD offset.
	 */
	for (
		s_ref = source +OVERHEAD, t_ref = target+OVERHEAD;
		count >0;
		count--, s_ref += elem_size, t_ref += elem_size
	) {
		/* Iteration on expanded elements */
		if (!eiso(t_ref, s_ref))
			return EIF_FALSE;
	}

	return EIF_TRUE;
}

rt_public EIF_BOOLEAN ediso(EIF_REFERENCE target, EIF_REFERENCE source)
{
	/* Deep isomorphism test: recursively compare the structure attached
	 * to `target' with the one attached to `source'. This implements the
	 * standard Eiffel feature, applying the standard isomorphism test
	 * recursively over both object graphs.
	 * Return a boolean.
	 */

	RT_GET_CONTEXT
	EIF_BOOLEAN l_result;
#ifdef ISE_GC
	char l_gc_was_on;	/* Saved GC status */

	l_gc_was_on = eif_gc_ison();
	if (l_gc_was_on) {
		eif_gc_stop();					/* Disable GC during the traversal */
	}
#endif

		/* Search table used by `rdeepiso' to detect already-visited objects. */
	eif_equality_table = s_create(100);
	l_result = rdeepiso(target,source);

#ifdef ISE_GC
	if (l_gc_was_on) {
		eif_gc_run();					/* Restore GC only if it was enabled on entry */
	}
#endif

		/* Release the search table: keys array first, then the descriptor. */
	eif_rt_xfree((EIF_REFERENCE) (eif_equality_table->s_keys));
	eif_rt_xfree((EIF_REFERENCE) eif_equality_table);
	eif_equality_table = NULL;
	return l_result;
}
Beispiel #8
0
rt_private void rdeepclone (EIF_REFERENCE source, EIF_REFERENCE enclosing, rt_uint_ptr offset)
/* `source': object to be cloned */
/* `enclosing': object receiving the clone */
/* `offset': offset within `enclosing' where the attachment is made */
{
    /* Recursive deep clone of `source' is attached to `receiver'. The
     * enclosing parameter gives us a pointer to where the address of the
     * currently built object lies, or in other words, it's the object on
     * which we are currently recurring. That way, we are able to perform aging
     * tests when the attachment to the receiving reference is done.
     * Marking: objects still carrying EO_STORE have not been cloned yet;
     * the mark is cleared as each object is duplicated, and the clone is
     * recorded in the `hclone' hash table for later sharing.
     */

    RT_GET_CONTEXT
    EIF_REFERENCE clone, c_field;
    uint16 flags;
    EIF_INTEGER count, elem_size;

    REQUIRE("source not null", source);
    REQUIRE("enclosing not null", enclosing);

    flags = HEADER(source)->ov_flags;

    if (!(flags & EO_STORE)) {		/* Object has already been cloned */

        /* Object is no longer marked: it has already been duplicated and
         * thus the resulting duplication is in the hash table.
         */

        clone = *hash_search(&hclone, source);
        *(EIF_REFERENCE *) (enclosing + offset) = clone;
        CHECK ("Not forwarded", !(HEADER (enclosing)->ov_size & B_FWD));
        RTAR(enclosing, clone);		/* Age check on the new attachment */
        return;
    }

    /* The object has not already been duplicated */

    flags &= ~EO_STORE;						/* Unmark the object */
    HEADER(source)->ov_flags = flags;		/* Resynchronize object flags */
    clone = duplicate(source, enclosing, offset);	/* Duplicate object */

    /* The object has now been duplicated and entered in the H table */

    if (flags & EO_SPEC) {					/* Special object */
        if (!(flags & EO_REF)) {				/* No references */
            return;
        }
        count = RT_SPECIAL_COUNT(clone);			/* Number of items in special */

        /* If object is filled up with references, loop over it and recursively
         * deep clone them. If the object has expanded objects, then we need
         * to update their possible intra expanded fields (in case they
         * themselves have expanded objects) and also to deep clone them.
         */

        if (flags & EO_TUPLE) {
            EIF_TYPED_VALUE * l_target = (EIF_TYPED_VALUE *) clone;
            EIF_TYPED_VALUE * l_source = (EIF_TYPED_VALUE *) source;
            /* Don't forget that first element of TUPLE is the BOOLEAN
             * `object_comparison' attribute. */
            /* NOTE: `offset' is reused here as the running byte offset of
             * the current item within the clone. */
            for (offset = 0; count > 0; count--, l_target++, l_source++, offset +=sizeof(EIF_TYPED_VALUE)) {
                if (eif_is_reference_tuple_item(l_source)) {
                    c_field = eif_reference_tuple_item(l_target);
                    if (c_field) {
                        rdeepclone(c_field, clone, offset);
                    }
                }
            }
        } else if (!(flags & EO_COMP))	{	/* Special object filled with references */
            for (offset = 0; count > 0; count--, offset += REFSIZ) {
                c_field = *(EIF_REFERENCE *) (clone + offset);
                /* Iteration on non void references and Eiffel references */
                if (c_field) {
                    rdeepclone(c_field, clone, offset);
                }
            }
        } else {					/* Special filled with expanded objects */
                /* Each item carries its own header, hence the initial
                 * OVERHEAD offset and the per-item stride. */
            elem_size = RT_SPECIAL_ELEM_SIZE(clone);
            for (offset = OVERHEAD; count > 0; count--, offset += elem_size)
                expanded_update(source, clone + offset, DEEP);
        }

    } else
        expanded_update(source, clone, DEEP); /* Update intra expanded refs */
}
rt_public EIF_REFERENCE eif_once_objects_of_result_type(EIF_TYPE result_type) 
	/* Return a SPECIAL of type `result_type' containing all once objects
	 * held by the system. Collects references from the once sets (per
	 * thread when EIF_THREADS is defined), then copies them into a newly
	 * allocated SPECIAL with the GC temporarily stopped so objects do not
	 * move while being recorded. */
{
	RT_GET_CONTEXT
	EIF_REFERENCE Result;
	union overhead *zone;
	struct obj_array l_found;		/* Growable collection of found objects */

	size_t i; 
#if defined(EIF_THREADS) && defined(ISE_GC)
	size_t l_threads_count;
#endif
	char gc_stopped;

	/* Lock global once mutex. */
#ifdef EIF_THREADS
	EIF_ASYNC_SAFE_CS_LOCK(eif_global_once_set_mutex);
#endif

		/* Initialize structure that will hold found objects */
	l_found.count = 0;
	l_found.capacity = 64;
	l_found.area = malloc (sizeof (EIF_REFERENCE) * l_found.capacity);
	if (!l_found.area) {
		enomem();
	}

#ifndef EIF_THREADS
#ifdef WORKBENCH
	rt_ostack_lookup (&l_found, &once_set);
#else
	rt_oastack_lookup (&l_found, &once_set);
#endif
#else
	rt_oastack_lookup(&l_found, &global_once_set);
#ifdef ISE_GC
		/* Fix: the previous code incremented `i' twice per iteration
		 * (once in the subscript, once in the body), skipping every other
		 * thread's once set. Visit each per-thread stack exactly once. */
	l_threads_count = once_set_list.count;
	for (i = 0; i < l_threads_count; i++) {
		rt_ostack_lookup (&l_found, once_set_list.threads.ostack[i]);
	}
#endif
#endif

	/* Unlock global once mutex */
#ifdef EIF_THREADS
	EIF_ASYNC_SAFE_CS_UNLOCK(eif_global_once_set_mutex);
#endif

		/* Now `l_found' is properly populated so let's create
		 * SPECIAL objects of type `result_type' that we will return.
		 * We turn off GC since we do not want objects to be moved. */
	gc_stopped = !eif_gc_ison();
	eif_gc_stop();

	Result = spmalloc (l_found.count, sizeof (EIF_REFERENCE), EIF_FALSE);
	zone = HEADER (Result);
	zone->ov_flags |= EO_REF;
	zone->ov_dftype = result_type.id;
	zone->ov_dtype = To_dtype(result_type.id);
	RT_SPECIAL_COUNT(Result) = l_found.count;
	RT_SPECIAL_ELEM_SIZE(Result) = sizeof(EIF_REFERENCE);
	RT_SPECIAL_CAPACITY(Result) = l_found.count;

		/* Now, populate `Result' with content of `l_found'. Since we just
		 * created a new Eiffel object, perform the age check (RTAR) for
		 * each stored reference. */
	for (i = 0 ; i < l_found.count ; i++) {

			/* Store object in `Result'. */
		*((EIF_REFERENCE*) Result + i) = l_found.area [i];
		RTAR(Result, l_found.area [i]);
	}

	free (l_found.area);

		/* Let's turn back the GC on, only if it was running on entry */
	if (!gc_stopped) eif_gc_run();

	return Result;
}