Code example #1
File: loader_disk.c  Project: BogdanStroe/cubrid
/*
 * get_class_heap - Locates or creates the heap file for a class.
 *    return: heap file of the class, or NULL on error
 *    classop(in): class object
 *    class_(in): class structure
 */
static HFID *
get_class_heap (MOP classop, SM_CLASS * class_)
{
  HFID *hfid;
  OID *class_oid;

  hfid = sm_ch_heap ((MOBJ) class_);
  if (HFID_IS_NULL (hfid))
    {
      /* could also accomplish this by creating a single instance */
      /* 
       * make sure the class is fetched for update so that it will be
       * marked dirty and stored with the new heap
       */
      if (au_fetch_class (classop, &class_, AU_FETCH_UPDATE, AU_INSERT) != NO_ERROR)
	{
	  hfid = NULL;
	}
      else
	{
	  const bool reuse_oid = (class_->flags & SM_CLASSFLAG_REUSE_OID) ? true : false;

	  class_oid = ws_oid (classop);
	  if (OID_ISTEMP (class_oid))
	    {			/* class still has a temporary OID; assign a permanent one */
	      class_oid = locator_assign_permanent_oid (classop);
	    }
	  if (xheap_create (NULL, hfid, class_oid, reuse_oid) != NO_ERROR)
	    {
	      hfid = NULL;
	    }
	}
    }
  return (hfid);
}
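
Usage sketch (not part of the original file): get_class_heap is static, so it can only be called from other routines in loader_disk.c. A hypothetical caller that resolves the heap before writing loader-built instances could look like the following; disk_insert_instance_sketch and its body are illustrative, only the call to get_class_heap above comes from the example.

/* Hypothetical caller in loader_disk.c: resolve the class heap once and
 * fail early if it cannot be located or created.  Everything except the
 * call to get_class_heap() is an illustrative sketch. */
static int
disk_insert_instance_sketch (MOP classop, SM_CLASS * class_)
{
  HFID *hfid;

  hfid = get_class_heap (classop, class_);
  if (hfid == NULL)
    {
      return ER_FAILED;
    }

  /* ... write the instance representation into the heap *hfid here ... */
  return NO_ERROR;
}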
Code example #2
File: compactdb_cl.c  Project: dong1/testsize
static int
do_reclaim_class_addresses (const OID class_oid, char **class_name,
			    bool * const any_class_can_be_referenced,
			    bool * const correctly_processed,
			    bool * const addresses_reclaimed,
			    int *const error_while_processing)
{
  DB_OBJECT *class_mop = NULL;
  DB_OBJECT *parent_mop = NULL;
  SM_CLASS *class_ = NULL;
  SM_CLASS *parent_class_ = NULL;
  int error_code = NO_ERROR;
  int skipped_error_code = NO_ERROR;
  bool do_abort_on_error = true;
  bool can_reclaim_addresses = true;
  LIST_MOPS *lmops = NULL;
  HFID *hfid = NULL;

  assert (!OID_ISNULL (&class_oid));
  assert (any_class_can_be_referenced != NULL);
  assert (correctly_processed != NULL);
  assert (addresses_reclaimed != NULL);
  assert (error_while_processing != NULL);
  assert (class_name != NULL);

  *correctly_processed = false;
  *addresses_reclaimed = false;
  *error_while_processing = NO_ERROR;

  error_code = db_commit_transaction ();
  if (error_code != NO_ERROR)
    {
      goto error_exit;
    }

  error_code = db_set_isolation (TRAN_READ_COMMITTED);
  if (error_code != NO_ERROR)
    {
      goto error_exit;
    }

  /*
   * Trying to force an ISX_LOCK on the root class. It somehow happens that
   * we are left with an IX_LOCK in the end...
   */
  if (locator_fetch_class (sm_Root_class_mop, DB_FETCH_QUERY_WRITE) == NULL)
    {
      error_code = ER_FAILED;
      goto error_exit;
    }

  class_mop = db_object ((OID *) (&class_oid));
  if (class_mop == NULL)
    {
      skipped_error_code = ER_FAILED;
      goto error_exit;
    }

  if (!locator_is_class (class_mop, DB_FETCH_WRITE))
    {
      skipped_error_code = ER_FAILED;
      goto error_exit;
    }

  /*
   * We need an X_LOCK on the class to process as early as possible so that
   * other transactions don't add references to it in the schema.
   */
  class_ = (SM_CLASS *) locator_fetch_class (class_mop, DB_FETCH_WRITE);
  if (class_ == NULL)
    {
      skipped_error_code = er_errid ();
      goto error_exit;
    }

  assert (*class_name == NULL);
  *class_name = strdup (class_->header.name);
  if (*class_name == NULL)
    {
      error_code = ER_FAILED;
      goto error_exit;
    }

  if (class_->partition_of != NULL)
    {
      /*
       * If the current class is a partition of a partitioned class we need
       * to get its parent partitioned table and check for references to its
       * parent too. If table tbl has partition tbl__p__p0, a reference to tbl
       * can point to tbl__p__p0 instances too.
       */
      skipped_error_code = do_get_partition_parent (class_mop, &parent_mop);
      if (skipped_error_code != NO_ERROR)
	{
	  goto error_exit;
	}
      if (parent_mop != NULL)
	{
	  parent_class_ =
	    (SM_CLASS *) locator_fetch_class (parent_mop, DB_FETCH_WRITE);
	  if (parent_class_ == NULL)
	    {
	      skipped_error_code = er_errid ();
	      goto error_exit;
	    }
	}
    }

  skipped_error_code = locator_flush_all_instances (class_mop, true);
  if (skipped_error_code != NO_ERROR)
    {
      goto error_exit;
    }

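  /* Only ordinary classes with a valid heap can have their addresses
   * reclaimed; virtual classes and classes without a heap are skipped. */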
  if (class_->class_type != SM_CLASS_CT)
    {
      can_reclaim_addresses = false;
    }
  else
    {
      hfid = sm_heap ((MOBJ) class_);
      if (HFID_IS_NULL (hfid))
	{
	  can_reclaim_addresses = false;
	}
    }

  if (class_->flags & SM_CLASSFLAG_SYSTEM)
    {
      /*
       * It should be safe to process system classes also but we skip them for
       * now. Please note that class_instances_can_be_referenced () does not
       * check for references from system classes.
       * If this is ever changed please consider the impact of reusing system
       * objects OIDs.
       */
      can_reclaim_addresses = false;
    }
  else if (class_->flags & SM_CLASSFLAG_REUSE_OID)
    {
      /*
       * Nobody should be able to hold references to reusable OID tables so it
       * should be safe to reclaim their OIDs and pages no matter what.
       */
      can_reclaim_addresses = true;
    }
  else
    {
      if (*any_class_can_be_referenced)
	{
	  /*
	   * Some class attribute has OBJECT or SET OF OBJECT as the domain.
	   * This means it can point to instances of any class so we're not
	   * safe reclaiming OIDs.
	   */
	  can_reclaim_addresses = false;
	}
      else
	{
	  bool class_can_be_referenced = false;

	  /*
	   * IS_LOCK should be enough for what we need but
	   * locator_get_all_class_mops seems to lock the instances with the
	   * lock that it has on their class. So we end up with IX_LOCK on all
	   * classes in the schema...
	   */

	  lmops = locator_get_all_class_mops (DB_FETCH_CLREAD_INSTREAD,
					      is_not_system_class);
	  if (lmops == NULL)
	    {
	      skipped_error_code = ER_FAILED;
	      goto error_exit;
	    }

	  skipped_error_code =
	    class_instances_can_be_referenced (class_mop, parent_mop,
					       &class_can_be_referenced,
					       any_class_can_be_referenced,
					       lmops->mops, lmops->num);
	  if (skipped_error_code != NO_ERROR)
	    {
	      goto error_exit;
	    }
	  /*
	   * If some attribute has OBJECT or the current class as its domain
	   * then it's not safe to reclaim the OIDs as some of the references
	   * might point to deleted objects. We skipped the system classes as
	   * they should not point to any instances of the non-system classes.
	   */
	  can_reclaim_addresses = !class_can_be_referenced &&
	    !*any_class_can_be_referenced;
	  if (lmops != NULL)
	    {
	      /*
	       * It should be safe now to release all the locks we hold on the
	       * schema classes (except for the X_LOCK on the current class).
	       * However, we don't currently have a way of releasing those
	       * locks so we're stuck with them till the end of the current
	       * transaction.
	       */
	      locator_free_list_mops (lmops);
	      lmops = NULL;
	    }
	}
    }

  if (can_reclaim_addresses)
    {
      assert (hfid != NULL && !HFID_IS_NULL (hfid));

      skipped_error_code = heap_reclaim_addresses (hfid);
      if (skipped_error_code != NO_ERROR)
	{
	  goto error_exit;
	}
      *addresses_reclaimed = true;
    }

  error_code = db_commit_transaction ();
  if (error_code != NO_ERROR)
    {
      goto error_exit;
    }

  assert (error_code == NO_ERROR && skipped_error_code == NO_ERROR);
  *correctly_processed = true;
  class_mop = NULL;
  class_ = NULL;
  parent_mop = NULL;
  parent_class_ = NULL;
  return error_code;

error_exit:
  *error_while_processing = skipped_error_code;
  class_mop = NULL;
  class_ = NULL;
  parent_mop = NULL;
  parent_class_ = NULL;
  if (lmops != NULL)
    {
      locator_free_list_mops (lmops);
      lmops = NULL;
    }
  if (do_abort_on_error)
    {
      int tmp_error_code = NO_ERROR;

      if (skipped_error_code == ER_LK_UNILATERALLY_ABORTED ||
	  error_code == ER_LK_UNILATERALLY_ABORTED)
	{
	  tmp_error_code = tran_abort_only_client (false);
	}
      else
	{
	  tmp_error_code = db_abort_transaction ();
	}
      if (tmp_error_code != NO_ERROR)
	{
	  if (error_code == NO_ERROR)
	    {
	      error_code = tmp_error_code;
	    }
	}
    }
  if (skipped_error_code == NO_ERROR && error_code == NO_ERROR)
    {
      error_code = ER_FAILED;
    }
  return error_code;
}
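
A hedged sketch of the per-class driver loop that a compactdb client could build on top of do_reclaim_class_addresses; reclaim_all_classes_sketch, num_class_mops, and the free() of class_name are illustrative assumptions, only the callee's contract (per-class out-flags plus an error code) comes from the function above.

/* Illustrative driver (assumes <stdlib.h> for free): walk a list of class
 * OIDs, let do_reclaim_class_addresses() decide per class, and keep going
 * when a class is merely skipped. */
static void
reclaim_all_classes_sketch (const OID * class_oids, int num_class_mops)
{
  bool any_class_can_be_referenced = false;
  int i;

  for (i = 0; i < num_class_mops; i++)
    {
      char *class_name = NULL;
      bool correctly_processed = false;
      bool addresses_reclaimed = false;
      int error_while_processing = NO_ERROR;

      (void) do_reclaim_class_addresses (class_oids[i], &class_name,
					 &any_class_can_be_referenced,
					 &correctly_processed,
					 &addresses_reclaimed,
					 &error_while_processing);
      if (!correctly_processed)
	{
	  /* the class was skipped; error_while_processing records why */
	}
      if (class_name != NULL)
	{
	  free (class_name);
	  class_name = NULL;
	}
    }
}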
Code example #3
/*
 * boot_compact_db - compact specified classes
 *    return: error status
 *    class_oids(in): the class list
 *    n_classes(in): the class_oids length
 *    space_to_process(in): the space to process
 *    instance_lock_timeout(in): the lock timeout for instances
 *    class_lock_timeout(in): the lock timeout for classes
 *    delete_old_repr(in): whether to delete the old class representation
 *    last_processed_class_oid(in,out): last processed class oid
 *    last_processed_oid(in,out): last processed oid
 *    total_objects(out): count of processed objects for each class
 *    failed_objects(out): count of failed objects for each class
 *    modified_objects(out): count of modified objects for each class
 *    big_objects(out): count of big objects for each class
 *    initial_last_repr_id(in,out): the list of initial last class representation ids
 */
int
boot_compact_db (THREAD_ENTRY * thread_p, OID * class_oids, int n_classes,
		 int space_to_process,
		 int instance_lock_timeout,
		 int class_lock_timeout,
		 bool delete_old_repr,
		 OID * last_processed_class_oid,
		 OID * last_processed_oid,
		 int *total_objects, int *failed_objects,
		 int *modified_objects, int *big_objects,
		 int *initial_last_repr_id)
{
  int result = NO_ERROR;
  int i, j, start_index = -1;
  int max_space_to_process, current_tran_index = -1;
  int lock_ret;
  HFID hfid;

  if (boot_can_compact (thread_p) == false)
    {
      return ER_COMPACTDB_ALREADY_STARTED;
    }

  if (class_oids == NULL || n_classes <= 0 ||
      space_to_process <= 0 || last_processed_class_oid == NULL ||
      last_processed_oid == NULL || total_objects == NULL ||
      failed_objects == NULL || modified_objects == NULL ||
      big_objects == NULL || initial_last_repr_id == NULL)
    {
      return ER_QPROC_INVALID_PARAMETER;
    }

  for (start_index = 0; start_index < n_classes; start_index++)
    {
      if (OID_EQ (class_oids + start_index, last_processed_class_oid))
	{
	  break;
	}
    }

  if (start_index == n_classes)
    {
      return ER_QPROC_INVALID_PARAMETER;
    }

  for (i = 0; i < n_classes; i++)
    {
      total_objects[i] = 0;
      failed_objects[i] = 0;
      modified_objects[i] = 0;
      big_objects[i] = 0;
    }

  max_space_to_process = space_to_process;
  for (i = start_index; i < n_classes; i++)
    {
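      /* take an IX_LOCK on the class; if it cannot be granted within
         class_lock_timeout, mark the class as locked and move on */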
      lock_ret = lock_object_waitsecs (thread_p, class_oids + i,
				       oid_Root_class_oid, IX_LOCK,
				       LK_UNCOND_LOCK, class_lock_timeout);

      if (lock_ret != LK_GRANTED)
	{
	  total_objects[i] = COMPACTDB_LOCKED_CLASS;
	  OID_SET_NULL (last_processed_oid);
	  continue;
	}

      if (heap_get_hfid_from_class_oid (thread_p, class_oids + i, &hfid) !=
	  NO_ERROR)
	{
	  lock_unlock_object (thread_p, class_oids + i, oid_Root_class_oid,
			      IX_LOCK, true);
	  OID_SET_NULL (last_processed_oid);
	  total_objects[i] = COMPACTDB_INVALID_CLASS;
	  continue;
	}

      if (HFID_IS_NULL (&hfid))
	{
	  lock_unlock_object (thread_p, class_oids + i, oid_Root_class_oid,
			      IX_LOCK, true);
	  OID_SET_NULL (last_processed_oid);
	  total_objects[i] = COMPACTDB_INVALID_CLASS;
	  continue;
	}

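      /* first visit of this class in the current pass: remember its last
         representation id so we can later check that the schema did not
         change while its instances were being compacted */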
      if (OID_ISNULL (last_processed_oid))
	{
	  initial_last_repr_id[i] =
	    heap_get_class_repr_id (thread_p, class_oids + i);
	  if (initial_last_repr_id[i] <= 0)
	    {
	      lock_unlock_object (thread_p, class_oids + i,
				  oid_Root_class_oid, IX_LOCK, true);
	      total_objects[i] = COMPACTDB_INVALID_CLASS;
	      continue;
	    }
	}

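      /* compact the instances of the class; process_class updates the
         per-class counters, the resume OID and the remaining space budget */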
      if (process_class
	  (thread_p, class_oids + i, &hfid, max_space_to_process,
	   &instance_lock_timeout, &space_to_process,
	   last_processed_oid, total_objects + i,
	   failed_objects + i, modified_objects + i, big_objects + i) !=
	  NO_ERROR)
	{
	  OID_SET_NULL (last_processed_oid);
	  for (j = start_index; j <= i; j++)
	    {
	      total_objects[j] = COMPACTDB_UNPROCESSED_CLASS;
	      failed_objects[j] = 0;
	      modified_objects[j] = 0;
	      big_objects[j] = 0;
	    }

	  result = ER_FAILED;
	  break;
	}

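      /* if requested, and the class was fully processed without failures
         with an unchanged representation, drop its old representations
         under an X_LOCK */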
      if (delete_old_repr &&
	  OID_ISNULL (last_processed_oid) && failed_objects[i] == 0 &&
	  heap_get_class_repr_id (thread_p, class_oids + i) ==
	  initial_last_repr_id[i])
	{
	  lock_ret = lock_object_waitsecs (thread_p, class_oids + i,
					   oid_Root_class_oid, X_LOCK,
					   LK_UNCOND_LOCK,
					   class_lock_timeout);
	  if (lock_ret == LK_GRANTED)
	    {
	      if (catalog_drop_old_representations (thread_p, class_oids + i)
		  != NO_ERROR)
		{
		  for (j = start_index; j <= i; j++)
		    {
		      total_objects[j] = COMPACTDB_UNPROCESSED_CLASS;
		      failed_objects[j] = 0;
		      modified_objects[j] = 0;
		      big_objects[j] = 0;
		    }

		  result = ER_FAILED;
		}
	      else
		{
		  initial_last_repr_id[i] = COMPACTDB_REPR_DELETED;
		}

	      break;
	    }
	}

      if (space_to_process == 0)
	{
	  break;
	}
    }

  if (OID_ISNULL (last_processed_oid))
    {
      if (i < n_classes - 1)
	{
	  COPY_OID (last_processed_class_oid, class_oids + i + 1);
	}
      else
	{
	  OID_SET_NULL (last_processed_class_oid);
	}
    }
  else
    {
      COPY_OID (last_processed_class_oid, class_oids + i);
    }

  return result;
}
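
The last_processed_class_oid / last_processed_oid pair is what allows a caller to resume once the space budget of a single call is exhausted. A minimal sketch of such a resume loop follows; the direct call (the real compactdb utility goes through the client/server interface), the hard-coded budgets and timeouts, and the fact that the per-class counters are not accumulated across calls are all simplifying assumptions.

/* Illustrative resume loop: start from the first class and keep calling
 * boot_compact_db() until the last processed class OID comes back NULL.
 * Budgets and timeouts are placeholders; the real compactdb client also
 * accumulates the per-class counters between calls. */
static int
compact_all_sketch (THREAD_ENTRY * thread_p, OID * class_oids, int n_classes,
		    int *total_objects, int *failed_objects,
		    int *modified_objects, int *big_objects,
		    int *initial_last_repr_id)
{
  OID last_class_oid, last_oid;
  int error = NO_ERROR;

  COPY_OID (&last_class_oid, &class_oids[0]);
  OID_SET_NULL (&last_oid);

  do
    {
      error = boot_compact_db (thread_p, class_oids, n_classes,
			       1000 /* space_to_process */ ,
			       -1 /* instance_lock_timeout */ ,
			       -1 /* class_lock_timeout */ ,
			       false /* delete_old_repr */ ,
			       &last_class_oid, &last_oid,
			       total_objects, failed_objects,
			       modified_objects, big_objects,
			       initial_last_repr_id);
    }
  while (error == NO_ERROR && !OID_ISNULL (&last_class_oid));

  return error;
}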
Code example #4
/*
 * xstats_get_statistics_from_server () - Retrieves the class statistics
 *   return: buffer containing the class statistics, or NULL on error
 *   class_id_p(in): Identifier of the class
 *   time_stamp(in): timestamp of the statistics the client already holds
 *   length_p(out): Length of the returned buffer
 *
 * Note: This function retrieves the statistics for the given class from the
 *       catalog manager and stores them into a buffer. Note that since the
 *       statistics are kept on the current (last) representation structure of
 *       the catalog, only this structure is retrieved. Note further that
 *       since the statistics are used only on the client side they are not
 *       put into a structure here on the server side (not even temporarily),
 *       but stored into a buffer area to be transmitted to the client side.
 */
char *
xstats_get_statistics_from_server (THREAD_ENTRY * thread_p, OID * class_id_p,
				   unsigned int time_stamp, int *length_p)
{
  CLS_INFO *cls_info_p;
  REPR_ID repr_id;
  DISK_REPR *disk_repr_p;
  DISK_ATTR *disk_attr_p;
  BTREE_STATS *btree_stats_p;
  int estimated_npages, estimated_nobjs, estimated_avglen;
  int i, j, k, size, n_attrs, tot_n_btstats, tot_key_info_size;
  char *buf_p, *start_p;

  *length_p = -1;

  cls_info_p = catalog_get_class_info (thread_p, class_id_p);
  if (!cls_info_p)
    {
      return NULL;
    }

  if (time_stamp > 0 && time_stamp >= cls_info_p->time_stamp)
    {
      catalog_free_class_info (cls_info_p);
      *length_p = 0;
      return NULL;
    }

  if (catalog_get_last_representation_id (thread_p, class_id_p, &repr_id) !=
      NO_ERROR)
    {
      catalog_free_class_info (cls_info_p);
      return NULL;
    }

  disk_repr_p = catalog_get_representation (thread_p, class_id_p, repr_id);
  if (!disk_repr_p)
    {
      catalog_free_class_info (cls_info_p);
      return NULL;
    }

  n_attrs = disk_repr_p->n_fixed + disk_repr_p->n_variable;

  tot_n_btstats = tot_key_info_size = 0;
  for (i = 0; i < n_attrs; i++)
    {
      if (i < disk_repr_p->n_fixed)
	{
	  disk_attr_p = disk_repr_p->fixed + i;
	}
      else
	{
	  disk_attr_p = disk_repr_p->variable + (i - disk_repr_p->n_fixed);
	}

      tot_n_btstats += disk_attr_p->n_btstats;
      for (j = 0, btree_stats_p = disk_attr_p->bt_stats;
	   j < disk_attr_p->n_btstats; j++, btree_stats_p++)
	{
	  tot_key_info_size +=
	    (or_packed_domain_size (btree_stats_p->key_type, 0) +
	     (OR_INT_SIZE * btree_stats_p->key_size));
	}
    }

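  /* compute the exact size of the packed buffer: the class-level header,
     the fixed-size part of every attribute, the fixed-size part of every
     index, and the variable-size key domain / pkeys information */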
  size = (OR_INT_SIZE		/* time_stamp of CLS_INFO */
	  + OR_INT_SIZE		/* tot_objects of CLS_INFO */
	  + OR_INT_SIZE		/* tot_pages of CLS_INFO */
	  + OR_INT_SIZE		/* n_attrs from DISK_REPR */
	  + (OR_INT_SIZE	/* id of DISK_ATTR */
	     + OR_INT_SIZE	/* type of DISK_ATTR */
	     + STATS_MIN_MAX_SIZE	/* min_value of DISK_ATTR */
	     + STATS_MIN_MAX_SIZE	/* max_value of DISK_ATTR */
	     + OR_INT_SIZE	/* n_btstats of DISK_ATTR */
	  ) * n_attrs);		/* number of attributes */

  size += ((OR_BTID_ALIGNED_SIZE	/* btid of BTREE_STATS */
	    + OR_INT_SIZE	/* leafs of BTREE_STATS */
	    + OR_INT_SIZE	/* pages of BTREE_STATS */
	    + OR_INT_SIZE	/* height of BTREE_STATS */
	    + OR_INT_SIZE	/* keys of BTREE_STATS */
	    + OR_INT_SIZE	/* oids of BTREE_STATS */
	    + OR_INT_SIZE	/* nulls of BTREE_STATS */
	    + OR_INT_SIZE	/* ukeys of BTREE_STATS */
	   ) * tot_n_btstats);	/* total number of indexes */

  size += tot_key_info_size;	/* key_type, pkeys[] of BTREE_STATS */

  start_p = buf_p = (char *) malloc (size);
  if (buf_p == NULL)
    {
      catalog_free_representation (disk_repr_p);
      catalog_free_class_info (cls_info_p);
      return NULL;
    }
  memset (start_p, 0, size);

  OR_PUT_INT (buf_p, cls_info_p->time_stamp);
  buf_p += OR_INT_SIZE;

  if (!HFID_IS_NULL (&cls_info_p->hfid)
      && heap_estimate (thread_p, &cls_info_p->hfid, &estimated_npages,
			&estimated_nobjs, &estimated_avglen) > 0)
    {
      /* use estimates from the heap since it is likely that its estimates
         are more accurate than the ones gathered at update statistics time */

      OR_PUT_INT (buf_p, cls_info_p->tot_objects);
      buf_p += OR_INT_SIZE;

      OR_PUT_INT (buf_p, estimated_npages);
      buf_p += OR_INT_SIZE;
    }
  else
    {
      /* cannot get estimates from the heap, use ones from the catalog */

      OR_PUT_INT (buf_p, cls_info_p->tot_objects);
      buf_p += OR_INT_SIZE;

      OR_PUT_INT (buf_p, cls_info_p->tot_pages);
      buf_p += OR_INT_SIZE;

      estimated_npages = estimated_nobjs = estimated_avglen = 0;
    }

  OR_PUT_INT (buf_p, n_attrs);
  buf_p += OR_INT_SIZE;

  /* put the statistics information of each attribute to the buffer */
  for (i = 0; i < n_attrs; i++)
    {
      if (i < disk_repr_p->n_fixed)
	{
	  disk_attr_p = disk_repr_p->fixed + i;
	}
      else
	{
	  disk_attr_p = disk_repr_p->variable + (i - disk_repr_p->n_fixed);
	}

      OR_PUT_INT (buf_p, disk_attr_p->id);
      buf_p += OR_INT_SIZE;

      OR_PUT_INT (buf_p, disk_attr_p->type);
      buf_p += OR_INT_SIZE;

      switch (disk_attr_p->type)
	{
	case DB_TYPE_INTEGER:
	  OR_PUT_INT (buf_p, disk_attr_p->min_value.i);
	  buf_p += STATS_MIN_MAX_SIZE;
	  OR_PUT_INT (buf_p, disk_attr_p->max_value.i);
	  buf_p += STATS_MIN_MAX_SIZE;
	  break;

	case DB_TYPE_BIGINT:
	  OR_PUT_BIGINT (buf_p, disk_attr_p->min_value.bigint);
	  buf_p += STATS_MIN_MAX_SIZE;
	  OR_PUT_BIGINT (buf_p, disk_attr_p->max_value.bigint);
	  buf_p += STATS_MIN_MAX_SIZE;
	  break;

	case DB_TYPE_SHORT:
	  /* store these as full integers because of alignment */
	  OR_PUT_INT (buf_p, disk_attr_p->min_value.i);
	  buf_p += STATS_MIN_MAX_SIZE;
	  OR_PUT_INT (buf_p, disk_attr_p->max_value.i);
	  buf_p += STATS_MIN_MAX_SIZE;
	  break;

	case DB_TYPE_FLOAT:
	  OR_PUT_FLOAT (buf_p, &(disk_attr_p->min_value.f));
	  buf_p += STATS_MIN_MAX_SIZE;
	  OR_PUT_FLOAT (buf_p, &(disk_attr_p->max_value.f));
	  buf_p += STATS_MIN_MAX_SIZE;
	  break;

	case DB_TYPE_DOUBLE:
	  OR_PUT_DOUBLE (buf_p, &(disk_attr_p->min_value.d));
	  buf_p += STATS_MIN_MAX_SIZE;
	  OR_PUT_DOUBLE (buf_p, &(disk_attr_p->max_value.d));
	  buf_p += STATS_MIN_MAX_SIZE;
	  break;

	case DB_TYPE_DATE:
	  OR_PUT_DATE (buf_p, &(disk_attr_p->min_value.date));
	  buf_p += STATS_MIN_MAX_SIZE;
	  OR_PUT_DATE (buf_p, &(disk_attr_p->max_value.date));
	  buf_p += STATS_MIN_MAX_SIZE;
	  break;

	case DB_TYPE_TIME:
	  OR_PUT_TIME (buf_p, &(disk_attr_p->min_value.time));
	  buf_p += STATS_MIN_MAX_SIZE;
	  OR_PUT_TIME (buf_p, &(disk_attr_p->max_value.time));
	  buf_p += STATS_MIN_MAX_SIZE;
	  break;

	case DB_TYPE_UTIME:
	  OR_PUT_UTIME (buf_p, &(disk_attr_p->min_value.utime));
	  buf_p += STATS_MIN_MAX_SIZE;
	  OR_PUT_UTIME (buf_p, &(disk_attr_p->max_value.utime));
	  buf_p += STATS_MIN_MAX_SIZE;
	  break;

	case DB_TYPE_DATETIME:
	  OR_PUT_DATETIME (buf_p, &(disk_attr_p->min_value.datetime));
	  buf_p += STATS_MIN_MAX_SIZE;
	  OR_PUT_DATETIME (buf_p, &(disk_attr_p->max_value.datetime));
	  buf_p += STATS_MIN_MAX_SIZE;
	  break;

	case DB_TYPE_MONETARY:
	  OR_PUT_MONETARY (buf_p, &(disk_attr_p->min_value.money));
	  buf_p += STATS_MIN_MAX_SIZE;
	  OR_PUT_MONETARY (buf_p, &(disk_attr_p->max_value.money));
	  buf_p += STATS_MIN_MAX_SIZE;
	  break;

	default:
	  break;
	}

      OR_PUT_INT (buf_p, disk_attr_p->n_btstats);
      buf_p += OR_INT_SIZE;

      for (j = 0, btree_stats_p = disk_attr_p->bt_stats;
	   j < disk_attr_p->n_btstats; j++, btree_stats_p++)
	{
	  OR_PUT_BTID (buf_p, &btree_stats_p->btid);
	  buf_p += OR_BTID_ALIGNED_SIZE;

	  /* If the btree file has currently more pages than when we gathered
	     statistics, assume that all growth happen at the leaf level. If
	     the btree is smaller, we use the gathered statistics since the
	     btree may have an external file (unknown at this level) to keep
	     overflow keys. */
	  estimated_npages = file_get_numpages (thread_p,
						&btree_stats_p->btid.vfid);
	  if (estimated_npages > btree_stats_p->pages)
	    {
	      OR_PUT_INT (buf_p, (btree_stats_p->leafs +
				  (estimated_npages - btree_stats_p->pages)));
	      buf_p += OR_INT_SIZE;

	      OR_PUT_INT (buf_p, estimated_npages);
	      buf_p += OR_INT_SIZE;
	    }
	  else
	    {
	      OR_PUT_INT (buf_p, btree_stats_p->leafs);
	      buf_p += OR_INT_SIZE;

	      OR_PUT_INT (buf_p, btree_stats_p->pages);
	      buf_p += OR_INT_SIZE;
	    }

	  OR_PUT_INT (buf_p, btree_stats_p->height);
	  buf_p += OR_INT_SIZE;

	  /* If the estimated objects from heap manager is greater than the
	     estimate when the statistics were gathered, assume that the
	     difference is in distinct keys. */
	  if (estimated_nobjs > cls_info_p->tot_objects)
	    {
	      OR_PUT_INT (buf_p, (btree_stats_p->keys +
				  (estimated_nobjs -
				   cls_info_p->tot_objects)));
	      buf_p += OR_INT_SIZE;
	    }
	  else
	    {
	      OR_PUT_INT (buf_p, btree_stats_p->keys);
	      buf_p += OR_INT_SIZE;
	    }

	  OR_PUT_INT (buf_p, btree_stats_p->oids);
	  buf_p += OR_INT_SIZE;

	  OR_PUT_INT (buf_p, btree_stats_p->nulls);
	  buf_p += OR_INT_SIZE;

	  OR_PUT_INT (buf_p, btree_stats_p->ukeys);
	  buf_p += OR_INT_SIZE;

	  buf_p = or_pack_domain (buf_p, btree_stats_p->key_type, 0, 0);

	  for (k = 0; k < btree_stats_p->key_size; k++)
	    {
	      OR_PUT_INT (buf_p, btree_stats_p->pkeys[k]);
	      buf_p += OR_INT_SIZE;
	    }
	}
    }

  catalog_free_representation (disk_repr_p);
  catalog_free_class_info (cls_info_p);

  *length_p = CAST_STRLEN (buf_p - start_p);

  return start_p;
}
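
The client has to unpack the buffer in exactly the order it was packed above. A minimal sketch that reads only the fixed-size class-level header (time stamp, object count, page count, attribute count) is shown below; it assumes the matching OR_GET_INT reader macro, and the real client-side decoder of course continues with the per-attribute and per-index sections.

/* Illustrative reader for the first four integers packed above.  It relies
 * on OR_GET_INT / OR_INT_SIZE; a full decoder would go on to read the
 * per-attribute and per-index statistics in the same order they were put. */
static const char *
unpack_class_stats_header_sketch (const char *buf_p, unsigned int *time_stamp,
				  int *num_objects, int *num_pages,
				  int *n_attrs)
{
  *time_stamp = (unsigned int) OR_GET_INT (buf_p);
  buf_p += OR_INT_SIZE;

  *num_objects = OR_GET_INT (buf_p);	/* tot_objects of CLS_INFO */
  buf_p += OR_INT_SIZE;

  *num_pages = OR_GET_INT (buf_p);	/* heap estimate or tot_pages */
  buf_p += OR_INT_SIZE;

  *n_attrs = OR_GET_INT (buf_p);
  buf_p += OR_INT_SIZE;

  return buf_p;
}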