Example #1
int
aio_fd (buffer_desc_t * buf, dk_hash_t * aio_ht, OFF_T * off)
{
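  /* Map buf's physical page to a file descriptor and a file offset (*off).
   * aio_ht caches, per dbe_storage_t, a sub-hash of disk_stripe_t to fd for
   * striped storage; for a single-file storage it records that the storage's
   * dbs_file_mtx has been entered. */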
  dbe_storage_t * dbs = buf->bd_storage;
  dk_hash_t * dbs_ht = (dk_hash_t *) gethash ((void*) buf->bd_storage, aio_ht);
  if (buf->bd_storage->dbs_disks)
    {
      int fd;
      disk_stripe_t * dst;
      if (!dbs_ht)
	{
	  dbs_ht = hash_table_allocate (10);
	  sethash ((void*)dbs, aio_ht, (void*) dbs_ht);
	}
      dst = dp_disk_locate (buf->bd_storage, buf->bd_physical_page, off);
      fd = (int)(ptrlong) gethash ((void*)dst, dbs_ht);
      if (!fd)
	{
	  fd = dst_fd (dst);
	  sethash ((void*)dst, dbs_ht, (void*)(ptrlong)fd);
	}
      return fd;
    }
  else
    {
      if (!dbs_ht)
	{
	  sethash ((void*)buf->bd_storage, aio_ht, (void*) 1);
	  mutex_enter (buf->bd_storage->dbs_file_mtx);
	}
      *off = ((int64)PAGE_SZ) * buf->bd_physical_page;
      return buf->bd_storage->dbs_fd;
    }
}
Example #2
void
it_free_page (index_tree_t * it, buffer_desc_t * buf)
{
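  /* Delete buf's logical page from the tree.  Marks the page DP_DELETED in the
   * it_map remap hash (or simply drops the remap entry for a page created since
   * the last checkpoint), removes the dp-to-buffer entry, frees the remap page
   * and leaves the buffer as deleted.  Caller holds the it_map mutex and has
   * write access to buf. */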
  short l;
  it_map_t * itm;
  dp_addr_t remap;
  ASSERT_IN_MAP (buf->bd_tree, buf->bd_page);
  itm = IT_DP_MAP (buf->bd_tree, buf->bd_page);
  remap = (dp_addr_t) (ptrlong) gethash (DP_ADDR2VOID (buf->bd_page), &itm->itm_remap);
  if (!buf->bd_is_write)
    GPF_T1 ("isp_free_page without write access to buffer.");
  dp_may_compact (buf->bd_storage, buf->bd_page); /* no need to keep deleted buffers in the checked-for-compact list */
  l=SHORT_REF (buf->bd_buffer + DP_FLAGS);
  if (!(l == DPF_BLOB || l == DPF_BLOB_DIR)
      && !remap)
    GPF_T1 ("Freeing a page that is not remapped");
  if (DPF_INDEX == l)
    it->it_n_index_est--;
  else
    it->it_n_blob_est--;
  if (buf->bd_page != buf->bd_physical_page && (DPF_BLOB_DIR == l || DPF_BLOB == l))
    GPF_T1 ("blob is not supposed to be remapped");
  DBG_PT_PRINTF (("    Delete %ld remap %ld FL=%d buf=%p\n", buf->bd_page, buf->bd_physical_page, l, buf));
  if (buf->bd_iq)
    {
      mutex_leave (&itm->itm_mtx);
      buf_cancel_write (buf);
      mutex_enter (&itm->itm_mtx);
    }

  if (!remap)
    {
      /* a blob in checkpoint space can be deleted without a remap existing in commit space. */
      if (DPF_BLOB != l && DPF_BLOB_DIR != l )
	GPF_T1 ("not supposed to delete a buffer in a different space unless it's a blob");
      if (buf->bd_is_dirty)
	GPF_T1 ("blob in checkpoint space can't be dirty - has no remap, in commit, hence is in checkpoint");
      sethash (DP_ADDR2VOID (buf->bd_page), &itm->itm_remap, (void*) (ptrlong) DP_DELETED);
      remhash (DP_ADDR2VOID (buf->bd_page), &itm->itm_dp_to_buf);
      page_leave_as_deleted (buf);
      return;
    }
  if (IS_NEW_BUFFER (buf))
    /* if this was CREATED AND DELETED without intervening checkpoint the delete
     * does not carry outside the commit space. */
    remhash (DP_ADDR2VOID (buf->bd_page), &itm->itm_remap);
  else
    sethash (DP_ADDR2VOID (buf->bd_page), &itm->itm_remap, (void *) (ptrlong) DP_DELETED);
  if (!remhash (DP_ADDR2VOID (buf->bd_page), &itm->itm_dp_to_buf))
    GPF_T1 ("it_free_page does not hit the buffer in tree cache");

  it_free_remap (it, buf->bd_page, buf->bd_physical_page, l);
  page_leave_as_deleted (buf);
}
Example #3
long
stmt_row_bookmark (cli_stmt_t * stmt, caddr_t * row)
{
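    /* Return a bookmark id for a fetched row.  If the row's bookmark box is
     * already known, the existing id is returned via the reverse hash; otherwise
     * the next id on the connection is assigned and the copied bookmark is
     * recorded in the statement's forward/reverse hashes and in the connection's
     * hash.  Returns 0 when bookmarks are not enabled for the statement. */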
    long *bmidp;
    long bmid;
    cli_connection_t *con = stmt->stmt_connection;
    caddr_t bm;
    int len;

    if (!stmt->stmt_opts->so_use_bookmarks)
        return 0;

    IN_CON (con);

    if (!con->con_bookmarks)
        con->con_bookmarks = hash_table_allocate (101);

    if (!stmt->stmt_bookmarks)
    {
        stmt->stmt_bookmarks = hash_table_allocate (101);
        stmt->stmt_bookmarks_rev = id_tree_hash_create (101);
    }

    con->con_last_bookmark++;
    len = BOX_ELEMENTS (row);
    bm = row[len - 2];

    bmidp = (long *) id_hash_get (stmt->stmt_bookmarks_rev, (caddr_t) & bm);
    if (bmidp)
    {
        LEAVE_CON (con);
        return (*bmidp);
    }

    bmid = con->con_last_bookmark;
    bm = box_copy_tree (bm);
    sethash ((void *) (ptrlong) bmid, stmt->stmt_bookmarks, (void *) bm);
    id_hash_set (stmt->stmt_bookmarks_rev, (caddr_t) & bm, (caddr_t) & bmid);
    sethash ((void *) (ptrlong) bmid, con->con_bookmarks, (void *) bm);

    LEAVE_CON (con);

    return bmid;
}
Example #4
buffer_desc_t *
itc_delta_this_buffer (it_cursor_t * itc, buffer_desc_t * buf, int stay_in_map)
{
  /* The caller has no access but intends to change the parent link. */
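  /* Ensure the page has a commit-space delta: if it is already remapped, just
   * mark it dirty; otherwise allocate a remap dp (reusing the logical dp when
   * possible), record it in itm_remap and point bd_physical_page at it. */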
  it_map_t * itm;
  dp_addr_t remap_to;
#ifdef PAGE_TRACE
  dp_addr_t old_dp = buf->bd_physical_page;
#endif
#ifdef _NOT
  FAILCK (itc);
#endif
  ASSERT_IN_MAP (itc->itc_tree, buf->bd_page);
  itm = IT_DP_MAP (itc->itc_tree, buf->bd_page);
#ifdef MTX_DEBUG
  if (buf->bd_is_dirty && !gethash (DP_ADDR2VOID (buf->bd_page), &itm->itm_remap))
    GPF_T1 ("dirty but not remapped in checking delta");
#endif
  if (gethash (DP_ADDR2VOID (buf->bd_page), &itm->itm_remap))
    {
      buf->bd_is_dirty = 1;
      return (buf);
    }
  if (it_can_reuse_logical (itc->itc_tree, buf->bd_page))
    remap_to = buf->bd_page;
  else
    remap_to = em_new_dp (itc->itc_tree->it_extent_map, EXT_REMAP, 0, &itc->itc_n_pages_on_hold);

  if (!remap_to)
    {
      if (LT_CLOSING == itc->itc_ltrx->lt_status)
	{
	  log_error ("Out if disk during commit.  The transaction is in effect and will be replayed from the log at restart.  Exiting due to no disk space, thus cannot maintain separation of checkpoint and commit space and transactional semantic."
		     "This happens due to running out of safety margin, which is not expected to happen.  If this takes place without in fact being out of disk on the database or consistently in a given situation, the condition may be reported to support.   This is a planned exit and not a database corruption.  A core will be made for possible support.");
	  GPF_T1 ("Deliberately made core for possible support");
	}
      if (itc->itc_n_pages_on_hold)
	GPF_T1 ("The database is out of disk during an insert.  The insert has exceeded its space safety margin.  This does not normally happen.  This is a planned exit and not a corruption. Make more disk space available.  If this occurs continuously or without in fact running out of space, this may be reported to support.");
      if (DELTA_STAY_INSIDE == stay_in_map)
	GPF_T1 ("out of disk on reloc_right_leaves.");
      log_error ("Out of disk space for database");
      itc->itc_ltrx->lt_error = LTE_NO_DISK;
      itc_bust_this_trx (itc, &buf, ITC_BUST_THROW);
    }

  buf->bd_physical_page = remap_to;
  sethash (DP_ADDR2VOID (buf->bd_page), &itm->itm_remap,
	   DP_ADDR2VOID (remap_to));
  buf->bd_is_dirty = 1;
  DBG_PT_DELTA_CLEAN (buf, old_dp);
  return buf;
}
Example #5
static void *
hosting_client_attach (client_connection_t *cli, hosting_version_t * ver, char *err, int err_max)
{
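  /* Return the client's attachment handle for the given hosting version,
   * creating it via hv_client_attach on first use and caching it in
   * cli_module_attachments.  On attach failure err is filled in and NULL is
   * returned. */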
  void *hcli = NULL;

  if (!cli->cli_module_attachments)
    cli->cli_module_attachments = hash_table_allocate (10);

  hcli = gethash (ver, cli->cli_module_attachments);

  err[0] = 0;
  if (hcli)
    return hcli;

  hcli = ver->hv_client_attach (err, err_max);
  if (err[0] != 0)
    return NULL;

  sethash (ver, cli->cli_module_attachments, hcli);

  return hcli;
}
Example #6
int
dbs_file_extend (dbe_storage_t * dbs, extent_t ** new_ext_ret, int is_in_sys_em)
{
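  /* Grow the database by one extent (EXTENT_SZ pages), either through the
   * striped segment or by extending the single database file.  If the system
   * extent map is low on free pages, the new extent becomes an EXT_INDEX extent
   * of the map itself; the free, incremental backup and extent page sets are
   * extended when the new pages fall past their current coverage.  Returns the
   * number of pages added, or 0 on failure. */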
  extent_map_t * em;
  extent_t * new_ext = NULL;
  int n, n_allocated = 0;
  int32 em_n_free;
  dp_addr_t ext_first = dbs->dbs_n_pages;
  ASSERT_IN_DBS (dbs);
  if (dbf_no_disk)
    return 0;
  if (dbs->dbs_disks)
    {
      n = dbs_seg_extend (dbs, EXTENT_SZ);
      if (n != EXTENT_SZ)
	return 0;
    }
  else
    {
      mutex_enter (dbs->dbs_file_mtx);
      n = fd_extend (dbs, dbs->dbs_fd, EXTENT_SZ);
      mutex_leave (dbs->dbs_file_mtx);
      if (EXTENT_SZ != n)
	return 0;
      dbs->dbs_file_length += PAGE_SZ * EXTENT_SZ;
      dbs->dbs_n_pages+= EXTENT_SZ;
      dbs->dbs_n_free_pages+= EXTENT_SZ;
    }
  wi_storage_offsets ();
  em = dbs->dbs_extent_map;
  if (!em)
    {
      return n;
    }
  if (!is_in_sys_em)
    mutex_enter (em->em_mtx);

  if (dbs_check_extent_free_pages)
    {
      em_n_free = em_free_count (em, EXT_INDEX);
      if (em->em_n_free_pages != em_n_free)
	{
	  log_error ("The %s free pages incorrect %d != %d actually free", em->em_name, em->em_n_free_pages, em_n_free);
	  em->em_n_free_pages = em_n_free;
	}
    }

  if (em->em_n_free_pages < 16)
    {
      /* extending and the system extent has little space.  Make this ext a system index ext.  If allocating some other ext, retry and take the next ext for that.  */
      int fill;
      buffer_desc_t * last;
      last = page_set_last (em->em_buf);
      fill = LONG_REF (last->bd_buffer + DP_BLOB_LEN);
      if (fill + sizeof (extent_t) > PAGE_DATA_SZ)
	{
	  dp_addr_t em_dp = ext_first + n_allocated;
	  n_allocated++;
	  last = page_set_extend (dbs, &em->em_buf, em_dp, DPF_EXTENT_MAP);
	  LONG_SET (last->bd_buffer + DP_BLOB_LEN, sizeof (extent_t));
	  new_ext = (extent_t *) (last->bd_buffer + DP_DATA);
	}
      else
	{
	  new_ext = (extent_t*) (last->bd_buffer + fill + DP_DATA);
	  LONG_SET (last->bd_buffer + DP_BLOB_LEN, fill + sizeof (extent_t));
	}
      em->em_n_pages += EXTENT_SZ;
      em->em_n_free_pages += EXTENT_SZ;
      new_ext->ext_flags = EXT_INDEX;
      new_ext->ext_dp = ext_first;
      if (gethash (DP_ADDR2VOID (new_ext->ext_dp), dbs->dbs_dp_to_extent_map))
	GPF_T1 ("ext for new dp range already exists in dbs");
      sethash (DP_ADDR2VOID (new_ext->ext_dp), em->em_dp_to_ext, (void*)new_ext);
      sethash (DP_ADDR2VOID (new_ext->ext_dp), dbs->dbs_dp_to_extent_map, (void*)em);
      new_ext->ext_prev = EXT_EXTENDS_NONE;
      if (n_allocated)
	{
	  new_ext->ext_pages[0] = 1;
	  em->em_n_free_pages--;
	}
    }
  /* there is a guarantee of at least 16 pages in the dbs sys extent map */
  if (dbs->dbs_n_pages > dbs->dbs_n_pages_in_sets)
    {
      /* add a page of global free set and backup set */
      buffer_desc_t * last = page_set_extend (dbs, &dbs->dbs_free_set, 0, DPF_FREE_SET);
      page_set_checksum_init (last->bd_buffer + DP_DATA);
      if (n_allocated)
	dbs_page_allocated (dbs, ext_first);
      last->bd_page = last->bd_physical_page = em_try_get_dp (em, EXT_INDEX, DP_ANY);
      if (!last->bd_page) GPF_T1 ("0 dp for page set page");
      EM_DEC_FREE (em, EXT_INDEX);

      last = page_set_extend (dbs, &dbs->dbs_incbackup_set, 0, DPF_INCBACKUP_SET);
      page_set_checksum_init (last->bd_buffer + DP_DATA);
      last->bd_page = last->bd_physical_page = em_try_get_dp (em, EXT_INDEX, DP_ANY);
      if (!last->bd_page) GPF_T1 ("0 dp for page set page");
      EM_DEC_FREE (em, EXT_INDEX);
      dbs->dbs_n_pages_in_sets += BITS_ON_PAGE;
    }
  if (dbs->dbs_n_pages > dbs->dbs_n_pages_in_extent_set)
    {
      buffer_desc_t * last = page_set_extend (dbs, &dbs->dbs_extent_set, 0, DPF_EXTENT_SET);
      last->bd_page = last->bd_physical_page = em_try_get_dp (em, EXT_INDEX, DP_ANY);
      if (!last->bd_page) GPF_T1 ("0 dp for extents alloc page");
      EM_DEC_FREE (em, EXT_INDEX);
      LONG_SET (last->bd_buffer + DP_DATA, 1); /* the newly made ext is the 1st of this page of the ext set, so set the bm 1st bit to 1 */
      page_set_checksum_init (last->bd_buffer + DP_DATA);
      dbs->dbs_n_pages_in_extent_set += EXTENT_SZ * BITS_ON_PAGE;
    }
  if (new_ext)
    {
      dbs_extent_allocated (dbs, ext_first);
    }
  *new_ext_ret = new_ext;
  if (!is_in_sys_em)
    mutex_leave (em->em_mtx);
  return n;
}
Example #7
int
DBGP_NAME (page_wait_access) (DBGP_PARAMS it_cursor_t * itc, dp_addr_t dp,  buffer_desc_t * buf_from,
    buffer_desc_t ** buf_ret, int mode, int max_change)
{
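  /* Obtain the buffer for dp with the requested access mode.  If the page is
   * not in the tree cache, a stack-allocated decoy marked 'being read' is
   * entered into the dp-to-buffer map so that concurrent threads wait while
   * this thread gets a buffer and reads the page from disk. */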
  buffer_desc_t decoy;
  buffer_desc_t *buf;
  dp_addr_t phys_dp;
  itc->itc_to_reset = RWG_NO_WAIT;
  itc->itc_max_transit_change = max_change;
  itc->itc_must_kill_trx = 0;
  if (!dp)
    GPF_T1 ("Zero DP in page_fault_map_sem");

  if (buf_from)
    {
      ITC_IN_TRANSIT (itc, dp, buf_from->bd_page);
    }
  else
    ASSERT_IN_MAP (itc->itc_tree, dp);

  buf = IT_DP_TO_BUF (itc->itc_tree, dp);
  if (!buf)
    {
      ra_req_t * ra = NULL;
      IT_DP_REMAP (itc->itc_tree, dp, phys_dp);
#ifdef MTX_DEBUG
      em_check_dp (itc->itc_tree->it_extent_map, phys_dp);
      if (phys_dp != dp)
	em_check_dp (itc->itc_tree->it_extent_map, dp);
#endif
      if ((DP_DELETED == phys_dp || dbs_is_free_page (itc->itc_tree->it_storage, phys_dp))
	  && !strchr (wi_inst.wi_open_mode, 'a'))
	{
	  log_error ("Reference to page with free remap dp = %ld, remap = %ld",
		     (long) phys_dp, (long) dp);
	  if (0 && DBS_PAGE_IN_RANGE (itc->itc_tree->it_storage, phys_dp))
	    dbs_page_allocated (itc->itc_tree->it_storage, phys_dp);
	  else
	    {
	      *buf_ret = PF_OF_DELETED;
	      itc->itc_must_kill_trx = 1;
	      itc->itc_to_reset = RWG_WAIT_ANY;
	      ITC_LEAVE_MAPS (itc);
	      return RWG_WAIT_ANY;
	    }
	}
      memset (&decoy, 0, sizeof (buffer_desc_t));
      decoy.bd_being_read = 1;
      if (PA_READ == mode)
	decoy.bd_readers = 1;
      else
	BD_SET_IS_WRITE (&decoy, 1);
      sethash (DP_ADDR2VOID (dp), &IT_DP_MAP (itc->itc_tree, dp)->itm_dp_to_buf, (void*)&decoy);
      ITC_LEAVE_MAPS (itc);
      buf = bp_get_buffer (NULL, BP_BUF_REQUIRED);
      is_read_pending++;
      buf->bd_being_read = 1;
      buf->bd_page = dp;
      buf->bd_storage = itc->itc_tree->it_storage;
      buf->bd_physical_page = phys_dp;
      BD_SET_IS_WRITE (buf, 0);
      buf->bd_write_waiting = NULL;
      if (buf_from && !itc->itc_landed)
	ra = itc_read_aside (itc, buf_from, dp);
      itc->itc_n_reads++;
      ITC_MARK_READ (itc);
      buf->bd_tree = itc->itc_tree;
      buf_disk_read (buf);
      is_read_pending--;
      if (ra)
	itc_read_ahead_blob (itc, ra, RAB_SPECULATIVE);

      if (buf_from)
	{
	  ITC_IN_TRANSIT (itc, dp, buf_from->bd_page);
	}
      else
Example #8
void
it_free_dp_no_read (index_tree_t * it, dp_addr_t dp, int dp_type)
{
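  /* Free a blob or hash temp page without reading its contents.  If a buffer
   * for the dp exists, it is acquired for write and freed with it_free_page;
   * otherwise the remap entry is dropped (page never checkpointed) or the dp
   * is marked DP_DELETED directly in the it_map. */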
  buffer_desc_t * buf;
  dp_addr_t phys_dp = 0;
  it_map_t * itm = IT_DP_MAP (it, dp);
  ASSERT_IN_MAP (it, dp);
  buf = IT_DP_TO_BUF (it, dp);
  if (buf)
    phys_dp = buf->bd_physical_page;
  else
    IT_DP_REMAP (it, dp, phys_dp);
  if (buf && buf->bd_being_read)
    {
      log_info ("Deleting blob page while it is being read dp=%d .\n", dp);
/* the buffer can be a being read decoy with no dp, so check dps only if not being read */
    }
  else if (phys_dp != dp)
    GPF_T1 ("A blob/hash temp dp is not supposed to be remapped in isp_free_blob_dp_no_read");
  if (buf)
    {
      it_cursor_t itc_auto;
      it_cursor_t * itc = &itc_auto;
      ITC_INIT (itc, isp, NULL);
      itc_from_it (itc, it);
      itc->itc_itm1 = itm; /* already inside, set itc_itm1 to mark this */
/* Note that the buf is not passed to page_wait_access.  This is because of the 'being read' possibility.  page_fault will detect this and sync. */
      page_wait_access (itc, dp, NULL, &buf, PA_WRITE, RWG_WAIT_ANY);
      if (PF_OF_DELETED == buf)
	{
	  ITC_LEAVE_MAPS (itc);
	  return;
	}
      if (dp_type != SHORT_REF (buf->bd_buffer + DP_FLAGS))
	GPF_T1 ("About to delete non-blob page from blob page dir.");
      ITC_IN_KNOWN_MAP (itc, dp); /* get back in, could have come out if waited */
      it_free_page (it, buf);
      return;
    }
  DBG_PT_PRINTF (("Free absent blob  L=%d \n", dp));
  {
    dp_addr_t remap = (dp_addr_t) (ptrlong) gethash (DP_ADDR2VOID (dp), &itm->itm_remap);
    dp_addr_t cpt_remap = (dp_addr_t) (ptrlong) DP_CHECKPOINT_REMAP (it->it_storage, dp);
    if (cpt_remap)
      GPF_T1 ("Blob/hash temp dp  not expected to have cpt remap in delete no read");
    if (DPF_BLOB == dp_type)
      it->it_n_blob_est--;
    if (remap)
      {
	/* if this was CREATED AND DELETED without intervening checkpoint the delete
	 * does not carry outside commit space. */
	remhash (DP_ADDR2VOID (dp), &itm->itm_remap);
	em_free_dp (it->it_extent_map, dp, DPF_BLOB == dp_type ? EXT_BLOB : EXT_INDEX);
      }
    else
      {
	if (DPF_HASH == dp_type) GPF_T1 ("a hash temp page is not supposed to be in cpt space");
	sethash (DP_ADDR2VOID (dp), &itm->itm_remap, (void *) (ptrlong) DP_DELETED);
      }
  }
}
Example #9
buffer_desc_t *
it_new_page (index_tree_t * it, dp_addr_t addr, int type, int in_pmap,
	     it_cursor_t * has_hold)
{
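  /* Allocate and wire down a buffer for a new page of the given type.  Gets a
   * dp from the tree's extent map (falling back to the remap reserve when an
   * index split must not fail), enters it in the it_map dp-to-buffer and remap
   * hashes mapped to itself, and initializes the page header and content map. */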
  it_map_t * itm;
  extent_map_t * em = it->it_extent_map;
  int ext_type = (!it->it_blobs_with_index && (DPF_BLOB  == type || DPF_BLOB_DIR == type)) ? EXT_BLOB : EXT_INDEX, n_tries;
  buffer_desc_t *buf;
  buffer_pool_t * action_bp = NULL;
  dp_addr_t physical_dp;

  if (in_pmap)
    GPF_T1 ("do not call isp_new_page in page map");

  physical_dp = em_new_dp (em, ext_type, addr, NULL);
  if (!physical_dp)
    {
      log_error ("Out of disk space for database");
      if (DPF_INDEX == type)
	{
	  /* a split must never fail to get a page.  Use the remap hold as a backup */
	  physical_dp = em_new_dp (it->it_extent_map, EXT_REMAP, 0, &has_hold->itc_n_pages_on_hold);
	  if (!physical_dp)
	    {
	      log_error ("After running out of disk, cannot get a page from remap reserve to complete operation.  Exiting.");
	      call_exit (-1);
	    }
	}
      else
	return NULL;
    }

  if (DPF_INDEX == type)
    it->it_n_index_est++;
  else
    it->it_n_blob_est++;
  for (n_tries =  0; ; n_tries++)
    {
      buf = bp_get_buffer_1 (NULL, &action_bp, in_log_replay ? BP_BUF_REQUIRED : BP_BUF_IF_AVAIL);
      if (buf)
	break;
      if (action_bp)
	bp_delayed_stat_action (action_bp);
      action_bp = NULL;
      if (n_tries > 10)
	{
	  log_error ("Signaling out of disk due to failure to get a buffer.  This condition is not a case of running out of disk");
	  return NULL;
	}
      if (5 == n_tries)
	log_info ("Failed to get a buffer for a new page. Retrying.  If the failure repeats, an out of disk error will be signalled.  The cause of this is having too many buffers wired down for preread, flush or group by/hash join temp space.  To correct, increase the number of buffers in the configuration file.  If this repeats in spite of having hundreds of thousands  of buffers, please report to support.");
      if (n_tries > 4)
	virtuoso_sleep (0, 50000 * (n_tries - 4));
    }
  if (action_bp)
    bp_delayed_stat_action (action_bp);
  if (buf->bd_readers != 1)
    GPF_T1 ("expecting buf to be wired down when allocated");
  buf_dbg_printf (("Buf %x new in tree %x dp=%d\n", buf, isp, physical_dp));
  itm = IT_DP_MAP (it, physical_dp);
  mutex_enter (&itm->itm_mtx);

  sethash (DP_ADDR2VOID (physical_dp), &itm->itm_dp_to_buf, (void*) buf);
  sethash (DP_ADDR2VOID (physical_dp), &itm->itm_remap,
	   DP_ADDR2VOID (physical_dp));
  buf->bd_page = physical_dp;
  buf->bd_physical_page = physical_dp;
  buf->bd_tree = it;
  buf->bd_storage = it->it_storage;
  buf->bd_pl = NULL;
  buf->bd_readers = 0;
  BD_SET_IS_WRITE (buf, 1);
  mutex_leave (&itm->itm_mtx);
  if (em != em->em_dbs->dbs_extent_map && EXT_INDEX == ext_type)
    {
      mutex_enter (em->em_mtx);
      remhash (DP_ADDR2VOID(physical_dp), em->em_uninitialized);
      mutex_leave (em->em_mtx);
    }
#ifdef PAGE_TRACE

  memset (buf->bd_buffer, 0, PAGE_SZ); /* all for debug view */
#else
  memset (buf->bd_buffer, 0, PAGE_SZ - PAGE_DATA_SZ); /* header only */
#endif
  SHORT_SET (buf->bd_buffer + DP_FLAGS, type);
  if (type == DPF_INDEX)
    {
      page_map_t * map = buf->bd_content_map;
      if (!map)
	buf->bd_content_map = (page_map_t*) resource_get (PM_RC (PM_SZ_1));
      else
	{
	  if (map->pm_size > PM_SZ_1)
	    {
	      resource_store (PM_RC (map->pm_size), (void*) map);
	      buf->bd_content_map = (page_map_t *) resource_get (PM_RC (PM_SZ_1));
	    }
	}
      pg_map_clear (buf);
      LONG_SET (buf->bd_buffer + DP_KEY_ID, it->it_key ? it->it_key->key_id : KI_TEMP);
    }
  else if (buf->bd_content_map)
    {
      resource_store (PM_RC (buf->bd_content_map->pm_size), (void*) buf->bd_content_map);
      buf->bd_content_map = NULL;
    }

  buf_set_dirty (buf);
  DBG_PT_PRINTF (("New page L=%d B=%p FL=%d K=%s \n", buf->bd_page, buf, type,
		 it->it_key ? (it->it_key->key_name ? it->it_key->key_name : "unnamed key") : "no key"));
  TC (tc_new_page);
  return buf;
}