Example #1
static void
huge_ralloc_no_move_similar(void *ptr, size_t oldsize, size_t usize,
    size_t size, size_t extra, bool zero)
{
	size_t usize_next;
	extent_node_t *node;
	arena_t *arena;
	chunk_purge_t *chunk_purge;
	bool zeroed;

	/* Increase usize to incorporate extra. */
	while (usize < s2u(size+extra) && (usize_next = s2u(usize+1)) < oldsize)
		usize = usize_next;

	if (oldsize == usize)
		return;

	node = huge_node_get(ptr);
	arena = extent_node_arena_get(node);

	malloc_mutex_lock(&arena->lock);
	chunk_purge = arena->chunk_purge;
	malloc_mutex_unlock(&arena->lock);

	/* Fill if necessary (shrinking). */
	if (oldsize > usize) {
		size_t sdiff = oldsize - usize;
		zeroed = !chunk_purge_wrapper(arena, chunk_purge, ptr, usize,
		    sdiff);
		if (config_fill && unlikely(opt_junk_free)) {
			memset((void *)((uintptr_t)ptr + usize), 0x5a, sdiff);
			zeroed = false;
		}
	} else
		zeroed = true;

	malloc_mutex_lock(&arena->huge_mtx);
	/* Update the size of the huge allocation. */
	assert(extent_node_size_get(node) != usize);
	extent_node_size_set(node, usize);
	/* Clear node's zeroed field if zeroing failed above. */
	extent_node_zeroed_set(node, extent_node_zeroed_get(node) && zeroed);
	malloc_mutex_unlock(&arena->huge_mtx);

	arena_chunk_ralloc_huge_similar(arena, ptr, oldsize, usize);

	/* Fill if necessary (growing). */
	if (oldsize < usize) {
		if (zero || (config_fill && unlikely(opt_zero))) {
			if (!zeroed) {
				memset((void *)((uintptr_t)ptr + oldsize), 0,
				    usize - oldsize);
			}
		} else if (config_fill && unlikely(opt_junk_alloc)) {
			memset((void *)((uintptr_t)ptr + oldsize), 0xa5, usize -
			    oldsize);
		}
	}
}
Example #2
bool SearchTreeNode::UnSerializeString(const wxString& s,wxString& result)
{
    result.Clear();
    size_t i;
    int mode = 0;
    wxString entity(_T(""));
    unsigned int u;
    for(i = 0;mode >=0 && i<s.length();i++)
    {
        wxChar ch = s[i];
        if(ch==_T('"') || ch==_T('>') || ch==_T('<'))
        {
            mode = -1; // Error
            break;
        }
        switch(mode)
        {
            case 0: // normal
                if(ch==_T('&'))
                {
                    mode = 1;
                    entity.Clear();
                }
                else
                    result << ch;
                break;
            case 1: // escaped
                if(ch==_T('&'))
                {
                    mode = -1; // Error
                    break;
                }
                else if(ch==_T(';'))
                {
                    mode = 0;
                    if(entity==_T("quot"))
                        ch = _T('"');
                    else if(entity==_T("amp"))
                        ch = _T('&');
                    else if(entity==_T("apos"))
                        ch = _T('\'');
                    else if(entity==_T("lt"))
                        ch = _T('<');
                    else if(entity==_T("gt"))
                        ch = _T('>');
                    else if(entity[0]==_T('#') && s2u(entity.substr(1),u))
                        ch = u;
                    else
                    {
                        mode = -1; // Error: Unrecognized entity
                        break;
                    }
                    result << ch;
                }
                else
                    entity << ch;
                break;
        }
    }
    if(mode < 0)
        result.Clear();
    return (mode >= 0);
}
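
A minimal usage sketch (hypothetical, not from the Code::Blocks sources; it assumes UnSerializeString is callable as a static member, which its lack of instance state suggests). It shows what the parser above implements: the five named entities decode, and a bare '&', '<', '>' or '"' is rejected.

#include <wx/string.h>
// SearchTreeNode as declared in Code::Blocks' searchtree.h.

void DecodeExample()
{
    wxString decoded;
    if (SearchTreeNode::UnSerializeString(_T("Tom &amp; Jerry &lt;3"), decoded))
    {
        // decoded now holds: Tom & Jerry <3
    }
    else
    {
        // malformed input: stray markup character or unrecognized entity
    }
}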
Example #3
static size_t
zone_good_size(malloc_zone_t *zone, size_t size)
{

	if (size == 0)
		size = 1;
	return (s2u(size));
}
Example #4
static void
huge_ralloc_no_move_similar(void *ptr, size_t oldsize, size_t usize_min,
    size_t usize_max, bool zero)
{
	size_t usize, usize_next;
	extent_node_t *node;
	arena_t *arena;
	chunk_hooks_t chunk_hooks = CHUNK_HOOKS_INITIALIZER;
	bool pre_zeroed, post_zeroed;

	/* Increase usize to incorporate extra. */
	for (usize = usize_min; usize < usize_max && (usize_next = s2u(usize+1))
	    <= oldsize; usize = usize_next)
		; /* Do nothing. */

	if (oldsize == usize)
		return;

	node = huge_node_get(ptr);
	arena = extent_node_arena_get(node);
	pre_zeroed = extent_node_zeroed_get(node);

	/* Fill if necessary (shrinking). */
	if (oldsize > usize) {
		size_t sdiff = oldsize - usize;
		if (config_fill && unlikely(opt_junk_free)) {
			memset((void *)((uintptr_t)ptr + usize), 0x5a, sdiff);
			post_zeroed = false;
		} else {
			post_zeroed = !chunk_purge_wrapper(arena, &chunk_hooks,
			    ptr, CHUNK_CEILING(oldsize), usize, sdiff);
		}
	} else
		post_zeroed = pre_zeroed;

	malloc_mutex_lock(&arena->huge_mtx);
	/* Update the size of the huge allocation. */
	assert(extent_node_size_get(node) != usize);
	extent_node_size_set(node, usize);
	/* Update zeroed. */
	extent_node_zeroed_set(node, post_zeroed);
	malloc_mutex_unlock(&arena->huge_mtx);

	arena_chunk_ralloc_huge_similar(arena, ptr, oldsize, usize);

	/* Fill if necessary (growing). */
	if (oldsize < usize) {
		if (zero || (config_fill && unlikely(opt_zero))) {
			if (!pre_zeroed) {
				memset((void *)((uintptr_t)ptr + oldsize), 0,
				    usize - oldsize);
			}
		} else if (config_fill && unlikely(opt_junk_alloc)) {
			memset((void *)((uintptr_t)ptr + oldsize), 0xa5, usize -
			    oldsize);
		}
	}
}
Example #5
        NEXT

        CASE(SMOD)
        {
            ON_OP();
            updateIOGas();

            m_SPP[0] = m_SP[1] ? s2u(modWorkaround(u2s(m_SP[0]), u2s(m_SP[1]))) : 0;
        }
Example #6
bool
huge_ralloc_no_move(void *ptr, size_t oldsize, size_t size, size_t extra,
    bool zero)
{
	size_t usize;

	/* Both allocations must be huge to avoid a move. */
	if (oldsize < chunksize)
		return (true);

	assert(s2u(oldsize) == oldsize);
	usize = s2u(size);
	if (usize == 0) {
		/* size_t overflow. */
		return (true);
	}

	/*
	 * Avoid moving the allocation if the existing chunk size accommodates
	 * the new size.
	 */
	if (CHUNK_CEILING(oldsize) >= CHUNK_CEILING(usize)
	    && CHUNK_CEILING(oldsize) <= CHUNK_CEILING(size+extra)) {
		huge_ralloc_no_move_similar(ptr, oldsize, usize, size, extra,
		    zero);
		return (false);
	}

	/* Shrink the allocation in-place. */
	if (CHUNK_CEILING(oldsize) >= CHUNK_CEILING(usize)) {
		huge_ralloc_no_move_shrink(ptr, oldsize, usize);
		return (false);
	}

	/* Attempt to expand the allocation in-place. */
	if (huge_ralloc_no_move_expand(ptr, oldsize, size + extra, zero)) {
		if (extra == 0)
			return (true);

		/* Try again, this time without extra. */
		return (huge_ralloc_no_move_expand(ptr, oldsize, size, zero));
	}
	return (false);
}
Example #7
        NEXT

        CASE(SDIV)
        {
            ON_OP();
            updateIOGas();

            m_SPP[0] = m_SP[1] ? s2u(divWorkaround(u2s(m_SP[0]), u2s(m_SP[1]))) : 0;
            --m_SP;
        }
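
In both EVM cases (Examples #5 and #7), u2s()/s2u() reinterpret a 256-bit stack word between its unsigned and signed two's-complement views, and divWorkaround()/modWorkaround() pin down the one overflowing signed case (the most negative value divided by -1, which the EVM defines to wrap). A sketch of the same idea on 64-bit words, purely illustrative; the interpreter's real helpers operate on 256-bit types:

#include <cstdint>

// Reinterpretation, not numeric conversion: the bit pattern is unchanged
// (two's-complement representation assumed).
static inline int64_t  u2s(uint64_t u) { return static_cast<int64_t>(u); }
static inline uint64_t s2u(int64_t s)  { return static_cast<uint64_t>(s); }

// Signed division with the INT64_MIN / -1 overflow case made explicit,
// mirroring what a divWorkaround()-style helper must guard against.
static inline int64_t divWorkaround(int64_t a, int64_t b)
{
    return (a == INT64_MIN && b == -1) ? INT64_MIN : a / b;
}

// SDIV on this small model: q = b ? s2u(divWorkaround(u2s(a), u2s(b))) : 0;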
Example #8
/*
 * base_alloc() guarantees demand-zeroed memory, in order to make multi-page
 * sparse data structures such as radix tree nodes efficient with respect to
 * physical memory usage.
 */
void *
base_alloc(tsdn_t *tsdn, size_t size)
{
	void *ret;
	size_t csize, usize;
	extent_node_t *node;
	extent_node_t key;

	/*
	 * Round size up to nearest multiple of the cacheline size, so that
	 * there is no chance of false cache line sharing.
	 */
	csize = CACHELINE_CEILING(size);

	usize = s2u(csize);
	extent_node_init(&key, NULL, NULL, usize, 0, false, false);
	malloc_mutex_lock(tsdn, &base_mtx);
	node = extent_tree_szsnad_nsearch(&base_avail_szsnad, &key);
	if (node != NULL) {
		/* Use existing space. */
		extent_tree_szsnad_remove(&base_avail_szsnad, node);
	} else {
		/* Try to allocate more space. */
		node = base_chunk_alloc(tsdn, csize);
	}
	if (node == NULL) {
		ret = NULL;
		goto label_return;
	}

	ret = extent_node_addr_get(node);
	if (extent_node_size_get(node) > csize) {
		extent_node_addr_set(node, (void *)((uintptr_t)ret + csize));
		extent_node_size_set(node, extent_node_size_get(node) - csize);
		extent_tree_szsnad_insert(&base_avail_szsnad, node);
	} else
		base_node_dalloc(tsdn, node);
	if (config_stats) {
		base_allocated += csize;
		/*
		 * Add one PAGE to base_resident for every page boundary that is
		 * crossed by the new allocation.
		 */
		base_resident += PAGE_CEILING((vaddr_t)ret + csize) -
		    PAGE_CEILING((vaddr_t)ret);
	}
	JEMALLOC_VALGRIND_MAKE_MEM_DEFINED(ret, csize);
label_return:
	malloc_mutex_unlock(tsdn, &base_mtx);
	return (ret);
}
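
base_alloc() rounds at two granularities: CACHELINE_CEILING() sizes the block it hands out (avoiding false sharing, per the comment above), while PAGE_CEILING() counts crossed page boundaries for the resident-memory statistics. A sketch of how such power-of-two ceiling macros are typically written; the constants are illustrative, jemalloc derives them from its build configuration:

#include <stddef.h>

#define CACHELINE		64	/* illustrative */
#define CACHELINE_CEILING(s)	(((s) + CACHELINE - 1) & ~((size_t)CACHELINE - 1))

#define PAGE			4096	/* illustrative */
#define PAGE_CEILING(s)		(((s) + PAGE - 1) & ~((size_t)PAGE - 1))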
Example #9
static bool
huge_ralloc_no_move_expand(void *ptr, size_t oldsize, size_t size, bool zero) {
	size_t usize;
	extent_node_t *node;
	arena_t *arena;
	bool is_zeroed_subchunk, is_zeroed_chunk;

	usize = s2u(size);
	if (usize == 0) {
		/* size_t overflow. */
		return (true);
	}

	node = huge_node_get(ptr);
	arena = extent_node_arena_get(node);
	malloc_mutex_lock(&arena->huge_mtx);
	is_zeroed_subchunk = extent_node_zeroed_get(node);
	malloc_mutex_unlock(&arena->huge_mtx);

	/*
	 * Copy zero into is_zeroed_chunk and pass the copy to chunk_alloc(), so
	 * that it is possible to make correct junk/zero fill decisions below.
	 */
	is_zeroed_chunk = zero;

	if (arena_chunk_ralloc_huge_expand(arena, ptr, oldsize, usize,
	     &is_zeroed_chunk))
		return (true);

	malloc_mutex_lock(&arena->huge_mtx);
	/* Update the size of the huge allocation. */
	extent_node_size_set(node, usize);
	malloc_mutex_unlock(&arena->huge_mtx);

	if (zero || (config_fill && unlikely(opt_zero))) {
		if (!is_zeroed_subchunk) {
			memset((void *)((uintptr_t)ptr + oldsize), 0,
			    CHUNK_CEILING(oldsize) - oldsize);
		}
		if (!is_zeroed_chunk) {
			memset((void *)((uintptr_t)ptr +
			    CHUNK_CEILING(oldsize)), 0, usize -
			    CHUNK_CEILING(oldsize));
		}
	} else if (config_fill && unlikely(opt_junk_alloc)) {
		memset((void *)((uintptr_t)ptr + oldsize), 0xa5, usize -
		    oldsize);
	}

	return (false);
}
Example #10
void *
huge_malloc(tsd_t *tsd, arena_t *arena, size_t size, bool zero,
    tcache_t *tcache)
{
	size_t usize;

	usize = s2u(size);
	if (usize == 0) {
		/* size_t overflow. */
		return (NULL);
	}

	return (huge_palloc(tsd, arena, usize, chunksize, zero, tcache));
}
Example #11
// Internal function to query the accelerometer for registers 0-3 and process.
// FYI, PoLa and BaFro are nomenclature from the MMA7660FC datasheet itself,
// representing Portrait/Landscape and Back/Front orientations, respectively.
uint8_t GadgetShield::readAxes(uint8_t *x, uint8_t *y, uint8_t *z, uint8_t *shake, uint8_t *tap, uint8_t *PoLa, uint8_t *BaFro)
{
  uint8_t len, code;

  // Upon wanting to read an entirely new set of values, set valid[] to all 0xFF to
  // indicate invalid values.
  if (numValid == 0) {
    memset(valid, 0xFF, sizeof(valid)); // 0xFF has bit 6 set which is the alert flag
  }

  if (0==(code=twi_writeToReadFrom(ACC_SLAVE_ADDR, (uint8_t *)"\x00", 1, val, 4, &len))) {
    if (len==4) {
      uint8_t i;

      numValid = 0;

      for (i=0; i < 4; i++) {
        if (! (val[i] & 0x40)) { // If alert flag is not set, update valid reading
          valid[i] = val[i];
        }
        if (valid[i] != (uint8_t)0xFF) { // See how many valid readings we have, either new or from last time
          numValid++;
        }
      }
      
      if (numValid == 4) {
        numValid = 0;
        if (x) *x = s2u(valid[0]);
        if (y) *y = s2u(valid[1]);
        if (z) *z = s2u(valid[2]);
        if (shake) *shake = (valid[3] & 0x80) ? 1 : 0;
        if (tap) *tap = (valid[3] & 0x20) ? 1 : 0;
        if (PoLa) *PoLa = (valid[3] >> 2) & 0x7;
        if (BaFro) *BaFro = (valid[3] & 0x3);
        return 1;
      }
    }
  }
  // Snippet truncated on the source page; assumed completion: report
  // failure when we do not yet have four valid readings or the I2C
  // transaction failed.
  return 0;
}
Example #12
TEST_END

TEST_BEGIN(test_overflow)
{
	size_t max_size_class;

	max_size_class = get_max_size_class();

	assert_u_eq(size2index(max_size_class+1), NSIZES,
	    "size2index() should return NSIZES on overflow");
	assert_u_eq(size2index(ZU(PTRDIFF_MAX)+1), NSIZES,
	    "size2index() should return NSIZES on overflow");
	assert_u_eq(size2index(SIZE_T_MAX), NSIZES,
	    "size2index() should return NSIZES on overflow");

	assert_zu_eq(s2u(max_size_class+1), 0,
	    "s2u() should return 0 for unsupported size");
	assert_zu_eq(s2u(ZU(PTRDIFF_MAX)+1), 0,
	    "s2u() should return 0 for unsupported size");
	assert_zu_eq(s2u(SIZE_T_MAX), 0,
	    "s2u() should return 0 on overflow");

	assert_u_eq(psz2ind(max_size_class+1), NPSIZES,
	    "psz2ind() should return NPSIZES on overflow");
	assert_u_eq(psz2ind(ZU(PTRDIFF_MAX)+1), NPSIZES,
	    "psz2ind() should return NPSIZES on overflow");
	assert_u_eq(psz2ind(SIZE_T_MAX), NPSIZES,
	    "psz2ind() should return NPSIZES on overflow");

	assert_zu_eq(psz2u(max_size_class+1), 0,
	    "psz2u() should return 0 for unsupported size");
	assert_zu_eq(psz2u(ZU(PTRDIFF_MAX)+1), 0,
	    "psz2u() should return 0 for unsupported size");
	assert_zu_eq(psz2u(SIZE_T_MAX), 0,
	    "psz2u() should return 0 on overflow");
}
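
These assertions pin down the overflow contract the jemalloc call sites above rely on: s2u() rounds a request up to its size class and returns 0 when no class can hold it, so callers such as huge_malloc() and huge_ralloc_no_move_expand() must check for 0 before using the result. A hedged sketch of that idiom; the helper name is hypothetical:

/*
 * Hypothetical helper, not from jemalloc: validate a request and round it
 * up to its size class, following jemalloc's true-on-error convention.
 */
static bool
request_usize(size_t size, size_t *usize)
{
	*usize = s2u(size);
	if (*usize == 0) {
		/* size_t overflow; see the s2u() assertions above. */
		return (true);
	}
	return (false);
}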
Example #13
bool SearchTreeNode::s2i(const wxString& s,int& i)
{
    bool is_ok = true;
    i = 0;
    unsigned int u = 0;
    if(!s.IsEmpty())
    {
        if(s[0]==_T('-'))
        {
            if(!s2u(s.substr(1),u))
                is_ok = false;
            else
                i = 0 - u;
        }
        else
        {
            if(!s2u(s,u))
                is_ok = false;
            else
                i = u;
        }
    }
    return is_ok;
}
Example #14
static void GetMachineName(str::Str<char>& s)
{
    WCHAR *s1 = ReadRegStr(HKEY_LOCAL_MACHINE, L"HARDWARE\\DESCRIPTION\\System\\BIOS", L"SystemFamily");
    WCHAR *s2 = ReadRegStr(HKEY_LOCAL_MACHINE, L"HARDWARE\\DESCRIPTION\\System\\BIOS", L"SystemVersion");
    ScopedMem<char> s1u(s1 ? str::conv::ToUtf8(s1) : NULL);
    ScopedMem<char> s2u(s2 ? str::conv::ToUtf8(s2) : NULL);
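    // Note: s1u and s2u here are ScopedMem<char> owners of the UTF-8 copies,
    // not conversion functions; they free their buffers automatically when
    // the function returns (the raw s1/s2 are freed explicitly below).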

    if (!s1u && !s2u)
        ; // pass
    else if (!s1u)
        s.AppendFmt("Machine: %s\r\n", s2u.Get());
    else if (!s2u || str::EqI(s1u, s2u))
        s.AppendFmt("Machine: %s\r\n", s1u.Get());
    else
        s.AppendFmt("Machine: %s %s\r\n", s1u.Get(), s2u.Get());

    free(s1);
    free(s2);
}
Example #15
bool
huge_ralloc_no_move(void *ptr, size_t oldsize, size_t usize_min,
    size_t usize_max, bool zero)
{

	assert(s2u(oldsize) == oldsize);

	/* Both allocations must be huge to avoid a move. */
	if (oldsize < chunksize || usize_max < chunksize)
		return (true);

	if (CHUNK_CEILING(usize_max) > CHUNK_CEILING(oldsize)) {
		/* Attempt to expand the allocation in-place. */
		if (!huge_ralloc_no_move_expand(ptr, oldsize, usize_max, zero))
			return (false);
		/* Try again, this time with usize_min. */
		if (usize_min < usize_max && CHUNK_CEILING(usize_min) >
		    CHUNK_CEILING(oldsize) && huge_ralloc_no_move_expand(ptr,
		    oldsize, usize_min, zero))
			return (false);
	}

	/*
	 * Avoid moving the allocation if the existing chunk size accommodates
	 * the new size.
	 */
	if (CHUNK_CEILING(oldsize) >= CHUNK_CEILING(usize_min)
	    && CHUNK_CEILING(oldsize) <= CHUNK_CEILING(usize_max)) {
		huge_ralloc_no_move_similar(ptr, oldsize, usize_min, usize_max,
		    zero);
		return (false);
	}

	/* Attempt to shrink the allocation in-place. */
	if (CHUNK_CEILING(oldsize) > CHUNK_CEILING(usize_max))
		return (huge_ralloc_no_move_shrink(ptr, oldsize, usize_max));
	return (true);
}
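
The boolean convention above (false means the resize happened in place) implies the usual caller shape: try the no-move path first and fall back to allocate-copy-free only when it fails. A hedged caller sketch; huge_ralloc_sketch stands in for jemalloc's actual move path and is not its real API:

/* Hypothetical caller, not jemalloc's actual huge_ralloc(). */
void *
huge_ralloc_sketch(void *ptr, size_t oldsize, size_t usize_min,
    size_t usize_max, bool zero)
{
	/* Fast path: resize in place when the existing chunk suffices. */
	if (!huge_ralloc_no_move(ptr, oldsize, usize_min, usize_max, zero))
		return (ptr);
	/* Slow path: allocate a new region, copy, free (omitted here). */
	return (NULL);
}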
Example #16
static int get_panel(int oid, data_panel *panel, size_t size)
{
	int ret = (s32b) size;
	switch(oid)
	{
  case 1:
  {
	int i = 0;
	assert( size >= (u32b) boundaries[1].page_rows);
	ret = boundaries[1].page_rows;
	P_I(TERM_L_BLUE, "Name",	"%y",	s2u(op_ptr->full_name), END  );
	P_I(TERM_L_BLUE, "Sex",		"%y",	s2u(sp_ptr->title), END  );
	P_I(TERM_L_BLUE, "Race",	"%y",	s2u(rp_ptr->name), END  );
	P_I(TERM_L_BLUE, "Class",	"%y",	s2u(cp_ptr->name), END  );
	P_I(TERM_L_BLUE, "Title",	"%y",	s2u(show_title()), END  );
	P_I(TERM_L_BLUE, "HP",	"%y/%y",	i2u(p_ptr->chp), i2u(p_ptr->mhp)  );
	P_I(TERM_L_BLUE, "SP",	"%y/%y",	i2u(p_ptr->csp), i2u(p_ptr->msp)  );
	P_I(TERM_L_BLUE, "Level",	"%y",	i2u(p_ptr->lev), END  );
	assert(i == boundaries[1].page_rows);
	return ret;
  }
  case 2:
  {
	int i = 0;
	assert( ret >= boundaries[2].page_rows);
	ret = boundaries[2].page_rows;
	P_I(max_color(p_ptr->lev, p_ptr->max_lev), "Level", "%y", i2u(p_ptr->lev), END  );
	P_I(max_color(p_ptr->exp, p_ptr->max_exp), "Cur Exp", "%y", i2u(p_ptr->exp), END  );
	P_I(TERM_L_GREEN, "Max Exp",	"%y",	i2u(p_ptr->max_exp), END  );
	P_I(TERM_L_GREEN, "Adv Exp",	"%y",	s2u(show_adv_exp()), END  );
	P_I(TERM_L_GREEN, "MaxDepth",	"%y",	s2u(show_depth()), END  );
	P_I(TERM_L_GREEN, "Game Turns",	"%y",	i2u(turn), END  );
	P_I(TERM_L_GREEN, "Standard Turns","%y", i2u(p_ptr->total_energy / 100), END  );
	P_I(TERM_L_GREEN, "Resting Turns","%y",	i2u(p_ptr->resting_turn), END  );
	P_I(TERM_L_GREEN, "Gold",		"%y",	i2u(p_ptr->au), END  );
	assert(i == boundaries[2].page_rows);
	return ret;
  }
  case 3:
  {
	int i = 0;
	assert(ret >= boundaries[3].page_rows);
	ret = boundaries[3].page_rows;
	P_I(TERM_L_BLUE, "Armor", "[%y,%+y]",	i2u(p_ptr->state.dis_ac), i2u(p_ptr->state.dis_to_a)  );
	P_I(TERM_L_BLUE, "Fight", "(%+y,%+y)",	i2u(p_ptr->state.dis_to_h), i2u(p_ptr->state.dis_to_d)  );
	P_I(TERM_L_BLUE, "Melee", "%y",		s2u(show_melee_weapon(&p_ptr->inventory[INVEN_WIELD])), END  );
	P_I(TERM_L_BLUE, "Shoot", "%y",		s2u(show_missile_weapon(&p_ptr->inventory[INVEN_BOW])), END  );
	P_I(TERM_L_BLUE, "Blows", "%y.%y/turn",	i2u(p_ptr->state.num_blow / 100), i2u((p_ptr->state.num_blow / 10) % 10) );
	P_I(TERM_L_BLUE, "Shots", "%y/turn",	i2u(p_ptr->state.num_fire), END  );
	P_I(TERM_L_BLUE, "Infra", "%y ft",	i2u(p_ptr->state.see_infra * 10), END  );
	P_I(TERM_L_BLUE, "Speed", "%y",		s2u(show_speed()), END );
	P_I(TERM_L_BLUE, "Burden","%.1y lbs",	f2u(p_ptr->total_weight/10.0), END  );
	assert(i == boundaries[3].page_rows);
	return ret;
  }
  case 4:
  {
	static struct {
		const char *name;
		int skill;
		int div;
	} skills[] =
	{
		{ "Saving Throw", SKILL_SAVE, 6 },
		{ "Stealth", SKILL_STEALTH, 1 },
		{ "Fighting", SKILL_TO_HIT_MELEE, 12 },
		{ "Shooting", SKILL_TO_HIT_BOW, 12 },
		{ "Disarming", SKILL_DISARM, 8 },
		{ "Magic Device", SKILL_DEVICE, 6 },
		{ "Perception", SKILL_SEARCH_FREQUENCY, 6 },
		{ "Searching", SKILL_SEARCH, 6 }
	};
	int i;
	assert(N_ELEMENTS(skills) == boundaries[4].page_rows);
	ret = N_ELEMENTS(skills);
	if ((u32b) ret > size) ret = size;
	for (i = 0; i < ret; i++)
	{
		s16b skill = p_ptr->state.skills[skills[i].skill];
		panel[i].color = TERM_L_BLUE;
		panel[i].label = skills[i].name;
		if (skills[i].skill == SKILL_SAVE ||
				skills[i].skill == SKILL_SEARCH)
		{
			if (skill < 0) skill = 0;
			if (skill > 100) skill = 100;
			panel[i].fmt = "%y%%";
			panel[i].value[0] = i2u(skill);
			panel[i].color = colour_table[skill / 10];
		}
		else if (skills[i].skill == SKILL_DEVICE)
		{
			panel[i].fmt = "%y";
			panel[i].value[0] = i2u(skill);
			panel[i].color = colour_table[skill / 13];
		}
		else if (skills[i].skill == SKILL_SEARCH_FREQUENCY)
		{
			if (skill <= 0) skill = 1;
			if (skill >= 50)
			{
				panel[i].fmt = "1 in 1";
				panel[i].color = colour_table[10];
			}
			else
			{
				/* convert to % chance of searching */
				skill = 50 - skill;
				panel[i].fmt = "1 in %y";
				panel[i].value[0] = i2u(skill);
				panel[i].color =
					colour_table[(100 - skill*2) / 10];
			}
		}
		else if (skills[i].skill == SKILL_DISARM)
		{
			/* assume disarming a dungeon trap */
			skill -= 5;
			if (skill > 100) skill = 100;
			if (skill < 2) skill = 2;
			panel[i].fmt = "%y%%";
			panel[i].value[0] = i2u(skill);
			panel[i].color = colour_table[skill / 10];
		}
		else
		{
			panel[i].fmt = "%y";
			panel[i].value[0] = s2u(likert(skill, skills[i].div, &panel[i].color));
		}
	}
	return ret;
  }
  case 5:
  {
	int i = 0;
	assert(ret >= boundaries[5].page_rows);
	ret = boundaries[5].page_rows;
	P_I(TERM_L_BLUE, "Age",			"%y",	i2u(p_ptr->age), END );
	P_I(TERM_L_BLUE, "Height",		"%y",	i2u(p_ptr->ht), END  );
	P_I(TERM_L_BLUE, "Weight",		"%y",	i2u(p_ptr->wt), END  );
	P_I(TERM_L_BLUE, "Social",		"%y",	s2u(show_status()), END  );
	P_I(TERM_L_BLUE, "Maximize",	"%y",	c2u(OPT(birth_maximize) ? 'Y' : 'N'), END);
#if 0
	/* Preserve mode deleted */
	P_I(TERM_L_BLUE, "Preserve",	"%y",	c2u(birth_preserve ? 'Y' : 'N'), END);
#endif
	assert(i == boundaries[5].page_rows);
	return ret;
  }
 }
	/* hopefully not reached */
	return 0;
}
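
In this last example s2u() is unrelated to the jemalloc size function: together with i2u(), c2u() and f2u() it wraps a value in a tagged union so the P_I() rows can mix strings, integers, characters and floats in one panel table. A purely illustrative sketch of such constructors; Angband's real type_union differs in detail:

/* Illustrative only -- not Angband's actual definitions. */
enum panel_value_type { T_INT, T_FLOAT, T_CHAR, T_STR };

typedef struct type_union {
	enum panel_value_type type;
	union { int i; double f; char c; const char *s; } u;
} type_union;

static type_union i2u(int i)         { type_union r; r.type = T_INT;   r.u.i = i; return r; }
static type_union f2u(double f)      { type_union r; r.type = T_FLOAT; r.u.f = f; return r; }
static type_union c2u(char c)        { type_union r; r.type = T_CHAR;  r.u.c = c; return r; }
static type_union s2u(const char *s) { type_union r; r.type = T_STR;   r.u.s = s; return r; }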