Example #1
static int fill_page_to_migrate(void *key, void *value) {
   int i, node = page_to_most_accessing_die(value);
   struct page *v = value;
   /* Start half a cluster above this page, then step back down so the
    * CLUSTER pages queued below are roughly centered on it. */
   v->addr += PAGE_SIZE*CLUSTER/2;
   for(i = 0; i < CLUSTER; i++) {
      insert_page(v, node);
      v->addr -= PAGE_SIZE;
   }
   return 0;
}
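The (void *key, void *value) signature and the unconditional 0 return suggest this function is a per-entry callback for a hash-table walk over tracked pages. Below is a minimal, self-contained sketch of how such a callback could be driven; the stand-in struct page, the PAGE_SIZE and CLUSTER values, and both stub helpers are assumptions for illustration, not code from the original project.

#include <stdio.h>

#define PAGE_SIZE 4096
#define CLUSTER   8                    /* pages per migration cluster (assumed) */

struct page { unsigned long addr; };   /* simplified stand-in */

/* Stubs for the project-specific helpers. */
static int page_to_most_accessing_die(void *value) { (void)value; return 1; }
static void insert_page(struct page *p, int node) {
   printf("queue page at 0x%lx for node %d\n", p->addr, node);
}

/* The callback from Example #1, unchanged. */
static int fill_page_to_migrate(void *key, void *value) {
   int i, node = page_to_most_accessing_die(value);
   struct page *v = value;
   v->addr += PAGE_SIZE*CLUSTER/2;
   for(i = 0; i < CLUSTER; i++) {
      insert_page(v, node);
      v->addr -= PAGE_SIZE;
   }
   return 0;
}

int main(void) {
   struct page p = { 0x100000 };
   /* A real caller would hand the callback to a hash-table walk, e.g.
    * hash_for_each(page_table, fill_page_to_migrate) (hypothetical). */
   return fill_page_to_migrate(NULL, &p);
}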
Example #2
File: pmap.c Project: ecros/xv6-vm
// Map size bytes of physical memory starting at pa to the linear
// address la in pgdir; called at boot, so any failure is fatal.
static void
boot_map_segment(pde_t * pgdir, paddr_t pa, vaddr_t la, uint size, uint perm)
{
	int ret = 0;
	uint i = 0;
	assert(!(size & 0xfff), "size is not a multiple of PAGE\n");
	for (; i < size; i += PAGE) {
		if ((ret = insert_page(pgdir, pa + i, la + i, perm, 1)) < 0) {
			cprintf("error %d\n",ret);
			panic("error at boot map segment\n");
		}
	}
}
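A plausible boot-time call site for this helper, modeled on common xv6 conventions. KERNBASE and PTE_W are assumed constants and boot_setup_vm is a hypothetical caller; none of these names appear in the excerpt.

// Sketch only: wire the first 16 MiB of physical RAM at the kernel's
// linear base, writable by the kernel.  All names except
// boot_map_segment are assumed, xv6-style conventions.
static void
boot_setup_vm(pde_t * pgdir)
{
	boot_map_segment(pgdir, 0, KERNBASE, 16 * 1024 * 1024, PTE_W);
}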
Example #3
File: pmap.c Project: ecros/xv6-vm
// Map the segment with physical address pa at linear address la.
//
// RETURNS
// 0 on success
// -E_MAP_EXIST, if there is already a page mapped at 'la'
// -E_NO_MEM, if the page table couldn't be allocated
// -E_NOT_AT_PGBOUND, if pa, la, or size is not page-aligned
int
map_segment(pde_t * pgdir, paddr_t pa, vaddr_t la, uint size, uint perm)
{
  int ret = 0;
  uint i = 0;
  if ((pa | la | size) & 0xfff)
    return -E_NOT_AT_PGBOUND;
  for (; i < size; i += PAGE) {
    if ((ret = insert_page(pgdir, pa + i, la + i, perm, 0)) < 0) {
      return ret;
    }
  }
  return 0;
}
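A hypothetical caller exercising the documented error contract; the diagnostics are illustrative only, not taken from the project.

  /* Sketch of a caller distinguishing the documented failure modes. */
  int r = map_segment(pgdir, pa, la, size, perm);
  if (r == -E_NOT_AT_PGBOUND)
    cprintf("map_segment: pa, la, or size not page-aligned\n");
  else if (r == -E_MAP_EXIST)
    cprintf("map_segment: a page is already mapped at la\n");
  else if (r == -E_NO_MEM)
    cprintf("map_segment: could not allocate a page table\n");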
Example #4
/* If PAGE is in the circularly linked list (i.e., its NEXT isn't NULL),
 * move it to the front of the list. */
static svn_error_t *
move_page_to_front(inprocess_cache_t *cache,
                   struct cache_page *page)
{
  /* This function is called whilst CACHE is locked. */

  SVN_ERR_ASSERT(page != cache->sentinel);

  if (! page->next)
    return SVN_NO_ERROR;

  remove_page_from_list(page);
  insert_page(cache, page);

  return SVN_NO_ERROR;
}
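insert_page itself is not part of this excerpt. Given the sentinel-based circular doubly linked list the cache uses, a front insertion is a four-pointer splice; the body below is a sketch consistent with the fields visible above (next, prev, sentinel), not necessarily the project's actual implementation.

/* Sketch only: splice PAGE in immediately after the sentinel, making it
 * the most-recently-used page.  Assumes CACHE is already locked, as the
 * comment in move_page_to_front requires. */
static void
insert_page(inprocess_cache_t *cache, struct cache_page *page)
{
  page->prev = cache->sentinel;
  page->next = cache->sentinel->next;
  page->prev->next = page;
  page->next->prev = page;
}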
Example #5
void tex::build_page ()
	{
	int	pi=0, b, c;
	ptr	p, q, r;
	
#define INF_SHRINK_PAGE "Infinite glue shrinkage found on current page"
	
	if (link(contrib_head) == null || output_active)
		return;
	do {
		p = link(contrib_head);
		if (last_glue != null)
			delete_glue_ref(last_glue);
		last_penalty = 0;
		last_kern = 0;
		if (type(p) == GLUE_NODE) {
			last_glue = glue_ptr(p);
			add_glue_ref(last_glue);
		} else {
			last_glue = null;
			if (type(p) == PENALTY_NODE) {
				last_penalty = penalty(p);
			} else if (type(p) == KERN_NODE) {
				last_kern = kern_width(p);
			}
		}
		switch (type(p))
		{
		case HLIST_NODE:
		case VLIST_NODE:
		case RULE_NODE:
			if (page_contents < BOX_THERE) {
				if (page_contents == EMPTY) {
					freeze_page_specs(BOX_THERE);
				} else {
					page_contents = BOX_THERE;
				}
				q = new_skip_param(TOP_SKIP_CODE);
				link(q) = p;
				link(contrib_head) = q;
				r = glue_ptr(q);
				if (glue_width(r) > box_height(p)) {
					glue_width(r) -= box_height(p);
				} else {
					glue_width(r) = 0;
				}
				continue;
			} else {
				page_total += page_depth + box_height(p);
				page_depth = box_depth(p);
				goto contribute;
			}
		
		case GLUE_NODE:
			if (page_contents < BOX_THERE) {
				goto done;
			} else if (precedes_break(page_tail)) {
				pi = 0;
			} else {
				goto update_heights;
			}
			break;
		
		case KERN_NODE:
			if (page_contents < BOX_THERE) {
				goto done;
			} else if (link(p) == null) {
				return;
			} else if (type(link(p)) == GLUE_NODE) {
				pi = 0;
			} else {
				goto update_heights;
			}
			break;
		
		case PENALTY_NODE:
			if (page_contents < BOX_THERE) {
				goto done;
			} else {
				pi = penalty(p);
			}
			break;

		case WHATSIT_NODE:
			goto contribute;
		
		case MARK_NODE:
			goto contribute;

		case INS_NODE:
			insert_page(p);
			goto contribute;
		
		default:
			confusion("page");
			break;
		}
		if (pi < INF_PENALTY) {
			b = page_badness();
			if (b < AWFUL_BAD) {
				if (pi <= EJECT_PENALTY) {
					c = pi;
				} else if (b < INF_BAD) {
					c = b + pi + insert_penalties;
				} else {
					c = DEPLORABLE;
				}
			} else {
				c = b;
			}
			if (insert_penalties >= 10000)
				c = AWFUL_BAD;
			if (tracing_pages > 0)
				show_page_stats(b, pi, c);
			if (c <= least_page_cost) {
				best_page_break = p;
				best_size = page_goal;
				least_page_cost = c;
				r = link(page_ins_head);
				while (r != page_ins_head) {
					best_ins_ptr(r) = last_ins_ptr(r);
					r = link(r);
				}
			}
			if (c == AWFUL_BAD || pi <= EJECT_PENALTY) {
				fire_up(p);
				if (output_active)
					return;
				continue;
			}
		}
		if (type(p) < GLUE_NODE || type(p) > KERN_NODE) {
			goto contribute;
		}
		
	update_heights:
		if (type(p) == KERN_NODE) {
			page_total += page_depth + kern_width(p);
		} else {
			q = glue_ptr(p);
			page_so_far[2 + stretch_order(q)] += stretch(q);
			page_shrink += shrink(q);
			if (shrink_order(q) != NORMAL && shrink(q) != 0) {
				print_err(INF_SHRINK_PAGE);
				help_inf_shrink_page();
				error();
				r = new_spec(q);
				shrink_order(r) = NORMAL;
				delete_glue_ref(q);
				q = glue_ptr(p) = r;
			}
			page_total += page_depth + glue_width(q);
		}
		page_depth = 0;

	contribute:
		if (page_depth > page_max_depth) {
			page_total = page_total + page_depth - page_max_depth;
			page_depth = page_max_depth;
		}
		page_tail = link(page_tail) = p;
		link(contrib_head) = link(p);
		link(p) = null;
		continue;

	done:
		link(contrib_head) = link(p);
		link(p) = null;
		flush_node_list(p);
	} while (link(contrib_head) != null);
	if (nest_ptr == nest) {
		tail = contrib_head;
	} else {
		contrib_tail = contrib_head;
	}
}
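The cost computation in the middle of the loop is the core of the page-break decision: an eject penalty forces a break at cost pi, finite badness combines additively with the penalty, and an overfull or hopeless page is deplorable or awful. Restated as a standalone helper for readability; the constant values follow classic TeX and are assumed, since this excerpt does not define them.

/* Sketch of the cost rule used in build_page above.  Constants follow
 * TeX's conventions (assumed, not defined in this excerpt). */
#define INF_BAD        10000
#define AWFUL_BAD      0x3FFFFFFF
#define DEPLORABLE     100000
#define EJECT_PENALTY  (-10000)

static int page_cost(int b, int pi, int insert_penalties)
	{
	int c;

	if (b >= AWFUL_BAD)
		c = b;                         /* page is hopelessly bad */
	else if (pi <= EJECT_PENALTY)
		c = pi;                        /* forced break: penalty alone decides */
	else if (b < INF_BAD)
		c = b + pi + insert_penalties; /* normal case: badness plus penalties */
	else
		c = DEPLORABLE;
	if (insert_penalties >= 10000)
		c = AWFUL_BAD;                 /* too many held-over insertions */
	return c;
	}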
Example #6
static svn_error_t *
inprocess_cache_set_internal(inprocess_cache_t *cache,
                             const void *key,
                             void *value,
                             apr_pool_t *scratch_pool)
{
  struct cache_entry *existing_entry;

  existing_entry = apr_hash_get(cache->hash, key, cache->klen);

  /* Is it already here, but we can do the one-item-per-page
   * optimization? */
  if (existing_entry && cache->items_per_page == 1)
    {
      /* Special case!  ENTRY is the *only* entry on this page, so
       * why not wipe it (so as not to leak the previous value).
       */
      struct cache_page *page = existing_entry->page;

      /* This can't be the partial page: items_per_page == 1
       * *never* has a partial page (except for in the temporary state
       * that we're about to fake). */
      SVN_ERR_ASSERT(page->next != NULL);
      SVN_ERR_ASSERT(cache->partial_page == NULL);

      erase_page(cache, page);
      existing_entry = NULL;
    }

  /* Is it already here, and we just have to leak the old value? */
  if (existing_entry)
    {
      struct cache_page *page = existing_entry->page;

      SVN_ERR(move_page_to_front(cache, page));
      cache->data_size -= existing_entry->size;
      if (value)
        {
          SVN_ERR(cache->serialize_func(&existing_entry->value,
                                        &existing_entry->size,
                                        value,
                                        page->page_pool));
          cache->data_size += existing_entry->size;
          if (existing_entry->size == 0)
            existing_entry->value = NULL;
        }
      else
        {
          existing_entry->value = NULL;
          existing_entry->size = 0;
        }

      return SVN_NO_ERROR;
    }

  /* Do we not have a partial page to put it on, but we are allowed to
   * allocate more? */
  if (cache->partial_page == NULL && cache->unallocated_pages > 0)
    {
      cache->partial_page = apr_pcalloc(cache->cache_pool,
                                        sizeof(*(cache->partial_page)));
      cache->partial_page->page_pool = svn_pool_create(cache->cache_pool);
      cache->partial_page_number_filled = 0;
      (cache->unallocated_pages)--;
    }

  /* Do we really not have a partial page to put it on, even after the
   * one-item-per-page optimization and checking the unallocated page
   * count? */
  if (cache->partial_page == NULL)
    {
      struct cache_page *oldest_page = cache->sentinel->prev;

      SVN_ERR_ASSERT(oldest_page != cache->sentinel);

      /* Erase the page and put it in cache->partial_page. */
      erase_page(cache, oldest_page);
    }

  SVN_ERR_ASSERT(cache->partial_page != NULL);

  {
    struct cache_page *page = cache->partial_page;
    struct cache_entry *new_entry = apr_pcalloc(page->page_pool,
                                                sizeof(*new_entry));

    /* Copy the key and value into the page's pool.  */
    new_entry->key = duplicate_key(cache, key, page->page_pool);
    if (value)
      {
        SVN_ERR(cache->serialize_func(&new_entry->value,
                                      &new_entry->size,
                                      value,
                                      page->page_pool));
        cache->data_size += new_entry->size;
        if (new_entry->size == 0)
          new_entry->value = NULL;
      }
    else
      {
        new_entry->value = NULL;
        new_entry->size = 0;
      }

    /* Add the entry to the page's list. */
    new_entry->page = page;
    new_entry->next_entry = page->first_entry;
    page->first_entry = new_entry;

    /* Add the entry to the hash, using the *entry's* copy of the
     * key. */
    apr_hash_set(cache->hash, new_entry->key, cache->klen, new_entry);

    /* We've added something else to the partial page. */
    (cache->partial_page_number_filled)++;

    /* Is it full? */
    if (cache->partial_page_number_filled >= cache->items_per_page)
      {
        insert_page(cache, page);
        cache->partial_page = NULL;
      }
  }

  return SVN_NO_ERROR;
}
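The _internal suffix implies the locking is owned by a thin public wrapper. A sketch of that wrapper in Subversion's usual vtable style; SVN_MUTEX__WITH_LOCK is the real svn_mutex.h guard macro, but the cache->mutex field and the void *cache_void signature are assumptions about code not shown here.

/* Sketch only: serialize access to the cache, then delegate to the
 * function above. */
static svn_error_t *
inprocess_cache_set(void *cache_void,
                    const void *key,
                    void *value,
                    apr_pool_t *scratch_pool)
{
  inprocess_cache_t *cache = cache_void;

  SVN_MUTEX__WITH_LOCK(cache->mutex,
                       inprocess_cache_set_internal(cache, key, value,
                                                    scratch_pool));
  return SVN_NO_ERROR;
}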