void copy_highpage(struct page *to, struct page *from)
{
	void *vfrom, *vto;

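	/*
	 * Fast path: when the hypervisor advertises highmem assist, let it
	 * copy the page machine-to-machine instead of mapping both pages
	 * here.  The mfn_to_pfn() round-trips below skip frames whose M2P
	 * entries do not point back at these PFNs (e.g. foreign pages).
	 */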
	if (likely(xen_feature(XENFEAT_highmem_assist))
	    && (PageHighMem(from) || PageHighMem(to))) {
		unsigned long from_pfn = page_to_pfn(from);
		unsigned long to_pfn = page_to_pfn(to);
		struct mmuext_op meo;

		meo.cmd = MMUEXT_COPY_PAGE;
		meo.arg1.mfn = pfn_to_mfn(to_pfn);
		meo.arg2.src_mfn = pfn_to_mfn(from_pfn);
		if (mfn_to_pfn(meo.arg2.src_mfn) == from_pfn
		    && mfn_to_pfn(meo.arg1.mfn) == to_pfn
		    && HYPERVISOR_mmuext_op(&meo, 1, NULL, DOMID_SELF) == 0)
			return;
	}

	vfrom = kmap_atomic(from, KM_USER0);
	vto = kmap_atomic(to, KM_USER1);
	copy_page(vto, vfrom);
	kunmap_atomic(vfrom, KM_USER0);
	kunmap_atomic(vto, KM_USER1);
}
Example #2
static void pre_suspend(void)
{
	HYPERVISOR_shared_info = (shared_info_t *)empty_zero_page;
	clear_fixmap(FIX_SHARED_INFO);

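	/*
	 * Canonicalise the xenstore and console ring frames to PFNs; the
	 * save/restore tools expect PFNs in start_info at this point.
	 */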
	xen_start_info->store_mfn = mfn_to_pfn(xen_start_info->store_mfn);
	xen_start_info->console_mfn =
		mfn_to_pfn(xen_start_info->console_mfn);
}
Example #3
void xen_arch_pre_suspend(void)
{
	xen_start_info->store_mfn = mfn_to_pfn(xen_start_info->store_mfn);
	xen_start_info->console.domU.mfn =
		mfn_to_pfn(xen_start_info->console.domU.mfn);

	BUG_ON(!irqs_disabled());

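	/*
	 * Detach the shared info page: its machine address is stale after
	 * migration, so point at a dummy page and clear the mapping.
	 */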
	HYPERVISOR_shared_info = &xen_dummy_shared_info;
	if (HYPERVISOR_update_va_mapping(fix_to_virt(FIX_PARAVIRT_BOOTMAP),
					 __pte_ma(0), 0))
		BUG();
}
Example #4
CAMLprim value
stub_hypervisor_suspend(value unit)
{
  CAMLparam0();
  int cancelled;

  printk("WARNING: stub_hypervisor_suspend not yet implemented\n");
  cancelled = 1;
#if 0
  /* Turn the store and console mfns to pfns - required because xc_domain_restore uses these values */
  start_info.store_mfn = mfn_to_pfn(start_info.store_mfn);
  start_info.console.domU.mfn = mfn_to_pfn(start_info.console.domU.mfn);

  /* canonicalize_pagetables can't cope with pagetable entries that are outside of the guest's mfns,
     so we must unmap anything outside of our space */
  unmap_shared_info();

  /* Actually do the suspend. When this function returns 0, we've been resumed */
  cancelled = HYPERVISOR_suspend(virt_to_mfn(&start_info));

  if(cancelled) {
    start_info.store_mfn = pfn_to_mfn(start_info.store_mfn);
    start_info.console.domU.mfn = pfn_to_mfn(start_info.console.domU.mfn);
  }

  /* Reinitialise several things */
  trap_init();
  init_events();

  /* ENABLE EVENT DELIVERY. This is disabled at start of day. */
  local_irq_enable();

  setup_xen_features();
  HYPERVISOR_shared_info = map_shared_info(start_info.shared_info);

  /* Set up event and failsafe callback addresses. */
  HYPERVISOR_set_callbacks(
						   (unsigned long)hypervisor_callback,
						   (unsigned long)failsafe_callback, 0);

  init_time();
  arch_rebuild_p2m();

  unmask_evtchn(start_info.console.domU.evtchn);
  unmask_evtchn(start_info.store_evtchn);
#endif
  CAMLreturn(Val_int(cancelled));
}
Example #5
/* Assume pteval_t is equivalent to all the other *val_t types. */
static pteval_t pte_mfn_to_pfn(pteval_t val)
{
	if (val & _PAGE_PRESENT) {
		unsigned long mfn = (val & PTE_PFN_MASK) >> PAGE_SHIFT;
		pteval_t flags = val & PTE_FLAGS_MASK;
		val = ((pteval_t)mfn_to_pfn(mfn) << PAGE_SHIFT) | flags;
	}

	return val;
}
Example #6
static int __init xen_hvc_init(void)
{
	struct hvc_struct *hp;
	struct hv_ops *ops;

	if (!xen_pv_domain())
		return -ENODEV;

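	/* dom0 receives console notifications via VIRQ_CONSOLE; a domU
	 * binds the event channel advertised in start_info instead. */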
	if (xen_initial_domain()) {
		ops = &dom0_hvc_ops;
		xencons_irq = bind_virq_to_irq(VIRQ_CONSOLE, 0);
	} else {
		if (!xen_start_info->console.domU.evtchn)
			return -ENODEV;

		ops = &domU_hvc_ops;
		xencons_irq = bind_evtchn_to_irq(xen_start_info->console.domU.evtchn);
	}
	if (xencons_irq < 0)
		xencons_irq = 0; /* NO_IRQ */
	else
		irq_set_noprobe(xencons_irq);

	hp = hvc_alloc(HVC_COOKIE, xencons_irq, ops, 256);
	if (IS_ERR(hp))
		return PTR_ERR(hp);

	hvc = hp;

	console_pfn = mfn_to_pfn(xen_start_info->console.domU.mfn);

	return 0;
}
Example #7
static int xsd_kva_mmap(struct file *file, struct vm_area_struct *vma)
{
	size_t size = vma->vm_end - vma->vm_start;
	int old;
	int rc;

	old = atomic_cmpxchg(&xenbus_xsd_state,
	                   XENBUS_XSD_UNCOMMITTED,
	                   XENBUS_XSD_LOCAL_INIT);
	switch (old) {
		case XENBUS_XSD_UNCOMMITTED:
			rc = xb_init_comms();
			if (rc != 0)
				return rc;
			break;

		case XENBUS_XSD_FOREIGN_INIT:
		case XENBUS_XSD_FOREIGN_READY:
			return -EBUSY;
		
		case XENBUS_XSD_LOCAL_INIT:
		case XENBUS_XSD_LOCAL_READY:
		default:
			break;
	}

	if ((size > PAGE_SIZE) || (vma->vm_pgoff != 0))
		return -EINVAL;

	if (remap_pfn_range(vma, vma->vm_start, mfn_to_pfn(xen_store_mfn),
			    size, vma->vm_page_prot))
		return -EAGAIN;

	return 0;
}
Example #8
static inline phys_addr_t xen_bus_to_phys(dma_addr_t baddr)
{
	unsigned long pfn = mfn_to_pfn(PFN_DOWN(baddr));
	dma_addr_t dma = (dma_addr_t)pfn << PAGE_SHIFT;
	phys_addr_t paddr = dma;

	paddr |= baddr & ~PAGE_MASK;

	return paddr;
}
Example #9
static inline phys_addr_t xen_bus_to_phys(dma_addr_t baddr)
{
	unsigned long pfn = mfn_to_pfn(PFN_DOWN(baddr));
	dma_addr_t dma = (dma_addr_t)pfn << PAGE_SHIFT;
	phys_addr_t paddr = dma;

	BUG_ON(paddr != dma); /* truncation has occurred, should never happen */

	paddr |= baddr & ~PAGE_MASK;

	return paddr;
}
Example #10
static pteval_t pte_mfn_to_pfn(pteval_t val)
{
	if (val & _PAGE_PRESENT) {
		unsigned long mfn = (val & PTE_PFN_MASK) >> PAGE_SHIFT;
		unsigned long pfn = mfn_to_pfn(mfn);

		pteval_t flags = val & PTE_FLAGS_MASK;
		if (unlikely(pfn == ~0))
			val = flags & ~_PAGE_PRESENT;
		else
			val = ((pteval_t)pfn << PAGE_SHIFT) | flags;
	}

	return val;
}
Example #11
static int xsd_kva_mmap(struct file *file, struct vm_area_struct *vma)
{
	size_t size = vma->vm_end - vma->vm_start;

	if ((size > PAGE_SIZE) || (vma->vm_pgoff != 0))
		return -EINVAL;

	if (remap_pfn_range(vma, vma->vm_start, mfn_to_pfn(xen_store_mfn),
			    size, vma->vm_page_prot))
		return -EAGAIN;

	return 0;
}
Example #12
static int __init xen_free_mfn(unsigned long mfn)
{
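	/* Hand exactly one machine frame back to the hypervisor. */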
	struct xen_memory_reservation reservation = {
		.address_bits = 0,
		.extent_order = 0,
		.domid        = DOMID_SELF
	};

	set_xen_guest_handle(reservation.extent_start, &mfn);
	reservation.nr_extents = 1;

	return HYPERVISOR_memory_op(XENMEM_decrease_reservation, &reservation);
}

/*
 * This releases a chunk of memory and then does the identity map. It's used
 * as a fallback if the remapping fails.
 */
static void __init xen_set_identity_and_release_chunk(unsigned long start_pfn,
			unsigned long end_pfn, unsigned long nr_pages)
{
	unsigned long pfn, end;
	int ret;

	WARN_ON(start_pfn > end_pfn);

	/* Release pages first. */
	end = min(end_pfn, nr_pages);
	for (pfn = start_pfn; pfn < end; pfn++) {
		unsigned long mfn = pfn_to_mfn(pfn);

		/* Make sure pfn exists to start with */
		if (mfn == INVALID_P2M_ENTRY || mfn_to_pfn(mfn) != pfn)
			continue;

		ret = xen_free_mfn(mfn);
		WARN(ret != 1, "Failed to release pfn %lx err=%d\n", pfn, ret);

		if (ret == 1) {
			xen_released_pages++;
			if (!__set_phys_to_machine(pfn, INVALID_P2M_ENTRY))
				break;
		} else
			break;
	}

	set_phys_range_identity(start_pfn, end_pfn);
}
Example #13
static int __init xen_init(void)
{
	struct hvc_struct *hp;

	if (!xen_pv_domain() ||
	    xen_initial_domain() ||
	    !xen_start_info->console.domU.evtchn)
		return -ENODEV;

	xencons_irq = bind_evtchn_to_irq(xen_start_info->console.domU.evtchn);
	if (xencons_irq < 0)
		xencons_irq = 0; /* NO_IRQ */

	hp = hvc_alloc(HVC_COOKIE, xencons_irq, &hvc_ops, 256);
	if (IS_ERR(hp))
		return PTR_ERR(hp);

	hvc = hp;

	console_pfn = mfn_to_pfn(xen_start_info->console.domU.mfn);

	return 0;
}
Example #14
/*
 * blkif_queue_request
 *
 * request block io
 *
 * id: for guest use only.
 * operation: BLKIF_OP_{READ,WRITE,PROBE}
 * buffer: buffer to read/write into. this should be a
 *   virtual address in the guest os.
 */
int ixp_queue_request(struct app_request *app_req, void *metadata)
{
	struct ixpfront_info *info = (struct ixpfront_info *) metadata;
	unsigned long buffer_mfn;
	struct ixp_request *ring_req;
	char *req_page = NULL, *curr_pos;
	unsigned long id;
	int ref, err;
	grant_ref_t gref_head;

	if (unlikely(info->connected != IXP_STATE_CONNECTED))
		return 1;

	if (RING_FULL(&info->ring)) {
		printk(KERN_ERR "%s: Ring full - returning backpressure\n", __func__);
		return 1;
	}

	if (gnttab_alloc_grant_references(
		IXPIF_MAX_SEGMENTS_PER_REQUEST, &gref_head) < 0) {
		/*gnttab_request_free_callback(
			&info->callback,
			ixp_restart_queue_callback,
			info,
			IXP_MAX_SEGMENTS_PER_REQUEST);*/
		return 1; 
	}

	/* Fill out a communications ring structure. */
	ring_req = RING_GET_REQUEST(&info->ring, info->ring.req_prod_pvt);
	id = get_id_from_freelist(info);

	ring_req->id = id;
	ring_req->handle = info->handle;

	ring_req->operation = IXP_OP_3DES_ENCRYPT;

	ring_req->nr_segments = 1;
	BUG_ON(ring_req->nr_segments > IXPIF_MAX_SEGMENTS_PER_REQUEST);

	req_page = (char *)__get_free_page(GFP_NOIO | __GFP_HIGH);
	if (req_page == NULL) {
		printk(KERN_ERR "%s: error allocating memory\n", __func__);
		/* Don't leak the grant references reserved above. */
		gnttab_free_grant_references(gref_head);
		return 1;
	}

	((struct des_request *)req_page)->key_size = app_req->key_size;
	((struct des_request *)req_page)->iv_size = app_req->iv_size;
	((struct des_request *)req_page)->msg_size = app_req->msg_size;

	curr_pos = req_page + sizeof(struct des_request);
	memcpy(curr_pos, app_req->key, app_req->key_size);
	curr_pos += app_req->key_size;

	memcpy(curr_pos, app_req->iv, app_req->iv_size);
	curr_pos += app_req->iv_size;

	memcpy(curr_pos, app_req->msg, app_req->msg_size);
	curr_pos += app_req->msg_size;

	buffer_mfn = virt_to_mfn(req_page);

	/* Install a grant reference. */
	ref = gnttab_claim_grant_reference(&gref_head);
	BUG_ON(ref == -ENOSPC);

	gnttab_grant_foreign_access_ref(
	      ref,
	      info->xbdev->otherend_id,
	      buffer_mfn,
	      0);
	
	info->shadow[id].r_params.presp = app_req->presp;
	info->shadow[id].r_params.callbk_tag = app_req->callbk_tag;
	info->shadow[id].frame[0] = mfn_to_pfn(buffer_mfn);
	info->shadow[id].req_page = req_page;

	ring_req->seg[0] =
	      (struct ixp_request_segment) {
		.gref       = ref
	      };

	info->ring.req_prod_pvt++;

	/* Keep a private copy so we can reissue requests when recovering. */
	info->shadow[id].req = *ring_req;

	flush_requests(info);

	//gnttab_free_grant_references(gref_head);

	return 0;
}
Example #15
static unsigned long __init xen_release_chunk(phys_addr_t start_addr,
					      phys_addr_t end_addr)
{
	struct xen_memory_reservation reservation = {
		.address_bits = 0,
		.extent_order = 0,
		.domid        = DOMID_SELF
	};
	unsigned long start, end;
	unsigned long len = 0;
	unsigned long pfn;
	int ret;

	start = PFN_UP(start_addr);
	end = PFN_DOWN(end_addr);

	if (end <= start)
		return 0;

	printk(KERN_INFO "xen_release_chunk: looking at area pfn %lx-%lx: ",
	       start, end);
	for (pfn = start; pfn < end; pfn++) {
		unsigned long mfn = pfn_to_mfn(pfn);

		/* Make sure pfn exists to start with */
		if (mfn == INVALID_P2M_ENTRY || mfn_to_pfn(mfn) != pfn)
			continue;

		set_xen_guest_handle(reservation.extent_start, &mfn);
		reservation.nr_extents = 1;

		ret = HYPERVISOR_memory_op(XENMEM_decrease_reservation,
					   &reservation);
		WARN(ret != 1, "Failed to release memory %lx-%lx err=%d\n",
		     start, end, ret);
		if (ret == 1) {
			__set_phys_to_machine(pfn, INVALID_P2M_ENTRY);
			len++;
		}
	}
	printk(KERN_CONT "%ld pages freed\n", len);

	return len;
}

static unsigned long __init xen_return_unused_memory(unsigned long max_pfn,
						     const struct e820map *e820)
{
	phys_addr_t max_addr = PFN_PHYS(max_pfn);
	phys_addr_t last_end = ISA_END_ADDRESS;
	unsigned long released = 0;
	int i;

	/* Free any unused memory above the low 1Mbyte. */
	for (i = 0; i < e820->nr_map && last_end < max_addr; i++) {
		phys_addr_t end = e820->map[i].addr;
		end = min(max_addr, end);

		if (last_end < end)
			released += xen_release_chunk(last_end, end);
		last_end = max(last_end, e820->map[i].addr + e820->map[i].size);
	}

	if (last_end < max_addr)
		released += xen_release_chunk(last_end, max_addr);

	printk(KERN_INFO "released %ld pages of unused memory\n", released);
	return released;
}
Example #16
static unsigned long __init xen_do_chunk(unsigned long start,
					 unsigned long end, bool release)
{
	struct xen_memory_reservation reservation = {
		.address_bits = 0,
		.extent_order = 0,
		.domid        = DOMID_SELF
	};
	unsigned long len = 0;
	unsigned long pfn;
	int ret;

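	/*
	 * Walk the range one frame at a time: releasing hands the backing
	 * MFN to Xen, populating asks Xen to back a currently-empty PFN.
	 */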
	for (pfn = start; pfn < end; pfn++) {
		unsigned long frame;
		unsigned long mfn = pfn_to_mfn(pfn);

		if (release) {
			/* Make sure pfn exists to start with */
			if (mfn == INVALID_P2M_ENTRY || mfn_to_pfn(mfn) != pfn)
				continue;
			frame = mfn;
		} else {
			if (mfn != INVALID_P2M_ENTRY)
				continue;
			frame = pfn;
		}
		set_xen_guest_handle(reservation.extent_start, &frame);
		reservation.nr_extents = 1;

		ret = HYPERVISOR_memory_op(release ? XENMEM_decrease_reservation : XENMEM_populate_physmap,
					   &reservation);
		WARN(ret != 1, "Failed to %s pfn %lx err=%d\n",
		     release ? "release" : "populate", pfn, ret);

		if (ret == 1) {
			if (!early_set_phys_to_machine(pfn, release ? INVALID_P2M_ENTRY : frame)) {
				if (release)
					break;
				set_xen_guest_handle(reservation.extent_start, &frame);
				reservation.nr_extents = 1;
				ret = HYPERVISOR_memory_op(XENMEM_decrease_reservation,
							   &reservation);
				break;
			}
			len++;
		} else
			break;
	}
	if (len)
		printk(KERN_INFO "%s %lx-%lx pfn range: %lu pages %s\n",
		       release ? "Freeing" : "Populating",
		       start, end, len,
		       release ? "freed" : "added");

	return len;
}

static unsigned long __init xen_release_chunk(unsigned long start,
					      unsigned long end)
{
	return xen_do_chunk(start, end, true);
}

static unsigned long __init xen_populate_chunk(
	const struct e820entry *list, size_t map_size,
	unsigned long max_pfn, unsigned long *last_pfn,
	unsigned long credits_left)
{
	const struct e820entry *entry;
	unsigned int i;
	unsigned long done = 0;
	unsigned long dest_pfn;

	for (i = 0, entry = list; i < map_size; i++, entry++) {
		unsigned long s_pfn;
		unsigned long e_pfn;
		unsigned long pfns;
		long capacity;

		if (credits_left <= 0)
			break;

		if (entry->type != E820_RAM)
			continue;

		e_pfn = PFN_DOWN(entry->addr + entry->size);

		/* We only care about E820 after the xen_start_info->nr_pages */
		if (e_pfn <= max_pfn)
			continue;

		s_pfn = PFN_UP(entry->addr);
		/* If the E820 falls within the nr_pages, we want to start
		 * at the nr_pages PFN.
		 * If that would mean going past the E820 entry, skip it
		 */
		if (s_pfn <= max_pfn) {
			capacity = e_pfn - max_pfn;
			dest_pfn = max_pfn;
		} else {
			capacity = e_pfn - s_pfn;
			dest_pfn = s_pfn;
		}

		if (credits_left < capacity)
			capacity = credits_left;

		pfns = xen_do_chunk(dest_pfn, dest_pfn + capacity, false);
		done += pfns;
		*last_pfn = (dest_pfn + pfns);
		if (pfns < capacity)
			break;
		credits_left -= pfns;
	}
	return done;
}

static void __init xen_set_identity_and_release_chunk(
	unsigned long start_pfn, unsigned long end_pfn, unsigned long nr_pages,
	unsigned long *released, unsigned long *identity)
{
	unsigned long pfn;

	/*
	 * If the PFNs are currently mapped, the VA mapping also needs
	 * to be updated to be 1:1.
	 */
	for (pfn = start_pfn; pfn <= max_pfn_mapped && pfn < end_pfn; pfn++)
		(void)HYPERVISOR_update_va_mapping(
			(unsigned long)__va(pfn << PAGE_SHIFT),
			mfn_pte(pfn, PAGE_KERNEL_IO), 0);

	if (start_pfn < nr_pages)
		*released += xen_release_chunk(
			start_pfn, min(end_pfn, nr_pages));

	*identity += set_phys_range_identity(start_pfn, end_pfn);
}
Example #17
static int increase_reservation(unsigned long nr_pages)
{
	unsigned long  pfn, i, flags;
	struct page   *page;
	long           rc;
	struct xen_memory_reservation reservation = {
		.address_bits = 0,
		.extent_order = 0,
		.domid        = DOMID_SELF
	};

	if (nr_pages > ARRAY_SIZE(frame_list))
		nr_pages = ARRAY_SIZE(frame_list);

	spin_lock_irqsave(&balloon_lock, flags);

	page = balloon_first_page();
	for (i = 0; i < nr_pages; i++) {
		BUG_ON(page == NULL);
		frame_list[i] = page_to_pfn(page);
		page = balloon_next_page(page);
	}

	set_xen_guest_handle(reservation.extent_start, frame_list);
	reservation.nr_extents = nr_pages;
	rc = HYPERVISOR_memory_op(XENMEM_populate_physmap, &reservation);
	if (rc < 0)
		goto out;

	for (i = 0; i < rc; i++) {
		page = balloon_retrieve();
		BUG_ON(page == NULL);

		pfn = page_to_pfn(page);
		BUG_ON(!xen_feature(XENFEAT_auto_translated_physmap) &&
		       phys_to_machine_mapping_valid(pfn));

		set_phys_to_machine(pfn, frame_list[i]);

		/* Link back into the page tables if not highmem. */
#ifdef CONFIG_PVM
		if (!xen_hvm_domain() && pfn < max_low_pfn) {
			int ret;
			ret = HYPERVISOR_update_va_mapping(
				(unsigned long)__va(pfn << PAGE_SHIFT),
				mfn_pte(frame_list[i], PAGE_KERNEL),
				0);
			BUG_ON(ret);
		}
#endif
		/* Relinquish the page back to the allocator. */
		ClearPageReserved(page);
		init_page_count(page);
		__free_page(page);
	}

	balloon_stats.current_pages += rc;
	if (old_totalram_pages + rc < totalram_pages) {
		printk(KERN_INFO "old_totalram=%luKB, totalram_pages=%luKB\n",
		       old_totalram_pages*4, totalram_pages*4);
		/* Memory came online while ballooning; resync the bias. */
		balloon_stats.current_pages = totalram_pages + totalram_bias;
		printk(KERN_INFO "when ballooning, the mem online! totalram=%luKB, current=%luKB\n",
		       totalram_pages*4, balloon_stats.current_pages*4);
	}
	old_totalram_pages = totalram_pages;

 out:
	spin_unlock_irqrestore(&balloon_lock, flags);

	return rc < 0 ? rc : rc != nr_pages;
}

static int decrease_reservation(unsigned long nr_pages)
{
	unsigned long  pfn, i, flags;
	struct page   *page;
	int            need_sleep = 0;
	int ret;
	struct xen_memory_reservation reservation = {
		.address_bits = 0,
		.extent_order = 0,
		.domid        = DOMID_SELF
	};

	if (nr_pages > ARRAY_SIZE(frame_list))
		nr_pages = ARRAY_SIZE(frame_list);

	for (i = 0; i < nr_pages; i++) {
		if ((page = alloc_page(GFP_BALLOON)) == NULL) {
			nr_pages = i;
			need_sleep = 1;
			break;
		}

		pfn = page_to_pfn(page);
		frame_list[i] = pfn_to_mfn(pfn);

		scrub_page(page);

		if (!xen_hvm_domain() && !PageHighMem(page)) {
			ret = HYPERVISOR_update_va_mapping(
				(unsigned long)__va(pfn << PAGE_SHIFT),
				__pte_ma(0), 0);
			BUG_ON(ret);
		}
	}

	/* Ensure that ballooned highmem pages don't have kmaps. */
#ifdef CONFIG_PVM
	kmap_flush_unused();
	flush_tlb_all();
#endif
	spin_lock_irqsave(&balloon_lock, flags);

	/* No more mappings: invalidate P2M and add to balloon. */
	for (i = 0; i < nr_pages; i++) {
		pfn = mfn_to_pfn(frame_list[i]);
		set_phys_to_machine(pfn, INVALID_P2M_ENTRY);
		balloon_append(pfn_to_page(pfn));
	}

	set_xen_guest_handle(reservation.extent_start, frame_list);
	reservation.nr_extents   = nr_pages;
	ret = HYPERVISOR_memory_op(XENMEM_decrease_reservation, &reservation);
	BUG_ON(ret != nr_pages);

	balloon_stats.current_pages -= nr_pages;
	if (old_totalram_pages < totalram_pages + nr_pages) {
		printk(KERN_INFO "old_totalram=%luKB, totalram_pages=%luKB\n",
		       old_totalram_pages*4, totalram_pages*4);
		balloon_stats.current_pages = totalram_pages + totalram_bias;
		printk(KERN_INFO "when ballooning, the mem online! totalram=%luKB, current=%luKB\n",
		       totalram_pages*4, balloon_stats.current_pages*4);
	}
	old_totalram_pages = totalram_pages;

	spin_unlock_irqrestore(&balloon_lock, flags);

	return need_sleep;
}

/*
 * We avoid multiple worker processes conflicting via the balloon mutex.
 * We may of course race updates of the target counts (which are protected
 * by the balloon lock), or with changes to the Xen hard limit, but we will
 * recover from these in time.
 */
static void balloon_process(struct work_struct *work)
{
	int need_sleep = 0;
	long credit;
	long total_increase = 0;
	char buffer[16];

	mutex_lock(&balloon_mutex);
	printk(KERN_INFO "totalram_pages=%luKB, current_pages=%luKB,totalram_bias=%luKB\n",
	       totalram_pages*4, balloon_stats.current_pages*4, totalram_bias*4);

	if (totalram_pages > old_totalram_pages) {
		/* TODO: we only know that totalram_pages has increased. */
		total_increase = (totalram_pages - old_totalram_pages) % GB2PAGE;
		if (totalram_bias > total_increase)
			totalram_bias = totalram_bias - total_increase;
		balloon_stats.current_pages = totalram_pages + totalram_bias;
		old_totalram_pages = totalram_pages;
	}
	printk(KERN_INFO "totalram_pages=%luKB, current_pages=%luKB, totalram_bias=%luKB,total_increase=%ld\n",
	       totalram_pages*4, balloon_stats.current_pages*4, totalram_bias*4, total_increase*4);
	xenbus_write(XBT_NIL, "control/uvp", "Balloon_flag", "1");
	do {
		credit = current_target() - balloon_stats.current_pages;
		if (credit > 0)
			need_sleep = (increase_reservation(credit) != 0);
		if (credit < 0)
			need_sleep = (decrease_reservation(-credit) != 0);

#ifndef CONFIG_PREEMPT
		if (need_resched())
			schedule();
#endif
	} while ((credit != 0) && !need_sleep);

	/* Schedule more work if there is some still to be done. */
	if (current_target() != balloon_stats.current_pages) {
		mod_timer(&balloon_timer, jiffies + HZ);
		sprintf(buffer, "%lu",
			balloon_stats.current_pages << (PAGE_SHIFT - 10));
		xenbus_write(XBT_NIL, "memory", "target", buffer);
	}
	xenbus_write(XBT_NIL, "control/uvp", "Balloon_flag", "0");
	mutex_unlock(&balloon_mutex);
}
Example #18
static unsigned long __init xen_release_chunk(unsigned long start,
					      unsigned long end)
{
	struct xen_memory_reservation reservation = {
		.address_bits = 0,
		.extent_order = 0,
		.domid        = DOMID_SELF
	};
	unsigned long len = 0;
	unsigned long pfn;
	int ret;

	for (pfn = start; pfn < end; pfn++) {
		unsigned long mfn = pfn_to_mfn(pfn);

		/* Make sure pfn exists to start with */
		if (mfn == INVALID_P2M_ENTRY || mfn_to_pfn(mfn) != pfn)
			continue;

		set_xen_guest_handle(reservation.extent_start, &mfn);
		reservation.nr_extents = 1;

		ret = HYPERVISOR_memory_op(XENMEM_decrease_reservation,
					   &reservation);
		WARN(ret != 1, "Failed to release pfn %lx err=%d\n", pfn, ret);
		if (ret == 1) {
			__set_phys_to_machine(pfn, INVALID_P2M_ENTRY);
			len++;
		}
	}
	printk(KERN_INFO "Freeing  %lx-%lx pfn range: %lu pages freed\n",
	       start, end, len);

	return len;
}

static unsigned long __init xen_set_identity_and_release(
	const struct e820entry *list, size_t map_size, unsigned long nr_pages)
{
	phys_addr_t start = 0;
	unsigned long released = 0;
	unsigned long identity = 0;
	const struct e820entry *entry;
	int i;

	/*
	 * Combine non-RAM regions and gaps until a RAM region (or the
	 * end of the map) is reached, then set the 1:1 map and
	 * release the pages (if available) in those non-RAM regions.
	 *
	 * The combined non-RAM regions are rounded to a whole number
	 * of pages so any partial pages are accessible via the 1:1
	 * mapping.  This is needed for some BIOSes that put (for
	 * example) the DMI tables in a reserved region that begins on
	 * a non-page boundary.
	 */
	for (i = 0, entry = list; i < map_size; i++, entry++) {
		phys_addr_t end = entry->addr + entry->size;

		if (entry->type == E820_RAM || i == map_size - 1) {
			unsigned long start_pfn = PFN_DOWN(start);
			unsigned long end_pfn = PFN_UP(end);

			if (entry->type == E820_RAM)
				end_pfn = PFN_UP(entry->addr);

			if (start_pfn < end_pfn) {
				if (start_pfn < nr_pages)
					released += xen_release_chunk(
						start_pfn, min(end_pfn, nr_pages));

				identity += set_phys_range_identity(
					start_pfn, end_pfn);
			}
			start = end;
		}
	}

	printk(KERN_INFO "Released %lu pages of unused memory\n", released);
	printk(KERN_INFO "Set %ld page(s) to 1-1 mapping\n", identity);

	return released;
}
Example #19
static int increase_reservation(unsigned long nr_pages)
{
	unsigned long  pfn, i, flags;
	struct page   *page;
	long           rc;
	struct xen_memory_reservation reservation = {
		.address_bits = 0,
		.extent_order = 0,
		.domid        = DOMID_SELF
	};

	if (nr_pages > ARRAY_SIZE(frame_list))
		nr_pages = ARRAY_SIZE(frame_list);

	balloon_lock(flags);

	page = balloon_first_page();
	for (i = 0; i < nr_pages; i++) {
		BUG_ON(page == NULL);
		frame_list[i] = page_to_pfn(page);
		page = balloon_next_page(page);
	}

	set_xen_guest_handle(reservation.extent_start, frame_list);
	reservation.nr_extents   = nr_pages;
	rc = HYPERVISOR_memory_op(
		XENMEM_populate_physmap, &reservation);
	if (rc < nr_pages) {
		int ret;
		/* We hit the Xen hard limit: reprobe. */
		set_xen_guest_handle(reservation.extent_start, frame_list);
		reservation.nr_extents   = rc;
		ret = HYPERVISOR_memory_op(XENMEM_decrease_reservation,
				&reservation);
		BUG_ON(ret != rc);
		hard_limit = current_pages + rc - driver_pages;
		goto out;
	}

	for (i = 0; i < nr_pages; i++) {
		page = balloon_retrieve();
		BUG_ON(page == NULL);

		pfn = page_to_pfn(page);
		BUG_ON(!xen_feature(XENFEAT_auto_translated_physmap) &&
		       phys_to_machine_mapping_valid(pfn));

		/* Update P->M and M->P tables. */
		set_phys_to_machine(pfn, frame_list[i]);

#ifdef CONFIG_XEN
		xen_machphys_update(frame_list[i], pfn);

		/* Link back into the page tables if not highmem. */
		if (pfn < max_low_pfn) {
			int ret;
			ret = HYPERVISOR_update_va_mapping(
				(unsigned long)__va(pfn << PAGE_SHIFT),
				pfn_pte_ma(frame_list[i], PAGE_KERNEL),
				0);
			BUG_ON(ret);
		}
#endif
		/* Relinquish the page back to the allocator. */
		ClearPageReserved(page);
		set_page_count(page, 1);
		__free_page(page);
	}

	current_pages += nr_pages;
	totalram_pages = current_pages;

 out:
	balloon_unlock(flags);

	return 0;
}

static int decrease_reservation(unsigned long nr_pages)
{
	unsigned long  pfn, i, flags;
	struct page   *page;
	void          *v;
	int            need_sleep = 0;
	int ret;
	struct xen_memory_reservation reservation = {
		.address_bits = 0,
		.extent_order = 0,
		.domid        = DOMID_SELF
	};

	if (nr_pages > ARRAY_SIZE(frame_list))
		nr_pages = ARRAY_SIZE(frame_list);

	for (i = 0; i < nr_pages; i++) {
		if ((page = alloc_page(GFP_BALLOON)) == NULL) {
			nr_pages = i;
			need_sleep = 1;
			break;
		}

		pfn = page_to_pfn(page);
		frame_list[i] = pfn_to_mfn(pfn);

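		/* Scrub the page so no guest data leaks back into Xen's free
		 * pool, then drop our virtual mapping of it. */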
		if (!PageHighMem(page)) {
			v = phys_to_virt(pfn << PAGE_SHIFT);
			scrub_pages(v, 1);
#ifdef CONFIG_XEN
			ret = HYPERVISOR_update_va_mapping(
				(unsigned long)v, __pte_ma(0), 0);
			BUG_ON(ret);
#endif
		}
#ifdef CONFIG_XEN_SCRUB_PAGES
		else {
			v = kmap(page);
			scrub_pages(v, 1);
			kunmap(page);
		}
#endif
	}

#ifdef CONFIG_XEN
	/* Ensure that ballooned highmem pages don't have kmaps. */
	kmap_flush_unused();
	flush_tlb_all();
#endif

	balloon_lock(flags);

	/* No more mappings: invalidate P2M and add to balloon. */
	for (i = 0; i < nr_pages; i++) {
		pfn = mfn_to_pfn(frame_list[i]);
		set_phys_to_machine(pfn, INVALID_P2M_ENTRY);
		balloon_append(pfn_to_page(pfn));
	}

	set_xen_guest_handle(reservation.extent_start, frame_list);
	reservation.nr_extents   = nr_pages;
	ret = HYPERVISOR_memory_op(XENMEM_decrease_reservation, &reservation);
	BUG_ON(ret != nr_pages);

	current_pages -= nr_pages;
	totalram_pages = current_pages;

	balloon_unlock(flags);

	return need_sleep;
}

/*
 * We avoid multiple worker processes conflicting via the balloon mutex.
 * We may of course race updates of the target counts (which are protected
 * by the balloon lock), or with changes to the Xen hard limit, but we will
 * recover from these in time.
 */
static void balloon_process(void *unused)
{
	int need_sleep = 0;
	long credit;

	down(&balloon_mutex);

	do {
		credit = current_target() - current_pages;
		if (credit > 0)
			need_sleep = (increase_reservation(credit) != 0);
		if (credit < 0)
			need_sleep = (decrease_reservation(-credit) != 0);

#ifndef CONFIG_PREEMPT
		if (need_resched())
			schedule();
#endif
	} while ((credit != 0) && !need_sleep);

	/* Schedule more work if there is some still to be done. */
	if (current_target() != current_pages)
		mod_timer(&balloon_timer, jiffies + HZ);

	up(&balloon_mutex);
}
Example #20
static unsigned long __init xen_release_chunk(unsigned long start,
					      unsigned long end)
{
	struct xen_memory_reservation reservation = {
		.address_bits = 0,
		.extent_order = 0,
		.domid        = DOMID_SELF
	};
	unsigned long len = 0;
	unsigned long pfn;
	int ret;

	for (pfn = start; pfn < end; pfn++) {
		unsigned long mfn = pfn_to_mfn(pfn);

		/* Make sure pfn exists to start with */
		if (mfn == INVALID_P2M_ENTRY || mfn_to_pfn(mfn) != pfn)
			continue;

		set_xen_guest_handle(reservation.extent_start, &mfn);
		reservation.nr_extents = 1;

		ret = HYPERVISOR_memory_op(XENMEM_decrease_reservation,
					   &reservation);
		WARN(ret != 1, "Failed to release pfn %lx err=%d\n", pfn, ret);
		if (ret == 1) {
			__set_phys_to_machine(pfn, INVALID_P2M_ENTRY);
			len++;
		}
	}
	printk(KERN_INFO "Freeing  %lx-%lx pfn range: %lu pages freed\n",
	       start, end, len);

	return len;
}

static unsigned long __init xen_set_identity_and_release(
	const struct e820entry *list, size_t map_size, unsigned long nr_pages)
{
	phys_addr_t start = 0;
	unsigned long released = 0;
	unsigned long identity = 0;
	const struct e820entry *entry;
	int i;

	/*
	 * Combine non-RAM regions and gaps until a RAM region (or the
	 * end of the map) is reached, then set the 1:1 map and
	 * release the pages (if available) in those non-RAM regions.
	 *
	 * The combined non-RAM regions are rounded to a whole number
	 * of pages so any partial pages are accessible via the 1:1
	 * mapping.  This is needed for some BIOSes that put (for
	 * example) the DMI tables in a reserved region that begins on
	 * a non-page boundary.
	 */
	for (i = 0, entry = list; i < map_size; i++, entry++) {
		phys_addr_t end = entry->addr + entry->size;

		if (entry->type == E820_RAM || i == map_size - 1) {
			unsigned long start_pfn = PFN_DOWN(start);
			unsigned long end_pfn = PFN_UP(end);

			if (entry->type == E820_RAM)
				end_pfn = PFN_UP(entry->addr);

			if (start_pfn < end_pfn) {
				if (start_pfn < nr_pages)
					released += xen_release_chunk(
						start_pfn, min(end_pfn, nr_pages));

				identity += set_phys_range_identity(
					start_pfn, end_pfn);
			}
			start = end;
		}
	}

	printk(KERN_INFO "Released %lu pages of unused memory\n", released);
	printk(KERN_INFO "Set %ld page(s) to 1-1 mapping\n", identity);

	return released;
}
Example #21
static int __do_suspend(void *ignore)
{
	int i, j, k, fpp, err;

	extern unsigned long max_pfn;
	extern unsigned long *pfn_to_mfn_frame_list_list;
	extern unsigned long *pfn_to_mfn_frame_list[];

	extern void time_resume(void);

	BUG_ON(smp_processor_id() != 0);
	BUG_ON(in_interrupt());

	if (xen_feature(XENFEAT_auto_translated_physmap)) {
		printk(KERN_WARNING "Cannot suspend in "
		       "auto_translated_physmap mode.\n");
		return -EOPNOTSUPP;
	}

	err = smp_suspend();
	if (err)
		return err;

	xenbus_suspend();

	preempt_disable();

#ifdef __i386__
	kmem_cache_shrink(pgd_cache);
#endif
	mm_pin_all();

	__cli();
	preempt_enable();

	gnttab_suspend();

	HYPERVISOR_shared_info = (shared_info_t *)empty_zero_page;
	clear_fixmap(FIX_SHARED_INFO);

	xen_start_info->store_mfn = mfn_to_pfn(xen_start_info->store_mfn);
	xen_start_info->console_mfn = mfn_to_pfn(xen_start_info->console_mfn);

	/*
	 * We'll stop somewhere inside this hypercall. When it returns,
	 * we'll start resuming after the restore.
	 */
	HYPERVISOR_suspend(virt_to_mfn(xen_start_info));

	shutting_down = SHUTDOWN_INVALID;

	set_fixmap(FIX_SHARED_INFO, xen_start_info->shared_info);

	HYPERVISOR_shared_info = (shared_info_t *)fix_to_virt(FIX_SHARED_INFO);

	memset(empty_zero_page, 0, PAGE_SIZE);

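	/*
	 * The machine frames backing the P2M table changed across the
	 * restore, so rebuild the pfn-to-mfn frame lists that the tools
	 * and the hypervisor use to locate it.
	 */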
	HYPERVISOR_shared_info->arch.pfn_to_mfn_frame_list_list =
		virt_to_mfn(pfn_to_mfn_frame_list_list);

	fpp = PAGE_SIZE/sizeof(unsigned long);
	for (i = 0, j = 0, k = -1; i < max_pfn; i += fpp, j++) {
		if ((j % fpp) == 0) {
			k++;
			pfn_to_mfn_frame_list_list[k] =
				virt_to_mfn(pfn_to_mfn_frame_list[k]);
			j = 0;
		}
		pfn_to_mfn_frame_list[k][j] =
			virt_to_mfn(&phys_to_machine_mapping[i]);
	}
	HYPERVISOR_shared_info->arch.max_pfn = max_pfn;

	gnttab_resume();

	irq_resume();

	time_resume();

	switch_idle_mm();

	__sti();

	xencons_resume();

	xenbus_resume();

	smp_resume();

	return err;
}
Example #22
//void free_pte_page(struct mmu_gather* tlb, struct page *pte)
void free_pte_page(struct page *pte)
{
    struct ptrpte_t *newstruct = NULL;
    struct ptrpte_t *temp_head = NULL;
    int i = 0;
    int counter = 0;
    
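    /*
     * Freed pte pages go onto a global free list instead of straight
     * back to the buddy allocator; once the list grows large enough a
     * batch is handed to the hypervisor below.
     */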
    newstruct = kmalloc(sizeof(struct ptrpte_t), GFP_KERNEL);
    if (newstruct == NULL)
        return;
    newstruct->content = pte;
    //newstruct->mmu_tlb = tlb;

    spin_lock(&pte_cache_lock);
    newstruct->next = pte_head;
    pte_head = newstruct;
    temp_head = pte_head;
    
    /*free node */
    if(pte_used_counter)
    	pte_used_counter--;
    pte_free_counter++;
    //spin_unlock(&pte_cache_lock);
   
    if(pte_used_counter)
    { 
    	//if((pte_free_counter/pte_used_counter>=8) && ((pte_used_counter + pte_free_counter) >= 2100))
    	//if(pte_used_counter + pte_free_counter >= 2100)
    	//if((pte_used_counter/pte_free_counter < 1) && (pte_used_counter >= 63))
    	if((pte_free_counter/pte_used_counter > 4) && ((pte_used_counter + pte_free_counter) >= 320))
    	//if((pte_free_counter/pte_used_counter >= 5) && ((pte_used_counter + pte_free_counter) >= 320))
    	{
 	        printk("pte free counter is %ld\n", pte_free_counter);
 	        printk("pte used counter is %ld\n", pte_used_counter);
        	counter = pte_free_counter * 3 / 10;
        	//counter = 0;
        	for (i = 0; i < counter; i++)
        		pte_head = pte_head->next;
        	pte_free_counter -= counter;
    	}
    }
    spin_unlock(&pte_cache_lock);

    if(counter != 0)
    {
    	struct ptrpte * newstructarray = NULL;
    	struct ptrpte * newstructarray_head = NULL;
    	int rc = 1;
    	newstructarray = (struct ptrpte *)kmalloc(sizeof(struct ptrpte) * counter, GFP_KERNEL);
    	newstructarray_head = newstructarray;
        for (i = 0; i < counter; i++) {
            newstruct = temp_head;
            temp_head = temp_head->next;
            //newstructarray[i].content = (unsigned long)page_address(newstruct->content);
            newstructarray[i].content = pfn_to_mfn(page_to_pfn(newstruct->content));
            //newstructarray[i].mmu_tlb = newstruct->mmu_tlb;
            kfree(newstruct);
        }
	//hypercall newstructarray
	rc = HYPERVISOR_pte_op(newstructarray, counter);
        //if (rc == 0)
	//else 
 	    //printk("pte cache free error\n");
	    
	//free page to the buddy system
        newstructarray = newstructarray_head;
	for (i = 0; i < counter; i++) {
	    //tlb_remove_page(newstructarray[i].mmu_tlb,newstructarray[i].content);
	    //__free_page(newstructarray[i].content);
	    __free_page(pfn_to_page(mfn_to_pfn(newstructarray[i].content)));
	}

	//free newstructarray
	kfree(newstructarray);
    }
   
    return;
} 
Example #23
static unsigned long __init xen_do_chunk(unsigned long start,
					 unsigned long end, bool release)
{
	struct xen_memory_reservation reservation = {
		.address_bits = 0,
		.extent_order = 0,
		.domid        = DOMID_SELF
	};
	unsigned long len = 0;
	int xlated_phys = xen_feature(XENFEAT_auto_translated_physmap);
	unsigned long pfn;
	int ret;

	for (pfn = start; pfn < end; pfn++) {
		unsigned long frame;
		unsigned long mfn = pfn_to_mfn(pfn);

		if (release) {
			/* Make sure pfn exists to start with */
			if (mfn == INVALID_P2M_ENTRY || mfn_to_pfn(mfn) != pfn)
				continue;
			frame = mfn;
		} else {
			if (!xlated_phys && mfn != INVALID_P2M_ENTRY)
				continue;
			frame = pfn;
		}
		set_xen_guest_handle(reservation.extent_start, &frame);
		reservation.nr_extents = 1;

		ret = HYPERVISOR_memory_op(release ? XENMEM_decrease_reservation : XENMEM_populate_physmap,
					   &reservation);
		WARN(ret != 1, "Failed to %s pfn %lx err=%d\n",
		     release ? "release" : "populate", pfn, ret);

		if (ret == 1) {
			if (!early_set_phys_to_machine(pfn, release ? INVALID_P2M_ENTRY : frame)) {
				if (release)
					break;
				set_xen_guest_handle(reservation.extent_start, &frame);
				reservation.nr_extents = 1;
				ret = HYPERVISOR_memory_op(XENMEM_decrease_reservation,
							   &reservation);
				break;
			}
			len++;
		} else
			break;
	}
	if (len)
		printk(KERN_INFO "%s %lx-%lx pfn range: %lu pages %s\n",
		       release ? "Freeing" : "Populating",
		       start, end, len,
		       release ? "freed" : "added");

	return len;
}

static unsigned long __init xen_release_chunk(unsigned long start,
					      unsigned long end)
{
	/*
	 * Xen already ballooned out the E820 non RAM regions for us
	 * and set them up properly in EPT.
	 */
	if (xen_feature(XENFEAT_auto_translated_physmap))
		return end - start;

	return xen_do_chunk(start, end, true);
}

static unsigned long __init xen_populate_chunk(
	const struct e820entry *list, size_t map_size,
	unsigned long max_pfn, unsigned long *last_pfn,
	unsigned long credits_left)
{
	const struct e820entry *entry;
	unsigned int i;
	unsigned long done = 0;
	unsigned long dest_pfn;

	for (i = 0, entry = list; i < map_size; i++, entry++) {
		unsigned long s_pfn;
		unsigned long e_pfn;
		unsigned long pfns;
		long capacity;

		if (credits_left <= 0)
			break;

		if (entry->type != E820_RAM)
			continue;

		e_pfn = PFN_DOWN(entry->addr + entry->size);

		/* We only care about E820 after the xen_start_info->nr_pages */
		if (e_pfn <= max_pfn)
			continue;

		s_pfn = PFN_UP(entry->addr);
		/* If the E820 falls within the nr_pages, we want to start
		 * at the nr_pages PFN.
		 * If that would mean going past the E820 entry, skip it
		 */
		if (s_pfn <= max_pfn) {
			capacity = e_pfn - max_pfn;
			dest_pfn = max_pfn;
		} else {
			capacity = e_pfn - s_pfn;
			dest_pfn = s_pfn;
		}

		if (credits_left < capacity)
			capacity = credits_left;

		pfns = xen_do_chunk(dest_pfn, dest_pfn + capacity, false);
		done += pfns;
		*last_pfn = (dest_pfn + pfns);
		if (pfns < capacity)
			break;
		credits_left -= pfns;
	}
	return done;
}

static void __init xen_set_identity_and_release_chunk(
	unsigned long start_pfn, unsigned long end_pfn, unsigned long nr_pages,
	unsigned long *released, unsigned long *identity)
{
	unsigned long pfn;

	/*
	 * If the PFNs are currently mapped, clear the mappings
	 * (except for the ISA region which must be 1:1 mapped) to
	 * release the refcounts (in Xen) on the original frames.
	 */

	/*
	 * PVH E820 matches the hypervisor's P2M which means we need to
	 * account for the proper values of *release and *identity.
	 */
	for (pfn = start_pfn; !xen_feature(XENFEAT_auto_translated_physmap) &&
	     pfn <= max_pfn_mapped && pfn < end_pfn; pfn++) {
		pte_t pte = __pte_ma(0);

		if (pfn < PFN_UP(ISA_END_ADDRESS))
			pte = mfn_pte(pfn, PAGE_KERNEL_IO);

		(void)HYPERVISOR_update_va_mapping(
			(unsigned long)__va(pfn << PAGE_SHIFT), pte, 0);
	}

	if (start_pfn < nr_pages)
		*released += xen_release_chunk(
			start_pfn, min(end_pfn, nr_pages));

	*identity += set_phys_range_identity(start_pfn, end_pfn);
}