/*===========================================================================*
 *		           rs_memctl_make_vm_instance			     *
 *===========================================================================*/
/*
 * Turn the process in 'new_vm_vmp' into a second VM instance: pin its
 * memory, preallocate page tables over [VM_OWN_HEAPBASE, VM_DATATOP] for
 * both the running VM and the new instance, and let the new instance map
 * both sets of page tables.  Returns OK on success, EPERM if a second
 * instance already exists, or the error from a failing sub-step.
 */
static int rs_memctl_make_vm_instance(struct vmproc *new_vm_vmp)
{
	int r;
	u32_t flags;
	int verify;
	struct vmproc *this_vm_vmp;

	this_vm_vmp = &vmproc[VM_PROC_NR];

	pt_assert(&this_vm_vmp->vm_pt);

	/* Check if the operation is allowed. */
	assert(num_vm_instances == 1 || num_vm_instances == 2);
	if(num_vm_instances == 2) {
		printf("VM can currently support no more than 2 VM instances at a time.\n");
		return EPERM;
	}

	/* Copy settings from current VM.
	 * NOTE(review): the instance flag and counter are updated before the
	 * pin/prealloc/map steps below; on a later failure they are not
	 * rolled back — confirm this is intentional (a retry would then hit
	 * the EPERM check above).
	 */
	new_vm_vmp->vm_flags |= VMF_VM_INSTANCE;
	num_vm_instances++;

	/* Pin memory for the new VM instance. */
	r = map_pin_memory(new_vm_vmp);
	if(r != OK) {
		return r;
	}

	/* Preallocate page tables for the entire address space for both
	 * VM and the new VM instance.
	 */
	flags = 0;
	verify = FALSE;
	r = pt_ptalloc_in_range(&this_vm_vmp->vm_pt,
		VM_OWN_HEAPBASE, VM_DATATOP, flags, verify);
	if(r != OK) {
		return r;
	}
	r = pt_ptalloc_in_range(&new_vm_vmp->vm_pt,
		VM_OWN_HEAPBASE, VM_DATATOP, flags, verify);
	if(r != OK) {
		return r;
	}

	/* Let the new VM instance map VM's page tables and its own. */
	r = pt_ptmap(this_vm_vmp, new_vm_vmp);
	if(r != OK) {
		return r;
	}
	r = pt_ptmap(new_vm_vmp, new_vm_vmp);
	if(r != OK) {
		return r;
	}

	pt_assert(&this_vm_vmp->vm_pt);
	pt_assert(&new_vm_vmp->vm_pt);

	return OK;
}
/*===========================================================================*
 *				do_rs_memctl	     			     *
 *===========================================================================*/
/*
 * Handle a memory-control request from RS on behalf of the endpoint in
 * the message: pin memory, spawn a VM instance, or manage preallocation
 * maps.  Returns OK, EINVAL for a bad endpoint/request, or the error
 * from the invoked sub-operation.
 */
int do_rs_memctl(message *m_ptr)
{
	struct vmproc *vmp;
	int proc_nr;
	endpoint_t ep = m_ptr->VM_RS_CTL_ENDPT;
	int req = m_ptr->VM_RS_CTL_REQ;

	/* Resolve the target endpoint to a process slot. */
	if (vm_isokendpt(ep, &proc_nr) != OK) {
		printf("do_rs_memctl: bad endpoint %d\n", ep);
		return EINVAL;
	}
	vmp = &vmproc[proc_nr];

	/* Dispatch on the request code. */
	switch (req) {
	case VM_RS_MEM_PIN:
		/* Only actually pin RS memory if VM can recover from crashes (saves memory). */
		if (num_vm_instances <= 1)
			return OK;
		return map_pin_memory(vmp);
	case VM_RS_MEM_MAKE_VM:
		return rs_memctl_make_vm_instance(vmp);
	case VM_RS_MEM_HEAP_PREALLOC:
		return rs_memctl_heap_prealloc(vmp,
			(vir_bytes *) &m_ptr->VM_RS_CTL_ADDR,
			(size_t *) &m_ptr->VM_RS_CTL_LEN);
	case VM_RS_MEM_MAP_PREALLOC:
		return rs_memctl_map_prealloc(vmp,
			(vir_bytes *) &m_ptr->VM_RS_CTL_ADDR,
			(size_t *) &m_ptr->VM_RS_CTL_LEN);
	case VM_RS_MEM_GET_PREALLOC_MAP:
		return rs_memctl_get_prealloc_map(vmp,
			(vir_bytes *) &m_ptr->VM_RS_CTL_ADDR,
			(size_t *) &m_ptr->VM_RS_CTL_LEN);
	default:
		printf("do_rs_memctl: bad request %d\n", req);
		return EINVAL;
	}
}
/* --- Example #3 (snippet separator from the original collection) --- */
/*===========================================================================*
 *				do_rs_memctl	     			     *
 *===========================================================================*/
/*
 * Handle a memory-control request from RS for the endpoint given in the
 * message.  Returns OK, EINVAL on a bad endpoint or request code, or
 * the result of the requested sub-operation.
 */
int do_rs_memctl(message *m_ptr)
{
	struct vmproc *vmp;
	int proc_nr;
	endpoint_t ep = m_ptr->VM_RS_CTL_ENDPT;
	int req = m_ptr->VM_RS_CTL_REQ;

	/* Resolve the target endpoint to a process slot. */
	if (vm_isokendpt(ep, &proc_nr) != OK) {
		printf("do_rs_memctl: bad endpoint %d\n", ep);
		return EINVAL;
	}
	vmp = &vmproc[proc_nr];

	/* Dispatch on the request code. */
	switch (req) {
	case VM_RS_MEM_PIN:
		/* Do not perform VM_RS_MEM_PIN yet - it costs the full
		 * size of the RS stack (64MB by default) in memory,
		 * and it's needed for functionality that isn't complete /
		 * merged in current Minix (surviving VM crashes).
		 * Disabled call, kept for reference:
		 *	return map_pin_memory(vmp);
		 */
		return OK;
	case VM_RS_MEM_MAKE_VM:
		return rs_memctl_make_vm_instance(vmp);
	default:
		printf("do_rs_memctl: bad request %d\n", req);
		return EINVAL;
	}
}
/* --- Example #4 (snippet separator from the original collection) --- */
/*===========================================================================*
 *		           rs_memctl_make_vm_instance			     *
 *===========================================================================*/
/*
 * Turn 'new_vm_vmp' into a second VM instance: pin its memory,
 * preallocate page tables for both the running VM and the new instance,
 * then let the new instance map both sets of page tables.  Returns OK
 * on success or the error code of the first failing step.
 */
static int rs_memctl_make_vm_instance(struct vmproc *new_vm_vmp)
{
	struct vmproc *this_vm_vmp = &vmproc[VM_PROC_NR];
	u32_t flags = 0;
	int verify = FALSE;
	int r;

	/* Pin memory for the new VM instance. */
	if ((r = map_pin_memory(new_vm_vmp)) != OK)
		return r;

	/* Preallocate page tables for the entire address space for both
	 * VM and the new VM instance.
	 */
	if ((r = pt_ptalloc_in_range(&this_vm_vmp->vm_pt, 0, 0,
	    flags, verify)) != OK)
		return r;
	if ((r = pt_ptalloc_in_range(&new_vm_vmp->vm_pt, 0, 0,
	    flags, verify)) != OK)
		return r;

	/* Let the new VM instance map VM's page tables and its own. */
	if ((r = pt_ptmap(this_vm_vmp, new_vm_vmp)) != OK)
		return r;
	if ((r = pt_ptmap(new_vm_vmp, new_vm_vmp)) != OK)
		return r;

	return OK;
}
/*===========================================================================*
 *				do_rs_prepare	     			     *
 *===========================================================================*/
int do_rs_prepare(message *m_ptr)
{
	/* Prepare a new instance of a service for an upcoming live-update
	 * switch, based on the old instance of this service.  This call is
	 * used only by RS and only for a multicomponent live update which
	 * includes VM.  In this case, all processes need to be prepared such
	 * that they don't require the new VM instance to perform actions
	 * during live update that cannot be undone in the case of a rollback.
	 */
	endpoint_t src_e, dst_e;
	int src_p, dst_p;
	struct vmproc *src_vmp, *dst_vmp;
	struct vir_region *src_data_vr, *dst_data_vr;
	vir_bytes src_addr, dst_addr;
	int sys_upd_flags;

	/* Unpack the update request: old instance, new instance, flags. */
	src_e = m_ptr->m_lsys_vm_update.src;
	dst_e = m_ptr->m_lsys_vm_update.dst;
        sys_upd_flags = m_ptr->m_lsys_vm_update.flags;

	/* Lookup slots for source and destination process. */
	if(vm_isokendpt(src_e, &src_p) != OK) {
		printf("VM: do_rs_prepare: bad src endpoint %d\n", src_e);
		return EINVAL;
	}
	src_vmp = &vmproc[src_p];
	if(vm_isokendpt(dst_e, &dst_p) != OK) {
		printf("VM: do_rs_prepare: bad dst endpoint %d\n", dst_e);
		return EINVAL;
	}
	dst_vmp = &vmproc[dst_p];

	/* Pin memory for the source process.
	 * NOTE(review): the return value is ignored here (and for the
	 * destination pin and real_brk below) — presumably best-effort;
	 * confirm a pin failure is tolerable during live-update prep.
	 */
	map_pin_memory(src_vmp);

	/* See if the source process has a larger heap than the destination
	 * process.  If so, extend the heap of the destination process to
	 * match the source's.  While this may end up wasting quite some
	 * memory, it is absolutely essential that the destination process
	 * does not run out of heap memory during the live update window,
	 * and since most processes will be doing an identity transfer, they
	 * are likely to require as much heap as their previous instances.
	 * Better safe than sorry.  TODO: prevent wasting memory somehow;
	 * this seems particularly relevant for RS.
	 */
	/* The data (heap) region is the highest region below VM_MMAPBASE. */
	src_data_vr = region_search(&src_vmp->vm_regions_avl, VM_MMAPBASE,
	    AVL_LESS);
	assert(src_data_vr);
	dst_data_vr = region_search(&dst_vmp->vm_regions_avl, VM_MMAPBASE,
	    AVL_LESS);
	assert(dst_data_vr);

	/* Compare heap end addresses and grow the destination if needed. */
	src_addr = src_data_vr->vaddr + src_data_vr->length;
	dst_addr = dst_data_vr->vaddr + dst_data_vr->length;
	if (src_addr > dst_addr)
		real_brk(dst_vmp, src_addr);

	/* Now also pin memory for the destination process.
	 * (Done after the brk so the newly grown heap is pinned too.)
	 */
	map_pin_memory(dst_vmp);

	/* Finally, map the source process's memory-mapped regions into the
	 * destination process.  This needs to happen now, because VM may not
	 * allocate any objects during the live update window, since this
	 * would prevent successful rollback of VM afterwards.  The
	 * destination may not actually touch these regions during the live
	 * update window either, because they are mapped copy-on-write and a
	 * pagefault would also cause object allocation.  Objects are pages,
	 * slab objects, anything in the new VM instance to which changes are
	 * visible in the old VM basically.
	 */
	if (!(sys_upd_flags & SF_VM_NOMMAP))
		map_proc_dyn_data(src_vmp, dst_vmp);

	return OK;
}