/*
 * Transfer memory mapped regions, using CoW sharing, from 'src_vmp' to
 * 'dst_vmp', for the source process's address range of 'start_addr'
 * (inclusive) to 'end_addr' (exclusive).  Return OK or an error code.
 * If the regions seem to have been transferred already, do nothing.
 */
static int transfer_mmap_regions(struct vmproc *src_vmp,
	struct vmproc *dst_vmp, vir_bytes start_addr, vir_bytes end_addr)
{
	struct vir_region *first_vr, *last_vr, *dup_vr;

	/* Locate the first source region at or above the range start. */
	first_vr = region_search(&src_vmp->vm_regions_avl, start_addr,
	    AVL_GREATER_EQUAL);

	/* No source region falls within the given range: nothing to do. */
	if (first_vr == NULL || first_vr->vaddr >= end_addr)
		return OK;

	/*
	 * During a multicomponent live update that includes VM, this routine
	 * may be invoked more than once for the same process pair, so that
	 * code paths stay as uniform as possible while the regions are still
	 * copied early enough.
	 *
	 * To cope with such repeated calls, perform a simple duplicate test:
	 * if the destination already has a region at the first source
	 * region's base address, the transfer must have happened before, and
	 * copying again would corrupt state.  Note that this base-address
	 * check is a heuristic and cannot catch every such problem.
	 */
	dup_vr = region_search(&dst_vmp->vm_regions_avl, first_vr->vaddr,
	    AVL_EQUAL);
	if (dup_vr != NULL) {
#if LU_DEBUG
		printf("VM: transfer_mmap_regions: skipping transfer from "
		    "%d to %d (0x%lx already present)\n",
		    src_vmp->vm_endpoint, dst_vmp->vm_endpoint,
		    first_vr->vaddr);
#endif
		return OK;
	}

	/* Locate the last source region below the (exclusive) range end. */
	last_vr = region_search(&src_vmp->vm_regions_avl, end_addr, AVL_LESS);
	assert(last_vr != NULL);
	assert(first_vr->vaddr <= last_vr->vaddr);

#if LU_DEBUG
	printf("VM: transfer_mmap_regions: transferring memory mapped regions "
	    "from %d to %d (0x%lx to 0x%lx)\n", src_vmp->vm_endpoint,
	    dst_vmp->vm_endpoint, first_vr->vaddr, last_vr->vaddr);
#endif

	/* Copy the inclusive region range into the destination, CoW-shared. */
	return map_proc_copy_range(dst_vmp, src_vmp, first_vr, last_vr);
}
/// Modifies the value of an integer variable. bool mapreg_setreg(int uid, int val) { int num = (uid & 0x00ffffff); int i = (uid & 0xff000000) >> 24; const char* name = get_str(num); struct region_data* rd; if( !stricmp(name,"$Region") && i > 0 && i < SCRIPT_MAX_ARRAYSIZE && (rd = region_search(i)) != NULL ) region_set_guild(i,val); if( val != 0 ) { if( idb_iput(mapreg_db,uid,val) ) mapreg_dirty = true; // already exists, delay write else if(name[1] != '@') {// write new variable to database char tmp_str[32*2+1]; Sql_EscapeStringLen(mmysql_handle, tmp_str, name, strnlen(name, 32)); if( SQL_ERROR == Sql_Query(mmysql_handle, "INSERT INTO `%s`(`varname`,`index`,`value`) VALUES ('%s','%d','%d')", mapreg_table, tmp_str, i, val) ) Sql_ShowDebug(mmysql_handle); } } else // val == 0 { idb_remove(mapreg_db,uid); if( name[1] != '@' ) {// Remove from database because it is unused. if( SQL_ERROR == Sql_Query(mmysql_handle, "DELETE FROM `%s` WHERE `varname`='%s' AND `index`='%d'", mapreg_table, name, i) ) Sql_ShowDebug(mmysql_handle); } } return true; }
/*===========================================================================*
 *				rs_memctl_heap_prealloc			     *
 *===========================================================================*/
static int rs_memctl_heap_prealloc(struct vmproc *vmp, vir_bytes *addr,
	size_t *len)
{
/* Preallocate 'len' bytes of heap for the given process by extending its
 * break past the end of its data region.  On success, '*addr' is set to the
 * start of the newly available range.  Returns OK or an error code.
 */
	struct vir_region *data_vr;
	vir_bytes bytes;

	/* A zero-length request is meaningless (size_t cannot be negative). */
	if (*len == 0) {
		return EINVAL;
	}

	/* Find the data region: the highest region below the mmap base. */
	data_vr = region_search(&vmp->vm_regions_avl, VM_MMAPBASE, AVL_LESS);

	/* BUGFIX: the original dereferenced data_vr unconditionally; a
	 * process without any region below VM_MMAPBASE would crash VM here.
	 */
	if (data_vr == NULL) {
		return EINVAL;
	}

	*addr = data_vr->vaddr + data_vr->length;
	bytes = *addr + *len;

	return real_brk(vmp, bytes);
}
/*===========================================================================*
 *				do_rs_prepare	     			     *
 *===========================================================================*/
int do_rs_prepare(message *m_ptr)
{
	/* Prepare a new instance of a service for an upcoming live-update
	 * switch, based on the old instance of this service.  This call is
	 * used only by RS and only for a multicomponent live update which
	 * includes VM.  In this case, all processes need to be prepared such
	 * that they don't require the new VM instance to perform actions
	 * during live update that cannot be undone in the case of a rollback.
	 */
	endpoint_t src_e, dst_e;
	int src_p, dst_p;
	struct vmproc *src_vmp, *dst_vmp;
	struct vir_region *src_data_vr, *dst_data_vr;
	vir_bytes src_addr, dst_addr;
	int sys_upd_flags;
	int r;

	src_e = m_ptr->m_lsys_vm_update.src;
	dst_e = m_ptr->m_lsys_vm_update.dst;
	sys_upd_flags = m_ptr->m_lsys_vm_update.flags;

	/* Lookup slots for source and destination process. */
	if(vm_isokendpt(src_e, &src_p) != OK) {
		printf("VM: do_rs_prepare: bad src endpoint %d\n", src_e);
		return EINVAL;
	}
	src_vmp = &vmproc[src_p];
	if(vm_isokendpt(dst_e, &dst_p) != OK) {
		printf("VM: do_rs_prepare: bad dst endpoint %d\n", dst_e);
		return EINVAL;
	}
	dst_vmp = &vmproc[dst_p];

	/* Pin memory for the source process. */
	map_pin_memory(src_vmp);

	/* See if the source process has a larger heap than the destination
	 * process.  If so, extend the heap of the destination process to
	 * match the source's.  While this may end up wasting quite some
	 * memory, it is absolutely essential that the destination process
	 * does not run out of heap memory during the live update window,
	 * and since most processes will be doing an identity transfer, they
	 * are likely to require as much heap as their previous instances.
	 * Better safe than sorry.  TODO: prevent wasting memory somehow;
	 * this seems particularly relevant for RS.
	 */
	src_data_vr = region_search(&src_vmp->vm_regions_avl, VM_MMAPBASE,
	    AVL_LESS);
	assert(src_data_vr);
	dst_data_vr = region_search(&dst_vmp->vm_regions_avl, VM_MMAPBASE,
	    AVL_LESS);
	assert(dst_data_vr);

	src_addr = src_data_vr->vaddr + src_data_vr->length;
	dst_addr = dst_data_vr->vaddr + dst_data_vr->length;
	if (src_addr > dst_addr) {
		/* BUGFIX: the original ignored real_brk's result.  If the
		 * heap extension fails, the destination may run out of heap
		 * during the live update window - the very situation this
		 * code exists to prevent - so report the failure to RS
		 * instead of continuing silently.
		 */
		if ((r = real_brk(dst_vmp, src_addr)) != OK) {
			printf("VM: do_rs_prepare: could not extend dst heap "
			    "(%d)\n", r);
			return r;
		}
	}

	/* Now also pin memory for the destination process. */
	map_pin_memory(dst_vmp);

	/* Finally, map the source process's memory-mapped regions into the
	 * destination process.  This needs to happen now, because VM may not
	 * allocate any objects during the live update window, since this
	 * would prevent successful rollback of VM afterwards.  The
	 * destination may not actually touch these regions during the live
	 * update window either, because they are mapped copy-on-write and a
	 * pagefault would also cause object allocation.  Objects are pages,
	 * slab objects, anything in the new VM instance to which changes are
	 * visible in the old VM basically.
	 */
	if (!(sys_upd_flags & SF_VM_NOMMAP))
		map_proc_dyn_data(src_vmp, dst_vmp);

	return OK;
}