/*
 * Translate a virtual range in a process' data space to a physical
 * address, with a fallback: try the VM_D segment type first and, if the
 * kernel rejects that with EINVAL, retry using the legacy D type.
 * Returns OK on success, or the sys_umap() error code otherwise.
 */
int
sys_umap_data_fb(endpoint_t ep, vir_bytes buf, vir_bytes len, phys_bytes *phys)
{
	int result;

	result = sys_umap(ep, VM_D, buf, len, phys);

	/* Anything other than EINVAL (including OK) is final. */
	if (result != EINVAL)
		return result;

	/* VM_D not understood; fall back to the old D segment type. */
	return sys_umap(ep, D, buf, len, phys);
}
/*===========================================================================*
 *				sef_cb_init_fresh			     *
 *===========================================================================*/
PRIVATE int sef_cb_init_fresh(int UNUSED(type), sef_init_info_t *UNUSED(info))
{
/* Initialize the rtl8169 driver. */
	int r;
	unsigned left;

	/* Select a buffer that can safely be used for dma transfers.
	 * Its absolute address is 'DmaPhys', the normal address is 'DmaPtr'.
	 */
#if (CHIP == INTEL)
	DmaPtr = DmaBuffer;
	/* Look up the buffer's physical address.  The return value used to
	 * be ignored, which could leave DmaPhys uninitialized on failure;
	 * fail loudly instead. */
	if ((r = sys_umap(SELF, D, (vir_bytes)DmaBuffer, sizeof(DmaBuffer),
			&DmaPhys)) != OK)
		panic("sys_umap failed: %d", r);

	if ((left = dma_bytes_left(DmaPhys)) < DMA_SIZE) {
		/* First half of buffer crosses a 64K boundary, can't DMA
		 * into that */
		DmaPtr += left;
		DmaPhys += left;
	}
#else /* CHIP != INTEL */
	panic("initialization failed: CHIP != INTEL: %d", 0);
#endif /* CHIP == INTEL */

	/* Announce we are up! */
	driver_announce();

	return(OK);
}
char VAssert_Init(void) { uint32 eax, ebx, ecx, edx; VA page_address = (VA) &vassert_state.inReplay, ph; if (!VAssert_IsInVM()) { return -1; } bzero((char*) &vassert_state, sizeof vassert_state); #ifndef __minix /* Lock the page. */ if (mlock(&vassert_state, sizeof vassert_state)) { return -1; } #endif /* vmware expects a linear address (or is simply forgetting * to adjust the given address for segments) */ if(sys_umap(SELF, D, page_address, 1, (phys_bytes *) &ph)) { printf("VAssert_Init: sys_umap failed\n"); return -1; } libvassert_process_backdoor(CMD_SET_ADDRESS, ph, MAGIC_PORT|(1<<16), &eax, &ebx, &ecx, &edx); return (eax != -1) ? 0 : -1; }
/*
 * Allocate and register the DMA and extra buffers for one sub-device.
 * On success the sub-device's DmaBuf/DmaPtr/DmaPhys/ExtraBuf fields are
 * filled in and the physical DMA address is programmed into the device.
 * Returns OK, or EIO on any allocation/lookup failure.
 *
 * Fixes over the previous version: the channel number printed in error
 * messages was an uninitialized variable, the result of alloc_contig()
 * was also stored into an unused size_t, and sys_umap() was called with
 * sizeof(pointer) instead of the size of the allocation.
 */
PRIVATE int init_buffers(sub_dev_t *sub_dev_ptr)
{
#if (CHIP == INTEL)
	char *base;
	size_t size;
	unsigned left;
	int r;
	phys_bytes ph;

	/* allocate dma buffer space; 64K of slack guarantees we can find a
	 * stretch that does not cross a 64K boundary */
	size = sub_dev_ptr->DmaSize + 64 * 1024;
	base = alloc_contig(size, AC_ALIGN4K, &ph);
	if (!base) {
		error("%s: failed to allocate dma buffer for channel %d\n",
			drv.DriverName, sub_dev_ptr->Nr);
		return EIO;
	}
	sub_dev_ptr->DmaBuf = base;

	tell_dev((vir_bytes)base, size, 0, 0, 0);

	/* allocate extra buffer space */
	if (!(sub_dev_ptr->ExtraBuf = malloc(sub_dev_ptr->NrOfExtraBuffers *
				sub_dev_ptr->DmaSize /
				sub_dev_ptr->NrOfDmaFragments))) {
		error("%s failed to allocate extra buffer for channel %d\n",
			drv.DriverName, sub_dev_ptr->Nr);
		return EIO;
	}

	sub_dev_ptr->DmaPtr = sub_dev_ptr->DmaBuf;

	/* Look up the physical address of the whole dma allocation (not
	 * just sizeof the pointer, as before). */
	r = sys_umap(SELF, D, (vir_bytes) sub_dev_ptr->DmaBuf,
		(phys_bytes) size, &(sub_dev_ptr->DmaPhys));
	if (r != OK)
		return EIO;

	if ((left = dma_bytes_left(sub_dev_ptr->DmaPhys)) <
			sub_dev_ptr->DmaSize) {
		/* First half of buffer crosses a 64K boundary,
		 * can't DMA into that */
		sub_dev_ptr->DmaPtr += left;
		sub_dev_ptr->DmaPhys += left;
	}

	/* write the physical dma address and size to the device */
	drv_set_dma(sub_dev_ptr->DmaPhys,
		sub_dev_ptr->DmaSize, sub_dev_ptr->Nr);

	return OK;

#else /* CHIP != INTEL */
	error("%s: init_buffer() failed, CHIP != INTEL", drv.DriverName);
	return EIO;
#endif /* CHIP == INTEL */
}
/*
 * Translate virtual address 'x' in our own data space to its physical
 * address via the kernel.  Panics if the lookup fails, so on return the
 * value is always valid.
 */
unsigned long vir2phys(unsigned long x)
{
	unsigned long phys;
	int r;

	r = sys_umap(SELF, VM_D, x, 4, &phys);
	if (r != OK) {
		printf("lance: umap of 0x%lx failed\n", x);
		panic("sys_umap failed: %d", r);
	}

	return phys;
}
/*===========================================================================*
 *				do_rdwt					     *
 *===========================================================================*/
PUBLIC int do_rdwt(message *mp)
/* mp - pointer to read or write message */
{
/* Carry out a single read or write request, bouncing the data through a
 * local buffer so it can be de/encrypted between the caller and the
 * underlying driver.
 *
 * Fixes over the previous version: 'opcode' was never initialized from
 * the message, 'proc_nr' and 'user_vir' were used undeclared, an
 * undefined variable 's' was passed to panic(), the local buffer was
 * referred to by two different names (buf/buffer), and the sys_umap()
 * status was ignored.
 */
  int r, opcode;
  int proc_nr;			/* original caller, before mp is reused */
  vir_bytes user_vir;
  phys_bytes phys_addr;

  /* Disk address?  Address and length of the user buffer? */
  if (mp->COUNT < 0) return(EINVAL);

  /* Capture request type and caller before the message is rewritten. */
  opcode = mp->m_type;
  proc_nr = mp->m_source;

  /* Check the user buffer. */
  r = sys_umap(proc_nr, D, (vir_bytes) mp->ADDRESS, mp->COUNT, &phys_addr);
  if (r != OK || phys_addr == 0) return(EFAULT);

  if (mp->COUNT > BUF_LEN)
	panic("CryptDrive", "buffer is too small", mp->COUNT);

  printf("Request size is , %u bytes", mp->COUNT); /*debug*/

  if (opcode == DEV_READ) {	/*from here to caller*/
	user_vir = (vir_bytes) mp->ADDRESS;
	mp->ADDRESS = (vir_bytes) buf;	/* use my buffer */
	mp->m_source = thispid;
	if (OK != sendrec(DRVR_PROC_NR, mp))
		panic("CryptDrive", "do_rd messaging failed", r);
	/* decrypt here - this line here */
	sys_vircopy(SELF, D, (vir_bytes) buf, proc_nr, D, user_vir,
		mp->COUNT);
	mp->m_source = thispid;
	if (OK != send(device_caller, mp))
		panic("CryptDrive", "do_wt messaging failed", r);
  }
  if (opcode == DEV_WRITE) {	/*from caller to here*/
	user_vir = (vir_bytes) mp->ADDRESS;
	sys_vircopy(proc_nr, D, user_vir, SELF, D, (vir_bytes) buf,
		mp->COUNT);
	mp->ADDRESS = (vir_bytes) buf;	/* use my buffer */
	mp->m_source = thispid;
	if (OK != sendrec(DRVR_PROC_NR, mp))
		panic("CryptDrive", "do_wt messaging failed", r);
	mp->m_source = thispid;
	if (OK != send(device_caller, mp))
		panic("CryptDrive", "do_wt messaging failed", r);
  }

  return(OK);
}
/*
 * Allocate 'len' bytes of physically contiguous, pre-allocated memory.
 * 'flags' may request placement (AC_LOWER1M/AC_LOWER16M) or alignment
 * (AC_ALIGN4K/AC_ALIGN64K).  If 'phys' is non-NULL, the physical address
 * of the buffer is stored through it.  Returns the virtual address, or
 * NULL on failure.
 *
 * Fix: the panic message claimed "sys_umap_data_fb failed" although the
 * call made here is sys_umap(); the diagnostic now matches the code.
 */
void *alloc_contig(size_t len, int flags, phys_bytes *phys)
{
	vir_bytes buf;
	int mmapflags = MAP_PREALLOC|MAP_CONTIG|MAP_ANON;

	if(flags & AC_LOWER16M)
		mmapflags |= MAP_LOWER16M;
	if(flags & AC_LOWER1M)
		mmapflags |= MAP_LOWER1M;
	if(flags & AC_ALIGN64K)
		mmapflags |= MAP_ALIGN64K;

	/* First try to get memory with minix_mmap. This is guaranteed
	 * to be page-aligned, and we can tell VM it has to be
	 * pre-allocated and contiguous.
	 */
	errno = 0;
	buf = (vir_bytes) minix_mmap(0, len, PROT_READ|PROT_WRITE,
		mmapflags, -1, 0);

	/* If that failed, maybe we're not running in paged mode.
	 * If that's the case, ENXIO will be returned.
	 * Memory returned with malloc() will be preallocated and
	 * contiguous, so fallback on that, and ask for a little extra
	 * so we can page align it ourselves.
	 */
	if(buf == (vir_bytes) MAP_FAILED) {
		u32_t align = 0;

		if(errno != (_SIGN ENXIO)) {
			return NULL;
		}

		if(flags & AC_ALIGN4K)
			align = 4*1024;
		if(flags & AC_ALIGN64K)
			align = 64*1024;
		/* Guard against wrap-around of len + align. */
		if(len + align < len)
			return NULL;
		len += align;
		if(!(buf = (vir_bytes) malloc(len))) {
			return NULL;
		}
		if(align)
			buf += align - (buf % align);
	}

	/* Get physical address, if requested. */
	if(phys != NULL && sys_umap(SELF, VM_D, buf, len, phys) != OK)
		panic("alloc_contig: sys_umap failed");

	return (void *) buf;
}
PRIVATE int init(char *name) { struct reg86u reg86; int r; myname = name; r = sys_getbiosbuffer(&bios_buf_vir, &bios_buf_size); if (r != OK) panic(myname, "sys_getbiosbuffer failed", r); #ifdef ENABLE_DEBUG DEBUG report(myname, "bios_buf_vir", bios_buf_vir); DEBUG report(myname, "bios_buf_size", bios_buf_size); #endif r = sys_umap(SYSTEM, D, bios_buf_vir, (phys_bytes)bios_buf_size, &bios_buf_phys); if (r != OK) panic(myname, "sys_umap failed", r); if (bios_buf_phys + bios_buf_size > 0x100000) panic(myname, "bad BIOS buffer, phys", bios_buf_phys); #ifdef ENABLE_DEBUG DEBUG report(myname, "bios_buf_phys", bios_buf_phys); #endif current_mode = GFX_MODE_NONE; return 0; }
/*===========================================================================*
 *				pt_init					     *
 *===========================================================================*/
void pt_init(void)
{
/* Bootstrap VM's own paging machinery: locate the kernel module, stock
 * the spare-page (and, on ARM, spare-pagedir) pools with physical
 * addresses, map in the kernel's own mappings, reserve PDEs for page
 * directory bookkeeping, and finally build a private page table for VM
 * by copying the kernel-initialized one.
 */
	pt_t *newpt;
	int s, r, p;
	vir_bytes sparepages_mem;
#if defined(__arm__)
	vir_bytes sparepagedirs_mem;
#endif
	static u32_t currentpagedir[ARCH_VM_DIR_ENTRIES];
	int m = kernel_boot_info.kern_mod;
#if defined(__i386__)
	int global_bit_ok = 0;
	u32_t mypdbr; /* Page Directory Base Register (cr3) value */
#elif defined(__arm__)
	u32_t myttbr;
#endif

	/* Find what the physical location of the kernel is. */
	assert(m >= 0);
	assert(m < kernel_boot_info.mods_with_kernel);
	assert(kernel_boot_info.mods_with_kernel < MULTIBOOT_MAX_MODS);
	kern_mb_mod = &kernel_boot_info.module_list[m];
	kern_size = kern_mb_mod->mod_end - kern_mb_mod->mod_start;
	/* Kernel module and its virtual start must be big-page aligned. */
	assert(!(kern_mb_mod->mod_start % ARCH_BIG_PAGE_SIZE));
	assert(!(kernel_boot_info.vir_kern_start % ARCH_BIG_PAGE_SIZE));
	kern_start_pde = kernel_boot_info.vir_kern_start / ARCH_BIG_PAGE_SIZE;

	/* Get ourselves spare pages. */
	sparepages_mem = (vir_bytes) static_sparepages;
	assert(!(sparepages_mem % VM_PAGE_SIZE));

#if defined(__arm__)
	/* Get ourselves spare pagedirs. */
	sparepagedirs_mem = (vir_bytes) static_sparepagedirs;
	assert(!(sparepagedirs_mem % ARCH_PAGEDIR_SIZE));
#endif

	/* Spare pages are used to allocate memory before VM has its own page
	 * table that things (i.e. arbitrary physical memory) can be mapped
	 * into. We get it by pre-allocating it in our bss (allocated and
	 * mapped in by the kernel) in static_sparepages. We also need the
	 * physical addresses though; we look them up now so they are ready
	 * for use.
	 */
#if defined(__arm__)
	missing_sparedirs = 0;
	assert(STATIC_SPAREPAGEDIRS <= SPAREPAGEDIRS);
	for(s = 0; s < SPAREPAGEDIRS; s++) {
		vir_bytes v = (sparepagedirs_mem + s*ARCH_PAGEDIR_SIZE);;
		phys_bytes ph;
		if((r=sys_umap(SELF, VM_D, (vir_bytes) v, ARCH_PAGEDIR_SIZE,
			&ph)) != OK)
			panic("pt_init: sys_umap failed: %d", r);
		if(s >= STATIC_SPAREPAGEDIRS) {
			/* Slot has no static backing; mark as missing so it
			 * can be replenished later. */
			sparepagedirs[s].pagedir = NULL;
			missing_sparedirs++;
			continue;
		}
		sparepagedirs[s].pagedir = (void *) v;
		sparepagedirs[s].phys = ph;
	}
#endif

	if(!(spare_pagequeue = reservedqueue_new(SPAREPAGES, 1, 1, 0)))
		panic("reservedqueue_new for single pages failed");

	assert(STATIC_SPAREPAGES < SPAREPAGES);
	for(s = 0; s < STATIC_SPAREPAGES; s++) {
		void *v = (void *) (sparepages_mem + s*VM_PAGE_SIZE);
		phys_bytes ph;
		/* NOTE(review): the umap length here is
		 * VM_PAGE_SIZE*SPAREPAGES even though only one page is added
		 * per iteration; for the later pages this range extends past
		 * the static area — verify this is intentional. */
		if((r=sys_umap(SELF, VM_D, (vir_bytes) v,
			VM_PAGE_SIZE*SPAREPAGES, &ph)) != OK)
			panic("pt_init: sys_umap failed: %d", r);
		reservedqueue_add(spare_pagequeue, v, ph);
	}

#if defined(__i386__)
	/* global bit and 4MB pages available? */
	global_bit_ok = _cpufeature(_CPUF_I386_PGE);
	bigpage_ok = _cpufeature(_CPUF_I386_PSE);

	/* Set bit for PTE's and PDE's if available. */
	if(global_bit_ok)
		global_bit = I386_VM_GLOBAL;
#endif

	/* Now reserve another pde for kernel's own mappings. */
	{
		int kernmap_pde;
		phys_bytes addr, len;
		int flags, index = 0;
		u32_t offset = 0;

		kernmap_pde = freepde();
		offset = kernmap_pde * ARCH_BIG_PAGE_SIZE;

		while(sys_vmctl_get_mapping(index, &addr, &len,
			&flags) == OK) {
			int usedpde;
			vir_bytes vir;
			if(index >= MAX_KERNMAPPINGS)
				panic("VM: too many kernel mappings: %d", index);
			kern_mappings[index].phys_addr = addr;
			kern_mappings[index].len = len;
			kern_mappings[index].flags = flags;
			kern_mappings[index].vir_addr = offset;
			/* NOTE(review): this overwrites the 'flags' member
			 * stored three lines up; the incoming kernel flags
			 * are only consulted via the VMMF_* tests below. */
			kern_mappings[index].flags = ARCH_VM_PTE_PRESENT;
			if(flags & VMMF_UNCACHED)
#if defined(__i386__)
				kern_mappings[index].flags |= PTF_NOCACHE;
#elif defined(__arm__)
				kern_mappings[index].flags |= ARM_VM_PTE_DEVICE;
#endif
			if(flags & VMMF_USER)
				kern_mappings[index].flags |= ARCH_VM_PTE_USER;
#if defined(__arm__)
			else
				kern_mappings[index].flags |= ARM_VM_PTE_SUPER;
#endif
			if(flags & VMMF_WRITE)
				kern_mappings[index].flags |= ARCH_VM_PTE_RW;
#if defined(__i386__)
			if(flags & VMMF_GLO)
				kern_mappings[index].flags |= I386_VM_GLOBAL;
#elif defined(__arm__)
			/* On ARM, pairs with the VMMF_WRITE test above. */
			else
				kern_mappings[index].flags |= ARCH_VM_PTE_RO;
#endif
			if(addr % VM_PAGE_SIZE)
				panic("VM: addr unaligned: %d", addr);
			if(len % VM_PAGE_SIZE)
				panic("VM: len unaligned: %d", len);
			vir = offset;
			if(sys_vmctl_reply_mapping(index, vir) != OK)
				panic("VM: reply failed");
			offset += len;
			index++;
			kernmappings++;

			/* Claim further consecutive pde's if the mapping
			 * spilled past the current one. */
			usedpde = ARCH_VM_PDE(offset);
			while(usedpde > kernmap_pde) {
				int newpde = freepde();
				assert(newpde == kernmap_pde+1);
				kernmap_pde = newpde;
			}
		}
	}

	/* Reserve PDEs available for mapping in the page directories. */
	{
		int pd;
		for(pd = 0; pd < MAX_PAGEDIR_PDES; pd++) {
			struct pdm *pdm = &pagedir_mappings[pd];
			pdm->pdeno = freepde();
			phys_bytes ph;

			/* Allocate us a page table in which to
			 * remember page directory pointers.
			 */
			if(!(pdm->page_directories =
				vm_allocpage(&ph, VMP_PAGETABLE))) {
				panic("no virt addr for vm mappings");
			}
			memset(pdm->page_directories, 0, VM_PAGE_SIZE);
			pdm->phys = ph;
#if defined(__i386__)
			pdm->val = (ph & ARCH_VM_ADDR_MASK) |
				ARCH_VM_PDE_PRESENT | ARCH_VM_PTE_RW;
#elif defined(__arm__)
			pdm->val = (ph & ARCH_VM_PDE_MASK) |
				ARCH_VM_PDE_PRESENT | ARM_VM_PDE_DOMAIN; //LSC FIXME
#endif
		}
	}

	/* Allright. Now. We have to make our own page directory and page
	 * tables, that the kernel has already set up, accessible to us.
	 * It's easier to understand if we just copy all the required pages
	 * (i.e. page directory and page tables), and set up the pointers as
	 * if VM had done it itself.
	 *
	 * This allocation will happen without using any page table, and just
	 * uses spare pages.
	 */
	newpt = &vmprocess->vm_pt;
	if(pt_new(newpt) != OK)
		panic("vm pt_new failed");

	/* Get our current pagedir so we can see it. */
#if defined(__i386__)
	if(sys_vmctl_get_pdbr(SELF, &mypdbr) != OK)
#elif defined(__arm__)
	if(sys_vmctl_get_pdbr(SELF, &myttbr) != OK)
#endif
		panic("VM: sys_vmctl_get_pdbr failed");
#if defined(__i386__)
	if(sys_vircopy(NONE, mypdbr, SELF,
		(vir_bytes) currentpagedir, VM_PAGE_SIZE) != OK)
#elif defined(__arm__)
	if(sys_vircopy(NONE, myttbr, SELF,
		(vir_bytes) currentpagedir, ARCH_PAGEDIR_SIZE) != OK)
#endif
		panic("VM: sys_vircopy failed");

	/* We have mapped in kernel ourselves; now copy mappings for VM
	 * that kernel made, including allocations for BSS. Skip identity
	 * mapping bits; just map in VM.
	 */
	for(p = 0; p < ARCH_VM_DIR_ENTRIES; p++) {
		u32_t entry = currentpagedir[p];
		phys_bytes ptaddr_kern, ptaddr_us;

		/* BIGPAGEs are kernel mapping (do ourselves) or boot
		 * identity mapping (don't want).
		 */
		if(!(entry & ARCH_VM_PDE_PRESENT)) continue;
		if((entry & ARCH_VM_BIGPAGE)) continue;

		if(pt_ptalloc(newpt, p, 0) != OK)
			panic("pt_ptalloc failed");
		assert(newpt->pt_dir[p] & ARCH_VM_PDE_PRESENT);

#if defined(__i386__)
		ptaddr_kern = entry & ARCH_VM_ADDR_MASK;
		ptaddr_us = newpt->pt_dir[p] & ARCH_VM_ADDR_MASK;
#elif defined(__arm__)
		ptaddr_kern = entry & ARCH_VM_PDE_MASK;
		ptaddr_us = newpt->pt_dir[p] & ARCH_VM_PDE_MASK;
#endif

		/* Copy kernel-initialized pagetable contents into our
		 * normally accessible pagetable.
		 */
		if(sys_abscopy(ptaddr_kern, ptaddr_us, VM_PAGE_SIZE) != OK)
			panic("pt_init: abscopy failed");
	}

	/* Inform kernel vm has a newly built page table. */
	assert(vmproc[VM_PROC_NR].vm_endpoint == VM_PROC_NR);
	pt_bind(newpt, &vmproc[VM_PROC_NR]);

	pt_init_done = 1;

	/* All OK. */
	return;
}
/*===========================================================================*
 *				pt_init					     *
 *===========================================================================*/
PUBLIC void pt_init(phys_bytes usedlimit)
{
/* By default, the kernel gives us a data segment with pre-allocated
 * memory that then can't grow. We want to be able to allocate memory
 * dynamically, however. So here we copy the part of the page table
 * that's ours, so we get a private page table. Then we increase the
 * hardware segment size so we can allocate memory above our stack.
 */
	pt_t *newpt;
	int s, r;
	vir_bytes v;
	phys_bytes lo, hi;
	vir_bytes extra_clicks;
	u32_t moveup = 0;
	int global_bit_ok = 0;
	int free_pde;
	int p;
	struct vm_ep_data ep_data;
	vir_bytes sparepages_mem;
	phys_bytes sparepages_ph;
	vir_bytes ptr;

	/* Shorthand. */
	newpt = &vmprocess->vm_pt;

	/* Get ourselves spare pages.  Round the static area up to the next
	 * page boundary before looking up its physical address. */
	ptr = (vir_bytes) static_sparepages;
	ptr += I386_PAGE_SIZE - (ptr % I386_PAGE_SIZE);
	if(!(sparepages_mem = ptr))
		panic("pt_init: aalloc for spare failed");
	if((r=sys_umap(SELF, VM_D, (vir_bytes) sparepages_mem,
		I386_PAGE_SIZE*SPAREPAGES, &sparepages_ph)) != OK)
		panic("pt_init: sys_umap failed: %d", r);

	missing_spares = 0;
	assert(STATIC_SPAREPAGES < SPAREPAGES);
	for(s = 0; s < SPAREPAGES; s++) {
		if(s >= STATIC_SPAREPAGES) {
			/* No static backing; counted as missing so it can be
			 * replenished later. */
			sparepages[s].page = NULL;
			missing_spares++;
			continue;
		}
		sparepages[s].page = (void *) (sparepages_mem +
			s*I386_PAGE_SIZE);
		sparepages[s].phys = sparepages_ph + s*I386_PAGE_SIZE;
	}

	/* global bit and 4MB pages available? */
	global_bit_ok = _cpufeature(_CPUF_I386_PGE);
	bigpage_ok = _cpufeature(_CPUF_I386_PSE);

	/* Set bit for PTE's and PDE's if available. */
	if(global_bit_ok)
		global_bit = I386_VM_GLOBAL;

	/* The kernel and boot time processes need an identity mapping.
	 * We use full PDE's for this without separate page tables.
	 * Figure out which pde we can start using for other purposes.
	 */
	id_map_high_pde = usedlimit / I386_BIG_PAGE_SIZE;

	/* We have to make mappings up till here. */
	free_pde = id_map_high_pde+1;

	/* Initial (current) range of our virtual address space. */
	lo = CLICK2ABS(vmprocess->vm_arch.vm_seg[T].mem_phys);
	hi = CLICK2ABS(vmprocess->vm_arch.vm_seg[S].mem_phys +
		vmprocess->vm_arch.vm_seg[S].mem_len);

	assert(!(lo % I386_PAGE_SIZE));
	assert(!(hi % I386_PAGE_SIZE));

	if(lo < VM_PROCSTART) {
		moveup = VM_PROCSTART - lo;
		assert(!(VM_PROCSTART % I386_PAGE_SIZE));
		assert(!(lo % I386_PAGE_SIZE));
		assert(!(moveup % I386_PAGE_SIZE));
	}

	/* Make new page table for ourselves, partly copied
	 * from the current one.
	 */
	if(pt_new(newpt) != OK)
		panic("pt_init: pt_new failed");

	/* Set up mappings for VM process. */
	for(v = lo; v < hi; v += I386_PAGE_SIZE) {
		/* NOTE(review): addr and flags are declared but never used
		 * in this loop. */
		phys_bytes addr;
		u32_t flags;

		/* We have to write the new position in the PT,
		 * so we can move our segments. */
		if(pt_writemap(vmprocess, newpt, v+moveup, v, I386_PAGE_SIZE,
			I386_VM_PRESENT|I386_VM_WRITE|I386_VM_USER, 0) != OK)
			panic("pt_init: pt_writemap failed");
	}

	/* Move segments up too. */
	vmprocess->vm_arch.vm_seg[T].mem_phys += ABS2CLICK(moveup);
	vmprocess->vm_arch.vm_seg[D].mem_phys += ABS2CLICK(moveup);
	vmprocess->vm_arch.vm_seg[S].mem_phys += ABS2CLICK(moveup);

	/* Allocate us a page table in which to remember page directory
	 * pointers.
	 */
	if(!(page_directories = vm_allocpage(&page_directories_phys,
		VMP_PAGETABLE)))
		panic("no virt addr for vm mappings");

	memset(page_directories, 0, I386_PAGE_SIZE);

	/* Increase our hardware data segment to create virtual address
	 * space above our stack. We want to increase it to VM_DATATOP,
	 * like regular processes have.
	 */
	extra_clicks = ABS2CLICK(VM_DATATOP - hi);
	vmprocess->vm_arch.vm_seg[S].mem_len += extra_clicks;

	/* We pretend to the kernel we have a huge stack segment to
	 * increase our data segment.
	 */
	vmprocess->vm_arch.vm_data_top =
		(vmprocess->vm_arch.vm_seg[S].mem_vir +
		vmprocess->vm_arch.vm_seg[S].mem_len) << CLICK_SHIFT;

	/* Where our free virtual address space starts.
	 * This is only a hint to the VM system.
	 */
	newpt->pt_virtop = 0;

	/* Let other functions know VM now has a private page table. */
	vmprocess->vm_flags |= VMF_HASPT;

	/* Now reserve another pde for kernel's own mappings. */
	{
		int kernmap_pde;
		phys_bytes addr, len;
		int flags, index = 0;
		u32_t offset = 0;

		kernmap_pde = free_pde++;
		offset = kernmap_pde * I386_BIG_PAGE_SIZE;

		while(sys_vmctl_get_mapping(index, &addr, &len,
			&flags) == OK) {
			vir_bytes vir;
			if(index >= MAX_KERNMAPPINGS)
				panic("VM: too many kernel mappings: %d", index);
			kern_mappings[index].phys_addr = addr;
			kern_mappings[index].len = len;
			kern_mappings[index].flags = flags;
			kern_mappings[index].lin_addr = offset;
			/* NOTE(review): this overwrites the 'flags' member
			 * saved three lines up with fixed PTE bits; the
			 * incoming flags are only used for VMMF_UNCACHED. */
			kern_mappings[index].flags =
				I386_VM_PRESENT | I386_VM_USER | I386_VM_WRITE |
				global_bit;
			if(flags & VMMF_UNCACHED)
				kern_mappings[index].flags |= PTF_NOCACHE;
			if(addr % I386_PAGE_SIZE)
				panic("VM: addr unaligned: %d", addr);
			if(len % I386_PAGE_SIZE)
				panic("VM: len unaligned: %d", len);
			vir = arch_map2vir(&vmproc[VMP_SYSTEM], offset);
			if(sys_vmctl_reply_mapping(index, vir) != OK)
				panic("VM: reply failed");
			offset += len;
			index++;
			kernmappings++;
		}
	}

	/* Find a PDE below processes available for mapping in the
	 * page directories (readonly).
	 */
	pagedir_pde = free_pde++;
	pagedir_pde_val = (page_directories_phys & I386_VM_ADDR_MASK) |
			I386_VM_PRESENT | I386_VM_USER | I386_VM_WRITE;

	/* Tell kernel about free pde's. */
	while(free_pde*I386_BIG_PAGE_SIZE < VM_PROCSTART) {
		if((r=sys_vmctl(SELF, VMCTL_I386_FREEPDE, free_pde++)) != OK) {
			panic("VMCTL_I386_FREEPDE failed: %d", r);
		}
	}

	/* first pde in use by process. */
	proc_pde = free_pde;

	/* Give our process the new, copied, private page table. */
	pt_mapkernel(newpt);	/* didn't know about vm_dir pages earlier */
	pt_bind(newpt, vmprocess);

	/* new segment limit for the kernel after paging is enabled */
	ep_data.data_seg_limit = free_pde*I386_BIG_PAGE_SIZE;
	/* the memory map which must be installed after paging is enabled */
	ep_data.mem_map = vmprocess->vm_arch.vm_seg;

	/* Now actually enable paging. */
	if(sys_vmctl_enable_paging(&ep_data) != OK)
		panic("pt_init: enable paging failed");

	/* Back to reality - this is where the stack actually is. */
	vmprocess->vm_arch.vm_seg[S].mem_len -= extra_clicks;

	/* Pretend VM stack top is the same as any regular process, not to
	 * have discrepancies with new VM instances later on.
	 */
	vmprocess->vm_stacktop = VM_STACKTOP;

	/* All OK. */
	return;
}