/*
 * pmap_copy_part_page
 *
 * Copy 'len' bytes from one (machine independent) physical page to
 * another, at the given byte offsets within each page.  The copy must
 * not cross either page boundary, and neither page may be fictitious
 * or a guard page.
 */
void
pmap_copy_part_page(
	ppnum_t		psrc,
	vm_offset_t	src_offset,
	ppnum_t		pdst,
	vm_offset_t	dst_offset,
	vm_size_t	len)
{
	pmap_paddr_t	src_pa;
	pmap_paddr_t	dst_pa;

	/* Neither side may be a fictitious or guard page. */
	assert(psrc != vm_page_fictitious_addr);
	assert(psrc != vm_page_guard_addr);
	assert(pdst != vm_page_fictitious_addr);
	assert(pdst != vm_page_guard_addr);

	src_pa = i386_ptob(psrc);
	dst_pa = i386_ptob(pdst);

	/* The requested region must lie entirely within each page. */
	assert((((uintptr_t)src_pa & PAGE_MASK) + src_offset + len) <= PAGE_SIZE);
	assert((((uintptr_t)dst_pa & PAGE_MASK) + dst_offset + len) <= PAGE_SIZE);

	bcopy_phys((addr64_t)src_pa + (src_offset & INTEL_OFFMASK),
	           (addr64_t)dst_pa + (dst_offset & INTEL_OFFMASK),
	           len);
}
/*
 * kdp_vm_read
 *
 * Kernel debugger (KDP) read: copy 'len' bytes from kernel virtual
 * address 'src' to 'dst'.  The 64-bit source address is reconstructed
 * from the 32-bit 'src' pointer plus the stashed high word
 * kdp_src_high32.
 *
 * NOTE(review): this block is truncated in the visible source -- only
 * the kdp_trans_off (address translation disabled) path is shown; the
 * else-branch continues past the end of this chunk.
 */
unsigned kdp_vm_read(
	caddr_t src,
	caddr_t dst,
	unsigned len)
{
	addr64_t cur_virt_src, cur_virt_dst;
	addr64_t cur_phys_src, cur_phys_dst;
	unsigned resid, cnt;
	unsigned int dummy;
	pmap_t pmap;

#ifdef KDP_VM_READ_DEBUG
	kprintf("kdp_vm_read1: src %x dst %x len %x - %08X %08X\n", src, dst, len, ((unsigned long *)src)[0], ((unsigned long *)src)[1]);
#endif
	/* Widen the 32-bit src pointer with the saved high 32 bits. */
	cur_virt_src = (addr64_t)((unsigned int)src | (((uint64_t)kdp_src_high32) << 32));
	cur_virt_dst = (addr64_t)((unsigned int)dst);

	if (kdp_trans_off) {
		resid = len;	/* Get the length to copy */

		while (resid != 0) {
			/* Destination is virtual: translate one page at a time. */
			if((cur_phys_dst = kdp_vtophys(kernel_pmap, cur_virt_dst)) == 0)
				goto exit;
			/* Unless I/O reads are allowed, refuse unbacked physical pages. */
			if(kdp_read_io == 0)
				if(!mapping_phys_lookup((ppnum_t)(cur_virt_src >> 12), &dummy))
					return 0;	/* Can't read where there's not any memory */
			/* Clamp the chunk to the page boundary of both source and sink. */
			cnt = 4096 - (cur_virt_src & 0xFFF);	/* Get length left on page */
			if (cnt > (4096 - (cur_virt_dst & 0xFFF)))
				cnt = 4096 - (cur_virt_dst & 0xFFF);
			if (cnt > resid)
				cnt = resid;
			bcopy_phys(cur_virt_src, cur_phys_dst, cnt);	/* Copy stuff over */
			cur_virt_src += cnt;
			cur_virt_dst += cnt;
			resid -= cnt;
		}
	} else {
/*
 * mdevstrategy
 *
 * Strategy routine for the memory-backed disk driver: carries out the
 * read or write described by buffer 'bp' against the minor device's
 * backing store.  mdSize/mdBase are in 4KB pages (hence the << 12
 * shifts).  Reading exactly at EOF returns with resid == count;
 * reading past EOF is EINVAL.
 *
 * NOTE(review): this block is truncated in the visible source -- the
 * write path is cut off mid-branch at the end of this chunk.
 */
static void mdevstrategy(struct buf *bp) {
	unsigned int left, lop, csize;
	vm_offset_t vaddr, blkoff;
	int devid;
	addr64_t paddr, fvaddr;
	ppnum_t pp;

	devid = minor(buf_device(bp));	/* Get minor device number */

	if ((mdev[devid].mdFlags & mdInited) == 0) {	/* Have we actually been defined yet? */
		buf_seterror(bp, ENXIO);
		buf_biodone(bp);
		return;
	}

	buf_setresid(bp, buf_count(bp));	/* Set byte count */

	blkoff = buf_blkno(bp) * mdev[devid].mdSecsize;	/* Get offset into file */

	/*
	 * Note that reading past end is an error, but reading at end is an EOF. For these
	 * we just return with resid == count.
	 */

	if (blkoff >= (mdev[devid].mdSize << 12)) {	/* Are they trying to read/write at/after end? */
		if(blkoff != (mdev[devid].mdSize << 12)) {	/* Are we trying to read after EOF? */
			buf_seterror(bp, EINVAL);	/* Yeah, this is an error */
		}
		buf_biodone(bp);	/* Return */
		return;
	}

	if ((blkoff + buf_count(bp)) > (mdev[devid].mdSize << 12)) {	/* Will this read go past end? */
		buf_setcount(bp, ((mdev[devid].mdSize << 12) - blkoff));	/* Yes, trim to max */
	}
	/*
	 * make sure the buffer's data area is
	 * accessible
	 */
	if (buf_map(bp, (caddr_t *)&vaddr))
		panic("ramstrategy: buf_map failed\n");

	fvaddr = (mdev[devid].mdBase << 12) + blkoff;	/* Point to offset into ram disk */

	if (buf_flags(bp) & B_READ) {	/* Is this a read? */
		if(!(mdev[devid].mdFlags & mdPhys)) {	/* Physical mapped disk? */
			/* Virtually mapped backing store: a plain bcopy suffices. */
			bcopy((void *)((uintptr_t)fvaddr), (void *)vaddr, (size_t)buf_count(bp));	/* This is virtual, just get the data */
		} else {
			/* Physically mapped: copy page-bounded chunks via bcopy_phys. */
			left = buf_count(bp);	/* Init the amount left to copy */

			while(left) {	/* Go until it is all copied */

				lop = min((4096 - (vaddr & 4095)), (4096 - (fvaddr & 4095)));	/* Get smallest amount left on sink and source */
				csize = min(lop, left);	/* Don't move more than we need to */

				pp = pmap_find_phys(kernel_pmap, (addr64_t)((uintptr_t)vaddr));	/* Get the sink physical address */
				if(!pp) {	/* Not found, what gives? */
					panic("mdevstrategy: sink address %016llX not mapped\n", (addr64_t)((uintptr_t)vaddr));
				}
				paddr = (addr64_t)(((addr64_t)pp << 12) | (addr64_t)(vaddr & 4095));	/* Get actual address */
				bcopy_phys(fvaddr, paddr, csize);	/* Copy this on in */
				mapping_set_mod(paddr >> 12);	/* Make sure we know that it is modified */

				left = left - csize;	/* Calculate what is left */
				vaddr = vaddr + csize;	/* Move to next sink address */
				fvaddr = fvaddr + csize;	/* Bump to next physical address */
			}
		}
	} else {	/* This is a write */
		if(!(mdev[devid].mdFlags & mdPhys)) {	/* Physical mapped disk? */
/*
 * copypv
 *
 * Copy 'size' bytes between a source and a sink, either (or both) of
 * which may be given as a physical address -- cppvPsrc / cppvPsnk in
 * 'which' say which sides are physical.  At least one side must be
 * physical; supplying neither flag is a panic.  The copy proceeds in
 * chunks that never cross a page boundary on any physical side.
 *
 * Returns KERN_SUCCESS, or KERN_FAILURE if the mixed physical/virtual
 * copy (copyio_phys) fails.
 */
kern_return_t copypv(addr64_t src64, addr64_t snk64, unsigned int size, int which)
{
	unsigned int	chunk, copy_len;
	int		both_physical = 0;

	KERNEL_DEBUG(0xeff7004c | DBG_FUNC_START, (unsigned)src64,
		     (unsigned)snk64, size, which, 0);

	/* At least one of the two addresses must be physical. */
	if ((which & (cppvPsrc | cppvPsnk)) == 0 )
		panic("copypv: no more than 1 parameter may be virtual\n");

	if ((which & (cppvPsrc | cppvPsnk)) == (cppvPsrc | cppvPsnk))
		both_physical = 1;	/* both are physical */

	while (size) {
		if (both_physical) {
			/* Chunk is bounded by whichever page ends first. */
			unsigned int src_left = (unsigned int)(PAGE_SIZE - (src64 & (PAGE_SIZE - 1)));
			unsigned int snk_left = (unsigned int)(PAGE_SIZE - (snk64 & (PAGE_SIZE - 1)));

			chunk = (snk_left < src_left) ? snk_left : src_left;
		} else {
			/*
			 * Only the physical side constrains the chunk size;
			 * the virtual side goes through the normal
			 * copyin/copyout machinery, which handles any offset.
			 */
			if (which & cppvPsrc)
				chunk = (unsigned int)(PAGE_SIZE - (src64 & (PAGE_SIZE - 1)));
			else
				chunk = (unsigned int)(PAGE_SIZE - (snk64 & (PAGE_SIZE - 1)));
		}
		copy_len = (chunk < size) ? chunk : size;	/* never more than one page-chunk */

#if 0
		/*
		 * flush_dcache64 is currently a nop on the i386...
		 * it's used when copying to non-system memory such
		 * as video capture cards... on PPC there was a need
		 * to flush due to how we mapped this memory... not
		 * sure if it's needed on i386.
		 */
		if (which & cppvFsrc)
			flush_dcache64(src64, copy_len, 1);	/* If requested, flush source before move */
		if (which & cppvFsnk)
			flush_dcache64(snk64, copy_len, 1);	/* If requested, flush sink before move */
#endif
		if (both_physical)
			bcopy_phys(src64, snk64, copy_len);	/* Do a physical copy, virtually */
		else {
			if (copyio_phys(src64, snk64, copy_len, which))
				return (KERN_FAILURE);
		}
#if 0
		if (which & cppvFsrc)
			flush_dcache64(src64, copy_len, 1);	/* If requested, flush source after move */
		if (which & cppvFsnk)
			flush_dcache64(snk64, copy_len, 1);	/* If requested, flush sink after move */
#endif
		size -= copy_len;	/* Calculate what is left */
		snk64 += copy_len;	/* Bump sink to next physical address */
		src64 += copy_len;	/* Bump source to next physical address */
	}
	KERNEL_DEBUG(0xeff7004c | DBG_FUNC_END, (unsigned)src64,
		     (unsigned)snk64, size, which, 0);

	return KERN_SUCCESS;
}
/*
 * cpu_machine_idle_init
 *
 * Set up the per-cpu reset/idle machinery.  On the first (boot-time)
 * call this parses the "jtag" and "wfi" boot-args, optionally patches
 * the WFI instruction to a NOP, copies the low exception vectors into
 * place (skipping the 0x90..0xA0 window -- the sleep token), and
 * publishes the physical addresses of BootArgs and CpuDataEntries into
 * ResetHandlerData for the reset handler.  On every call it installs
 * resume_idle_cpu as this cpu's reset handler and cleans the cpu_data
 * from the data cache so the (MMU-off) reset path sees it.
 */
void
cpu_machine_idle_init(boolean_t from_boot)
{
	/* Statics persist across calls; filled in on the from_boot pass. */
	static const unsigned int	*BootArgs_paddr = (unsigned int *)NULL;
	static const unsigned int	*CpuDataEntries_paddr = (unsigned int *)NULL;
	static unsigned int		resume_idle_cpu_paddr = (unsigned int )NULL;
	cpu_data_t			*cpu_data_ptr = getCpuDatap();

	if (from_boot) {
		unsigned int    jtag = 0;
		unsigned int    wfi;


		/* "jtag" boot-arg non-zero disables idling (keeps JTAG attached). */
		if (PE_parse_boot_argn("jtag", &jtag, sizeof (jtag))) {
			if (jtag != 0)
				idle_enable = FALSE;
			else
				idle_enable = TRUE;
		} else
			idle_enable = TRUE;

		if (!PE_parse_boot_argn("wfi", &wfi, sizeof (wfi)))
			wfi = 1;	/* default: WFI enabled */

		/* wfi=0: patch the WFI instruction (in physical memory) to a NOP. */
		if (wfi == 0)
			bcopy_phys((addr64_t)ml_static_vtop((vm_offset_t)&patch_to_nop),
			           (addr64_t)ml_static_vtop((vm_offset_t)&wfi_inst), sizeof(unsigned));
		if (wfi == 2)
			wfi_fast = 0;

		LowExceptionVectorsAddr = (void *)ml_io_map(ml_vtophys((vm_offset_t)gPhysBase), PAGE_SIZE);

		/* Copy Exception Vectors low, but don't touch the sleep token */
		bcopy((void *)&ExceptionLowVectorsBase, (void *)LowExceptionVectorsAddr, 0x90);
		bcopy(((void *)(((vm_offset_t)&ExceptionLowVectorsBase) + 0xA0)), ((void *)(((vm_offset_t)LowExceptionVectorsAddr) + 0xA0)), ARM_PGBYTES - 0xA0);

		start_cpu_paddr = ml_static_vtop((vm_offset_t)&start_cpu);

		/*
		 * Patch the physical addresses of BootArgs and CpuDataEntries
		 * into the ResetHandlerData image that now lives at gPhysBase
		 * (offsets computed relative to ExceptionLowVectorsBase).
		 * NOTE(review): 4-byte stores -- assumes 32-bit physical
		 * pointers in ResetHandlerData; confirm for this target.
		 */
		BootArgs_paddr = (unsigned int *)ml_static_vtop((vm_offset_t)BootArgs);
		bcopy_phys((addr64_t)ml_static_vtop((vm_offset_t)&BootArgs_paddr),
		           (addr64_t)((unsigned int)(gPhysBase) +
		                      ((unsigned int)&(ResetHandlerData.boot_args) - (unsigned int)&ExceptionLowVectorsBase)),
		           4);

		CpuDataEntries_paddr = (unsigned int *)ml_static_vtop((vm_offset_t)CpuDataEntries);
		bcopy_phys((addr64_t)ml_static_vtop((vm_offset_t)&CpuDataEntries_paddr),
		           (addr64_t)((unsigned int)(gPhysBase) +
		                      ((unsigned int)&(ResetHandlerData.cpu_data_entries) - (unsigned int)&ExceptionLowVectorsBase)),
		           4);

		/* Push the patched page to the point of coherency for the reset path. */
		CleanPoC_DcacheRegion((vm_offset_t) phystokv(gPhysBase), PAGE_SIZE);

		resume_idle_cpu_paddr = (unsigned int)ml_static_vtop((vm_offset_t)&resume_idle_cpu);
	}

	if (cpu_data_ptr == &BootCpuData) {
		bcopy(((const void *)running_signature), (void *)(IOS_STATE), IOS_STATE_SIZE);
	};

	/* Every cpu resumes through resume_idle_cpu; reset runs with caches off,
	 * so clean this cpu's data out to memory. */
	cpu_data_ptr->cpu_reset_handler = resume_idle_cpu_paddr;
	clean_dcache((vm_offset_t)cpu_data_ptr, sizeof(cpu_data_t), FALSE);
}