/*
 * vnode_pager_data_request
 *
 * Handle a page-in request from the VM system: look up the vnode pager
 * backing "mem_obj", let the VM layer size an I/O cluster around the
 * faulting "offset", then issue the cluster read.
 *
 * Returns the result of vnode_pager_cluster_read().
 */
kern_return_t
vnode_pager_data_request(
	memory_object_t			mem_obj,
	memory_object_offset_t		offset,
	__unused vm_size_t		length,
	__unused vm_prot_t		desired_access,
	memory_object_fault_info_t	fault_info)
{
	vnode_pager_t	vnode_object;	/* obsolete "register" qualifier removed */
	vm_size_t	size;
#if MACH_ASSERT
	memory_object_offset_t	original_offset = offset;
#endif /* MACH_ASSERT */

	vnode_object = vnode_pager_lookup(mem_obj);

	/*
	 * Start from the largest transfer we allow; memory_object_cluster_size()
	 * may shrink the cluster and/or move "offset" back to its base.
	 */
	size = MAX_UPL_TRANSFER * PAGE_SIZE;

	if (memory_object_cluster_size(vnode_object->control_handle,
				       &offset, &size, fault_info) != KERN_SUCCESS)
		size = PAGE_SIZE;	/* fall back to a single page */

	/* The faulting page must still lie within the adjusted cluster. */
	assert(original_offset >= offset &&
	       original_offset < offset + size);

	return vnode_pager_cluster_read(vnode_object, offset, size);
}
/*
 * vnode_pager_data_request
 *
 * Page-in entry point: compute the I/O cluster surrounding the faulting
 * offset (and whether streaming I/O applies), then issue the cluster
 * read against the backing vnode.
 */
kern_return_t
vnode_pager_data_request(
	memory_object_t				mem_obj,
	memory_object_offset_t			offset,
	__unused memory_object_cluster_size_t	length,
	__unused vm_prot_t			desired_access,
	memory_object_fault_info_t		fault_info)
{
	vnode_pager_t		pager;
	memory_object_offset_t	base_offset;
	vm_size_t		io_size;
	uint32_t		io_streaming = 0;
	kern_return_t		kr;

	pager = vnode_pager_lookup(mem_obj);

	/* Ask the VM layer to size the cluster; start from the maximum. */
	io_size = MAX_UPL_TRANSFER * PAGE_SIZE;
	base_offset = offset;

	kr = memory_object_cluster_size(pager->control_handle,
					&base_offset, &io_size,
					&io_streaming, fault_info);
	if (kr != KERN_SUCCESS)
		io_size = PAGE_SIZE;	/* single-page fallback */

	/* The faulting offset must fall inside the chosen cluster. */
	assert(offset >= base_offset && offset < base_offset + io_size);

	return vnode_pager_cluster_read(pager, base_offset, offset,
					io_streaming, io_size);
}
/*
 * vnode_pager_reference
 *
 * Take an additional reference on the vnode pager backing "mem_obj".
 * The caller must already hold a reference, hence the assertion that
 * the count exceeds 1 after the increment.
 */
void
vnode_pager_reference(
	memory_object_t	mem_obj)
{
	vnode_pager_t	vnode_object;	/* obsolete "register" qualifier removed */
	unsigned int	new_ref_count;

	vnode_object = vnode_pager_lookup(mem_obj);
	new_ref_count = hw_atomic_add(&vnode_object->ref_count, 1);
	assert(new_ref_count > 1);
}
/*
 * vnode_pager_unmap
 *
 * Notify the UBC layer that the last mapping of this memory object has
 * gone away.  Always succeeds.
 */
kern_return_t
vnode_pager_unmap(
	memory_object_t	mem_obj)
{
	vnode_pager_t	vnode_object;	/* obsolete "register" qualifier removed */

	PAGER_DEBUG(PAGER_ALL, ("vnode_pager_unmap: %p\n", mem_obj));

	vnode_object = vnode_pager_lookup(mem_obj);
	ubc_unmap(vnode_object->vnode_handle);
	return KERN_SUCCESS;
}
/*
 * vnode_pager_get_isSSD
 *
 * Report whether the vnode behind "mem_obj" resides on solid-state
 * storage.  Fails if the object is not a vnode pager.
 */
kern_return_t
vnode_pager_get_isSSD(
	memory_object_t	mem_obj,
	boolean_t	*isSSD)
{
	vnode_pager_t	pager;

	if (mem_obj->mo_pager_ops != &vnode_pager_ops) {
		return KERN_INVALID_ARGUMENT;
	}

	pager = vnode_pager_lookup(mem_obj);
	*isSSD = vnode_pager_isSSD(pager->vnode_handle);
	return KERN_SUCCESS;
}
/*
 * vnode_pager_cs_check_validation_bitmap
 *
 * Forward a code-signing validation-bitmap operation ("optype") at
 * "offset" to the UBC layer for the backing vnode.
 */
kern_return_t
vnode_pager_cs_check_validation_bitmap(
	memory_object_t		mem_obj,
	memory_object_offset_t	offset,
	int			optype)
{
	vnode_pager_t	pager;

	if (mem_obj == MEMORY_OBJECT_NULL ||
	    mem_obj->mo_pager_ops != &vnode_pager_ops) {
		return KERN_INVALID_ARGUMENT;
	}

	pager = vnode_pager_lookup(mem_obj);
	return ubc_cs_check_validation_bitmap(pager->vnode_handle,
					      offset, optype);
}
/*
 * vnode_pager_check_hard_throttle
 *
 * Retrieve the hard-throttle I/O limit (via "limit") for the vnode
 * behind "mem_obj".  The helper's return value is deliberately ignored;
 * this call always reports success once the object checks out.
 */
kern_return_t
vnode_pager_check_hard_throttle(
	memory_object_t	mem_obj,
	uint32_t	*limit,
	uint32_t	hard_throttle)
{
	vnode_pager_t	vnode_object;

	/*
	 * Guard against a NULL object as well as a foreign pager,
	 * matching the other accessors in this file (e.g.
	 * vnode_pager_get_object_cs_blobs).
	 */
	if (mem_obj == MEMORY_OBJECT_NULL ||
	    mem_obj->mo_pager_ops != &vnode_pager_ops)
		return KERN_INVALID_ARGUMENT;

	vnode_object = vnode_pager_lookup(mem_obj);

	(void)vnode_pager_return_hard_throttle_limit(vnode_object->vnode_handle,
						     limit, hard_throttle);
	return KERN_SUCCESS;
}
/*
 * vnode_pager_get_object_filename
 *
 * Return (via "filename") the name of the vnode backing "mem_obj".
 * Fails if the object is not a vnode pager.
 */
kern_return_t
vnode_pager_get_object_filename(
	memory_object_t	mem_obj,
	const char	**filename)
{
	vnode_pager_t	pager;

	if (mem_obj->mo_pager_ops != &vnode_pager_ops) {
		return KERN_INVALID_ARGUMENT;
	}

	pager = vnode_pager_lookup(mem_obj);
	return vnode_pager_get_filename(pager->vnode_handle, filename);
}
/*
 * vnode_pager_get_object_size
 *
 * Return the size of the backing file via "length".  On a foreign
 * pager, "length" is zeroed and an error is returned.
 */
kern_return_t
vnode_pager_get_object_size(
	memory_object_t		mem_obj,
	memory_object_offset_t	*length)
{
	vnode_pager_t	pager;

	if (mem_obj->mo_pager_ops != &vnode_pager_ops) {
		*length = 0;
		return KERN_INVALID_ARGUMENT;
	}

	pager = vnode_pager_lookup(mem_obj);
	*length = vnode_pager_get_filesize(pager->vnode_handle);
	return KERN_SUCCESS;
}
/*
 * vnode_pager_synchronize
 *
 * Acknowledge a memory_object_synchronize() request: the pager simply
 * reports completion of the whole range back to the VM layer.
 * "sync_flags" is unused here.
 */
kern_return_t
vnode_pager_synchronize(
	memory_object_t		mem_obj,
	memory_object_offset_t	offset,
	vm_size_t		length,
	__unused vm_sync_t	sync_flags)
{
	vnode_pager_t	vnode_object;	/* obsolete "register" qualifier removed */

	PAGER_DEBUG(PAGER_ALL, ("vnode_pager_synchronize: %p\n", mem_obj));

	vnode_object = vnode_pager_lookup(mem_obj);

	memory_object_synchronize_completed(vnode_object->control_handle,
					    offset, length);

	return KERN_SUCCESS;
}
/*
 * vnode_pager_get_isinuse
 *
 * Report whether the backing vnode is in use.  On a foreign pager we
 * conservatively report "in use" along with the error.
 */
kern_return_t
vnode_pager_get_isinuse(
	memory_object_t	mem_obj,
	uint32_t	*isinuse)
{
	vnode_pager_t	pager;

	if (mem_obj->mo_pager_ops != &vnode_pager_ops) {
		*isinuse = 1;
		return KERN_INVALID_ARGUMENT;
	}

	pager = vnode_pager_lookup(mem_obj);
	*isinuse = vnode_pager_isinuse(pager->vnode_handle);
	return KERN_SUCCESS;
}
/*
 * vnode_pager_get_object_cs_blobs
 *
 * Return (via "blobs") the code-signing blobs attached to the vnode
 * backing "mem_obj".
 */
kern_return_t
vnode_pager_get_object_cs_blobs(
	memory_object_t	mem_obj,
	void		**blobs)
{
	vnode_pager_t	pager;

	if (mem_obj == MEMORY_OBJECT_NULL ||
	    mem_obj->mo_pager_ops != &vnode_pager_ops) {
		return KERN_INVALID_ARGUMENT;
	}

	pager = vnode_pager_lookup(mem_obj);
	return vnode_pager_get_cs_blobs(pager->vnode_handle, blobs);
}
/*
 * vnode_pager_deallocate
 *
 * Drop one reference on the pager.  When the last reference goes away,
 * release the vnode (if still attached) and free the pager structure
 * back to its zone.
 */
void
vnode_pager_deallocate(
	memory_object_t	mem_obj)
{
	vnode_pager_t	vnode_object;	/* obsolete "register" qualifier removed */

	PAGER_DEBUG(PAGER_ALL, ("vnode_pager_deallocate: %p\n", mem_obj));

	vnode_object = vnode_pager_lookup(mem_obj);

	if (hw_atomic_sub(&vnode_object->ref_count, 1) == 0) {
		if (vnode_object->vnode_handle != NULL) {
			vnode_pager_vrele(vnode_object->vnode_handle);
		}
		zfree(vnode_pager_zone, vnode_object);
	}
}
/*
 * vnode_pager_get_object_pathname
 *
 * Copy the path of the vnode backing "mem_obj" into "pathname"
 * (buffer length communicated through "length_p").
 */
kern_return_t
vnode_pager_get_object_pathname(
	memory_object_t	mem_obj,
	char		*pathname,
	vm_size_t	*length_p)
{
	vnode_pager_t	pager;

	if (mem_obj->mo_pager_ops != &vnode_pager_ops) {
		return KERN_INVALID_ARGUMENT;
	}

	pager = vnode_pager_lookup(mem_obj);
	return vnode_pager_get_pathname(pager->vnode_handle,
					pathname, length_p);
}
/*
 * vnode_pager_get_object_vnode
 *
 * Return the raw vnode pointer and its generation id ("vid") for the
 * vnode backing "mem_obj".  Fails with KERN_FAILURE when no vnode is
 * attached.
 *
 * NOTE(review): the vnode pointer is squeezed through a (uint32_t)
 * cast; on an LP64 kernel this truncates the upper 32 bits of the
 * address.  Presumably callers treat the value as an opaque cookie or
 * this path is 32-bit-only — confirm before relying on the round-trip.
 */
kern_return_t
vnode_pager_get_object_vnode (
	memory_object_t	mem_obj,
	uint32_t	*vnodeaddr,
	uint32_t	*vid)
{
	vnode_pager_t	vnode_object;

	vnode_object = vnode_pager_lookup(mem_obj);
	if (vnode_object->vnode_handle) {
		/* hand back the (truncated) vnode pointer and its id */
		*vnodeaddr = (uint32_t)vnode_object->vnode_handle;
		*vid = (uint32_t)vnode_vid((void *)vnode_object->vnode_handle);

		return(KERN_SUCCESS);
	}

	return(KERN_FAILURE);
}
/*
 * vnode_pager_data_return
 *
 * Handle a page-out request: hand the dirty range off to the cluster
 * write path.  "dirty" and "kernel_copy" are unused by this pager;
 * residual offset and I/O errors are reported through the pointers.
 */
kern_return_t
vnode_pager_data_return(
	memory_object_t		mem_obj,
	memory_object_offset_t	offset,
	vm_size_t		data_cnt,
	memory_object_offset_t	*resid_offset,
	int			*io_error,
	__unused boolean_t	dirty,
	__unused boolean_t	kernel_copy,
	int			upl_flags)
{
	vnode_pager_t	vnode_object;	/* obsolete "register" qualifier removed */

	vnode_object = vnode_pager_lookup(mem_obj);

	vnode_pager_cluster_write(vnode_object, offset, data_cnt,
				  resid_offset, io_error, upl_flags);

	return KERN_SUCCESS;
}
kern_return_t vnode_pager_init(memory_object_t mem_obj, memory_object_control_t control, #if !DEBUG __unused #endif vm_size_t pg_size) { vnode_pager_t vnode_object; kern_return_t kr; memory_object_attr_info_data_t attributes; PAGER_DEBUG(PAGER_ALL, ("vnode_pager_init: %p, %p, %x\n", mem_obj, control, pg_size)); if (control == MEMORY_OBJECT_CONTROL_NULL) return KERN_INVALID_ARGUMENT; vnode_object = vnode_pager_lookup(mem_obj); memory_object_control_reference(control); vnode_object->control_handle = control; attributes.copy_strategy = MEMORY_OBJECT_COPY_DELAY; /* attributes.cluster_size = (1 << (CLUSTER_SHIFT + PAGE_SHIFT));*/ attributes.cluster_size = (1 << (PAGE_SHIFT)); attributes.may_cache_object = TRUE; attributes.temporary = TRUE; kr = memory_object_change_attributes( control, MEMORY_OBJECT_ATTRIBUTE_INFO, (memory_object_info_t) &attributes, MEMORY_OBJECT_ATTR_INFO_COUNT); if (kr != KERN_SUCCESS) panic("vnode_pager_init: memory_object_change_attributes() failed"); return(KERN_SUCCESS); }
/*
 * vnode_pager_map
 *
 * Called when the memory object is mapped: inform the UBC layer so it
 * can track the mapping with the requested protection.  Reports
 * KERN_FAILURE when ubc_map() reports an error.
 */
kern_return_t
vnode_pager_map(
	memory_object_t	mem_obj,
	vm_prot_t	prot)
{
	vnode_pager_t	pager;
	kern_return_t	kr;

	PAGER_DEBUG(PAGER_ALL, ("vnode_pager_map: %p %x\n", mem_obj, prot));

	pager = vnode_pager_lookup(mem_obj);

	kr = (ubc_map(pager->vnode_handle, prot) != 0)
		? KERN_FAILURE
		: KERN_SUCCESS;

	return kr;
}