Code example #1
File: mmap.c Project: rohsaini/mkunity
int
osfmach3_msync(
	struct vm_area_struct	*vmp,
	unsigned long		address,
	size_t			size,
	int			flags)
{
	kern_return_t	kr;
	struct osfmach3_mach_task_struct	*mach_task;
	vm_sync_t		sync_flags;

	mach_task = vmp->vm_mm->mm_mach_task;

	sync_flags = 0;
	if (flags & MS_ASYNC)
		sync_flags |= VM_SYNC_ASYNCHRONOUS;
	if (flags & MS_SYNC)
		sync_flags |= VM_SYNC_SYNCHRONOUS;
	if (flags & MS_INVALIDATE)
		sync_flags |= VM_SYNC_INVALIDATE;

	server_thread_blocking(FALSE);
	kr = vm_msync(mach_task->mach_task_port,
		      (vm_address_t) address,
		      (vm_size_t) size,
		      sync_flags);
	server_thread_unblocking(FALSE);

	if (kr != KERN_SUCCESS) {
		MACH3_DEBUG(1, kr,
			    ("osfmach3_msync(%p,%ld,%ld,0x%x): "
			     "vm_msync",
			     vmp, address, (unsigned long) size, flags));
		return -EIO;
	}

	return 0;
}
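
For orientation, the vm_msync() call above can also be issued by an ordinary user program against its own task port instead of mach_task->mach_task_port. The following is a minimal, self-contained sketch, assuming a Mach-based host (such as macOS) where <mach/mach.h> and <mach/mach_error.h> are available; it is not part of the mkunity sources and the error handling is deliberately simplified.

#include <stdio.h>
#include <string.h>
#include <mach/mach.h>        /* vm_allocate, vm_msync, vm_deallocate, VM_SYNC_* */
#include <mach/mach_error.h>  /* mach_error_string */

int
main(void)
{
	kern_return_t	kr;
	vm_address_t	addr = 0;
	vm_size_t	size = vm_page_size;

	/* One page in our own task; TRUE means "allocate anywhere",
	 * the same convention pass2() uses in example #3 below. */
	kr = vm_allocate(mach_task_self(), &addr, size, TRUE);
	if (kr != KERN_SUCCESS) {
		fprintf(stderr, "vm_allocate: %s\n", mach_error_string(kr));
		return 1;
	}
	memset((void *) addr, 0xaa, size);	/* dirty the page */

	/* MS_SYNC | MS_INVALIDATE in the mapping above translates to this. */
	kr = vm_msync(mach_task_self(), addr, size,
		      VM_SYNC_SYNCHRONOUS | VM_SYNC_INVALIDATE);
	if (kr != KERN_SUCCESS)
		fprintf(stderr, "vm_msync: %s\n", mach_error_string(kr));

	(void) vm_deallocate(mach_task_self(), addr, size);
	return 0;
}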
Code example #2
File: mmap.c Project: rohsaini/mkunity
void
osfmach3_exit_mmap(
	struct mm_struct * mm)
{
	struct osfmach3_mach_task_struct *mach_task;
	kern_return_t	kr;

	mach_task = mm->mm_mach_task;

	user_memory_flush_task(mach_task);

	/* flush the memory out of the kernel cache */
	server_thread_blocking(FALSE);
	kr = vm_msync(mach_task->mach_task_port,
		      VM_MIN_ADDRESS,
		      VM_MAX_ADDRESS - VM_MIN_ADDRESS,
		      VM_SYNC_SYNCHRONOUS);
	server_thread_unblocking(FALSE);
	if (kr != KERN_SUCCESS) {
		if (kr != MACH_SEND_INVALID_DEST &&
		    kr != KERN_INVALID_ARGUMENT) {
			MACH3_DEBUG(1, kr, ("osfmach3_exit_mmap: vm_msync"));
		}
	}

	kr = vm_deallocate(mach_task->mach_task_port,
			   VM_MIN_ADDRESS,
			   VM_MAX_ADDRESS - VM_MIN_ADDRESS);
	if (kr != KERN_SUCCESS) {
		if (kr != MACH_SEND_INVALID_DEST &&
		    kr != KERN_INVALID_ARGUMENT) {
			MACH3_DEBUG(1, kr,
				    ("osfmach3_exit_mmap: vm_deallocate"));
		}
	}
}
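
The error handling here (and again in example #4) deliberately swallows MACH_SEND_INVALID_DEST and KERN_INVALID_ARGUMENT, which just mean the target task or address range is already gone. A small sketch of that convention as a standalone helper; the name report_unless_task_gone is illustrative and not part of the mkunity sources.

#include <stdio.h>
#include <mach/mach.h>        /* KERN_*, MACH_SEND_INVALID_DEST */
#include <mach/mach_error.h>  /* mach_error_string */

/* Illustrative helper: report a Mach VM error unless it is one of the
 * "target already went away" cases that osfmach3_exit_mmap() tolerates. */
static void
report_unless_task_gone(const char *what, kern_return_t kr)
{
	if (kr == KERN_SUCCESS ||
	    kr == MACH_SEND_INVALID_DEST ||	/* task port is already dead */
	    kr == KERN_INVALID_ARGUMENT)	/* address range already gone */
		return;
	fprintf(stderr, "%s: %s\n", what, mach_error_string(kr));
}

A caller would then write report_unless_task_gone("vm_msync", kr); in place of the nested if blocks above.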
Code example #3
File: pass2.c Project: bihai/xchain
/*
 * pass2() creates the output file and the memory buffer to create the file
 * into.  It drives the process to get everything copied into the buffer for
 * the output file.  It then writes the output file and deallocates the buffer.
 */ 
extern
void
pass2(void)
{
    unsigned long i, j, section_type;
    struct object_list *object_list, **p;
#ifndef RLD
    int mode;
    struct stat stat_buf;
    kern_return_t r;

	/*
	 * In UNIX standard conformance mode we are not allowed to replace
	 * a file that is not writeable.
	 */
	if(get_unix_standard_mode() == TRUE && 
	   access(outputfile, F_OK) == 0 &&
	   access(outputfile, W_OK) == -1)
	    system_fatal("can't write output file: %s", outputfile);

	/*
	 * Create the output file.  The unlink() is done to handle the problem
	 * when the outputfile is not writable but the directory allows the
	 * file to be removed (since the file may not be there the return code
	 * of the unlink() is ignored).
	 */
	(void)unlink(outputfile);
	if((fd = open(outputfile, O_WRONLY | O_CREAT | O_TRUNC, 0777)) == -1)
	    system_fatal("can't create output file: %s", outputfile);
#ifdef F_NOCACHE
        /* tell filesystem to NOT cache the file when reading or writing */
	(void)fcntl(fd, F_NOCACHE, 1);
#endif
	if(fstat(fd, &stat_buf) == -1)
	    system_fatal("can't stat file: %s", outputfile);
	/*
	 * Turn the execute bits on or off depending if there are any undefined
	 * symbols in the output file.  If the file existed before the above
	 * open() call the creation mode in that call would have been ignored
	 * so it has to be set explicitly in any case.
	 */
	if(output_mach_header.flags & MH_NOUNDEFS ||
	   (has_dynamic_linker_command && output_for_dyld))
	    mode = (stat_buf.st_mode & 0777) | (0111 & ~umask(0));
	else
	    mode = (stat_buf.st_mode & 0777) & ~0111;
	if(fchmod(fd, mode) == -1)
	    system_fatal("can't set execution permissions output file: %s",
			 outputfile);

	/*
	 * Create the buffer to copy the parts of the output file into.
	 */
	if((r = vm_allocate(mach_task_self(), (vm_address_t *)&output_addr,
			    output_size, TRUE)) != KERN_SUCCESS)
	    mach_fatal(r, "can't vm_allocate() buffer for output file of size "
		       "%lu", output_size);

	/*
	 * Set up for flushing pages to the output file as they fill up.
	 */
	if(flush)
	    setup_output_flush();

	/*
	 * Make sure pure_instruction sections are padded with nop's.
	 */
	nop_pure_instruction_scattered_sections();

#endif /* !defined(RLD) */

	/*
	 * The strings indexes for the merged string blocks need to be set
	 * before the dylib tables are output because the module names are in
	 * them as well as the merged symbol names.
	 */
	set_merged_string_block_indexes();

#ifndef RLD
	/*
	 * Copy the dylib tables into the output file.  This is done before the
	 * sections are outputted so that the indexes to the local and external
	 * relocation entries for each object can be used as running indexes as
	 * each section in the object is outputted.
	 */
	if(filetype == MH_DYLIB)
	    output_dylib_tables();
#endif /* !defined(RLD) */

	/*
	 * Create the array of pointers to merged sections in the output file
	 * so the relocation routines can use it to set the 'referenced' fields
	 * in the merged section structures.
	 */
	create_output_sections_array();

	/*
	 * Copy the merged literal sections and the sections created from files
	 * into the output object file.
	 */
	output_literal_sections();
#ifndef RLD
	output_sections_from_files();
#endif /* !defined(RLD) */

	/*
	 * For each non-literal content section in each object file loaded 
	 * relocate it into the output file (along with the relocation entries).
	 * Then relocate local symbols into the output file for the loaded
	 * objects.
	 */
	for(p = &objects; *p; p = &(object_list->next)){
	    object_list = *p;
	    for(i = 0; i < object_list->used; i++){
		cur_obj = &(object_list->object_files[i]);
		/* print the object file name if tracing */
		if(trace){
		    print_obj_name(cur_obj);
		    print("\n");
		}
		if(cur_obj->dylib)
		    continue;
		if(cur_obj->bundle_loader)
		    continue;
		if(cur_obj->dylinker)
		    continue;
		if(cur_obj != base_obj){
		    for(j = 0; j < cur_obj->nsection_maps; j++){
			if(cur_obj->section_maps[j].s->flags & S_ATTR_DEBUG)
			    continue;
#ifdef RLD
			if(cur_obj->set_num == cur_set)
#endif /* RLD */
			{
			    section_type = (cur_obj->section_maps[j].s->flags &
                                                   SECTION_TYPE);
			    if(section_type == S_REGULAR ||
			       section_type == S_SYMBOL_STUBS ||
			       section_type == S_NON_LAZY_SYMBOL_POINTERS ||
			       section_type == S_LAZY_SYMBOL_POINTERS ||
			       section_type == S_COALESCED ||
			       section_type == S_MOD_INIT_FUNC_POINTERS ||
			       section_type == S_MOD_TERM_FUNC_POINTERS){
				output_section(&(cur_obj->section_maps[j]));
			    }
			}
		    }
		}
		output_local_symbols();
#if defined(VM_SYNC_DEACTIVATE) && !defined(_POSIX_C_SOURCE) && !defined(__CYGWIN__)
		vm_msync(mach_task_self(), (vm_address_t)cur_obj->obj_addr,
			 (vm_size_t)cur_obj->obj_size, VM_SYNC_DEACTIVATE);
#endif /* VM_SYNC_DEACTIVATE */
	    }
	}
	/*
	 * If there were errors in output_section() then return so as not
	 * to cause later internal errors.
	 */
	if(errors != 0)
	    return;

#ifdef RLD
	/*
	 * For each content section clean up the data structures not needed
	 * after rld is run.  This must be done after ALL the sections are
	 * output'ed because the fine relocation entries could be used by any
	 * of the sections.
	 */
	for(p = &objects; *p; p = &(object_list->next)){
	    object_list = *p;
	    for(i = 0; i < object_list->used; i++){
		cur_obj = &(object_list->object_files[i]);
		for(j = 0; j < cur_obj->nsection_maps; j++){
		    if(cur_obj->section_maps[j].nfine_relocs != 0){
			free(cur_obj->section_maps[j].fine_relocs);
			cur_obj->section_maps[j].fine_relocs = NULL;
			cur_obj->section_maps[j].nfine_relocs = 0;
		    }
		}
		if(cur_obj->nundefineds != 0){
		    free(cur_obj->undefined_maps);
		    cur_obj->undefined_maps = NULL;
		    cur_obj->nundefineds = 0;
		}
	    }
	}
#endif /* RLD */

	/*
	 * Set the SG_NORELOC flag in the segments that had no relocation to
	 * or for them.
	 */
	set_SG_NORELOC_flags();

#ifndef SA_RLD
	/*
	 * Copy the indirect symbol table into the output file.
	 */
	output_indirect_symbols();
#endif /* SA_RLD */

	/*
	 * Copy the merged symbol table into the output file.
	 */
	output_merged_symbols();

	/*
	 * Copy the headers into the output file.
	 */
	output_headers();

#ifndef RLD
	if(flush){
	    /*
	     * Flush the sections that have been scatter loaded.
	     */
	    flush_scatter_copied_sections();
	    /*
	     * flush the remaining part of the object file that is not a full
	     * page.
	     */
	    final_output_flush();
	}
	else{
	    /*
	     * Write the entire object file.
	     */
	    if(write(fd, output_addr, output_size) != (int)output_size)
		system_fatal("can't write output file");

	    if((r = vm_deallocate(mach_task_self(), (vm_address_t)output_addr,
				  output_size)) != KERN_SUCCESS)
		mach_fatal(r, "can't vm_deallocate() buffer for output file");
	}
#ifdef F_NOCACHE
	/* re-enable caching of file reads/writes */
	(void)fcntl(fd, F_NOCACHE, 0);
#endif
	if(close(fd) == -1)
	    system_fatal("can't close output file");
#endif /* RLD */
}
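
The vm_msync() call inside the object-file loop above passes VM_SYNC_DEACTIVATE to hint that an input file's pages have been fully consumed and may be reclaimed early. Below is a minimal sketch of that hint in isolation, assuming a Mach host where VM_SYNC_DEACTIVATE is defined (the #ifdef mirrors the guard in pass2()); the anonymous buffer stands in for a mapped object file and is not part of the xchain sources.

#include <stdio.h>
#include <string.h>
#include <mach/mach.h>
#include <mach/mach_error.h>

int
main(void)
{
	kern_return_t	kr;
	vm_address_t	buf = 0;
	vm_size_t	size = 16 * vm_page_size;

	/* Stand-in for an input object file's mapped image. */
	kr = vm_allocate(mach_task_self(), &buf, size, TRUE);
	if (kr != KERN_SUCCESS) {
		fprintf(stderr, "vm_allocate: %s\n", mach_error_string(kr));
		return 1;
	}
	memset((void *) buf, 0, size);	/* touch the pages */

	/* Done with the data: hint that these pages are good reclaim
	 * candidates, the way pass2() does per object file. */
#ifdef VM_SYNC_DEACTIVATE
	kr = vm_msync(mach_task_self(), buf, size, VM_SYNC_DEACTIVATE);
	if (kr != KERN_SUCCESS)
		fprintf(stderr, "vm_msync: %s\n", mach_error_string(kr));
#endif /* VM_SYNC_DEACTIVATE */

	(void) vm_deallocate(mach_task_self(), buf, size);
	return 0;
}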
Code example #4
File: mmap.c Project: rohsaini/mkunity
void
osfmach3_remove_shared_vm_struct(
	struct vm_area_struct	*mpnt)
{
	struct osfmach3_mach_task_struct	*mach_task;
	struct mm_struct	*mm;
	kern_return_t		kr;
	struct vm_area_struct	*vmp;

#ifdef	VMA_DEBUG
	if (vma_debug) {
		printk("VMA:osfmach3_remove_shared_vm_struct: mpnt=0x%p\n",
		       mpnt);
	}
#endif	/* VMA_DEBUG */

	mm = mpnt->vm_mm;
	mach_task = mm->mm_mach_task;
	ASSERT(mach_task == current->osfmach3.task);

	vmp = find_vma(mm, mpnt->vm_start);
	if (vmp != NULL && vmp != mpnt && vmp->vm_start <= mpnt->vm_start) {
		/*
		 * There's another vm_area overlapping the removed one...
		 * This removal is probably the result of a 
		 * merge_segments(): that doesn't change anything to
		 * the VM layout.
		 */
		return;
	}

	if (mpnt->vm_inode) {
		/* mapped file: release a reference on the mem_object */
		inode_pager_release(mpnt->vm_inode);
	}

	if (mm->mmap == NULL) {
		/*
		 * osfmach3_exit_mmap was called before and
		 * cleaned the entire address space...
		 */
		return;
	}
#ifdef	VMA_DEBUG
	if (vma_debug) {
		printk("VMA: vm_deallocate(0x%x, 0x%lx, 0x%lx)\n",
		       mach_task->mach_task_port,
		       mpnt->vm_start,
		       mpnt->vm_end - mpnt->vm_start);
	}
#endif	/* VMA_DEBUG */

	if (mpnt->vm_inode) {
		/* mapped file: flush the object out of the cache */
		server_thread_blocking(FALSE);
		kr = vm_msync(mach_task->mach_task_port,
			      (vm_address_t) mpnt->vm_start,
			      (vm_size_t) (mpnt->vm_end - mpnt->vm_start),
			      VM_SYNC_SYNCHRONOUS);
		server_thread_unblocking(FALSE);
		if (kr != KERN_SUCCESS) {
			MACH3_DEBUG(1, kr,
				    ("osfmach3_remove_share_vm_struct: "
				     "vm_msync"));
		}
	}

	kr = vm_deallocate(mach_task->mach_task_port,
			   (vm_address_t) mpnt->vm_start,
			   (vm_size_t) (mpnt->vm_end - mpnt->vm_start));
	if (kr != KERN_SUCCESS) {
		MACH3_DEBUG(1, kr,
			    ("osfmach3_remove_share_vm_struct: vm_deallocate"));
		printk("osfmach3_remove_shared_vm_struct: can't deallocate\n");
	}
	/*
	 * Flush the copyin/copyout cache.	
	 */
	user_memory_flush_area(mach_task,
			       (vm_address_t) mpnt->vm_start,
			       (vm_address_t) mpnt->vm_end);
}
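
The pattern here is flush-then-deallocate for a file-backed mapping. The same sequence can be reproduced against one's own task with an ordinary mmap()'d file; the sketch below is illustrative only (the scratch file path is hypothetical) and is not taken from the mkunity sources.

#include <fcntl.h>
#include <stdio.h>
#include <sys/mman.h>
#include <unistd.h>
#include <mach/mach.h>
#include <mach/mach_error.h>

#define SCRATCH_FILE "/tmp/vm_msync_demo"	/* hypothetical path */

int
main(void)
{
	kern_return_t	kr;
	int		fd;
	void		*p;
	size_t		size = vm_page_size;

	fd = open(SCRATCH_FILE, O_RDWR | O_CREAT | O_TRUNC, 0600);
	if (fd == -1 || ftruncate(fd, (off_t) size) == -1) {
		perror(SCRATCH_FILE);
		return 1;
	}
	p = mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
	if (p == MAP_FAILED) {
		perror("mmap");
		return 1;
	}
	((char *) p)[0] = 'x';	/* dirty the mapping */

	/* Push the dirty page back to the file object, as example #4 does
	 * before tearing the mapping down. */
	kr = vm_msync(mach_task_self(), (vm_address_t) p, size,
		      VM_SYNC_SYNCHRONOUS);
	if (kr != KERN_SUCCESS)
		fprintf(stderr, "vm_msync: %s\n", mach_error_string(kr));

	kr = vm_deallocate(mach_task_self(), (vm_address_t) p, size);
	if (kr != KERN_SUCCESS)
		fprintf(stderr, "vm_deallocate: %s\n", mach_error_string(kr));

	close(fd);
	return 0;
}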
Code example #5
// entry point from CreatePatch()
void * __create_patch( void * in_fn_addr, void * in_patch_addr )
{
    void * result = NULL;

    if ( !mutex_inited )
        initialize_patch_mutexes( );

    // don't do ANYTHING unless we know we're not infringing on something else
    pthread_mutex_lock( &patch_mutex );

    // not much point doing anything else if we can't get write access to patch the function...
    if ( !__make_writable( in_fn_addr ) )
    {
        // multiple return paths - EEUURRRRGHHH !!!
        pthread_mutex_unlock( &patch_mutex );
        return ( NULL );
    }

    // allocate memory for jump tables
    if ( high_jump_table == 0 )	// either both will be allocated, or neither
    {
        allocate_jump_tables( );
    }

    if ( high_jump_table != 0 )
    {
        // Okay, we need:
        //
        // The first instruction from the function we're about to patch.
        // The address of the entry in the low memory jump table
        // The address of the function to patch
        // The address of the patch function
        // The address of the entry in the high memory jump table
        // The address of the bit of the high jump table entry which refers back to the target fn
        //

        unsigned char saved_instr[32];
        unsigned char new_instr[32];
        vm_address_t fn_addr = ( vm_address_t ) in_fn_addr;
        vm_address_t patch_addr = ( vm_address_t ) in_patch_addr;
        vm_address_t low_entry = low_jump_table + low_table_offset;
        vm_address_t high_entry = high_jump_table + high_table_offset;
        size_t saved_size, low_size, high_size;

        // calculate size of instructions to save off, and generate
        // replacement instruction padded with no-ops
        if ( __calc_insn_size( in_fn_addr, (void *) (high_entry + 8),
                               new_instr, &saved_size ) )
        {
            // can't really do this atomically -- we could be reading
            // twenty-odd bytes here...
            memcpy( saved_instr, in_fn_addr, saved_size );

            // generate reentry island
            low_size = build_low_entry( low_entry, fn_addr + saved_size,
                                        saved_instr, saved_size );

            if ( low_size > 0 )
            {
                // generate patch island
                high_size = build_high_entry( high_entry, low_entry, patch_addr );
            }

	    // ensure we have valid blocks, and that they'll both fit
	    // into the tables
            if ( ( (low_size > 0) && (high_size > 0) ) &&
		 ( ( high_table_offset + high_size ) <= high_table_size ) &&
		 ( ( low_table_offset + low_size ) <= low_table_size ) )
            {
                // Ideally we want to use an atomic operation here, and
                // one which will allow us to re-save the initial
                // instruction block should it have changed in the
                // meantime

                // If the data is less than eight bytes in length, then
                // we can use a 64-bit cmpxchg instruction.

                // Unfortunately, anything more than that can't be done
                // atomically -- at least, not easily -- so we have to
                // rely on our mutexes, and hope nothing else (Unsanity,
                // mach_override) gets in the way during our
                // operation...

                if ( saved_size <= 8 )
                {
                    // we will write eight bytes at once, so fill out
                    // the end of new_instr as necessary
                    unsigned long long oldVal, newVal;
                    unsigned long long * addr = (unsigned long long *) in_fn_addr;

                    do
                    {
                        newVal = oldVal = *addr;
                        memcpy( &newVal, new_instr, saved_size );

                        // newVal now contains new_instr padded with
                        // some bytes which aren't to be changed
                        if ( DPCompareAndSwap64( oldVal, newVal, addr ) == 0 )
                        {
                            // it's changed beneath us

                            // recalculate instructions
                            // *pray* this call doesn't fail. It
                            // shouldn't ever do that
                            __calc_insn_size( in_fn_addr, (void *) (high_entry+8),
                                              new_instr, &saved_size );

                            // re-save instructions
                            memcpy( saved_instr, in_fn_addr, saved_size );

                            // re-generate reentry island
                            low_size = build_low_entry( low_entry, fn_addr + saved_size,
                                saved_instr, saved_size );
                        }
                        else
                        {
                            // all done
                            break;
                        }

                    } while ( saved_size < 8 );
                }

                // a change might have made the saved instructions get
                // larger than eight bytes, so we check again here.
                if ( saved_size > 8 )
                {
                    // copy the padded ljmp instruction into the target function...
                    memcpy( in_fn_addr, new_instr, saved_size );
                }

                // call msync() on each & update table offsets - flushes instruction cache
                vm_msync( mach_task_self( ), low_entry,
                          low_size, VM_SYNC_INVALIDATE | VM_SYNC_SYNCHRONOUS );
                vm_msync( mach_task_self( ), high_entry, 
                          high_size, VM_SYNC_INVALIDATE | VM_SYNC_SYNCHRONOUS );

                low_table_offset += low_size;
                high_table_offset += high_size;

                DPCodeSync( in_fn_addr );

                // set result - addr is address of first *instruction* in the new low addr table entry
                result = (void *) (low_entry + 8);
            }
        }
    }

    pthread_mutex_unlock( &patch_mutex );

    return ( result );
}
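
The long comment in the middle of __create_patch() carries the key constraint: a patch of eight bytes or less can be installed with a single 64-bit compare-and-swap, while anything larger has to fall back to memcpy() under the mutex. A rough sketch of just that step is shown below, written with the GCC/Clang builtin __sync_bool_compare_and_swap() instead of the project's DPCompareAndSwap64(); the helper name patch_first_8_bytes is hypothetical, and the page containing fn_addr is assumed to have been made writable already.

#include <stddef.h>    /* size_t */
#include <stdint.h>    /* uint64_t */
#include <string.h>    /* memcpy */

/* Illustrative only: the "swap the first eight bytes if nobody beat us to it"
 * step from above.  Assumes __make_writable() has already succeeded on the
 * page containing fn_addr. */
static int
patch_first_8_bytes( void * fn_addr, const unsigned char * new_instr, size_t saved_size )
{
    uint64_t * addr = (uint64_t *) fn_addr;
    uint64_t oldVal, newVal;

    if ( saved_size > 8 )
        return 0;   /* caller falls back to the plain memcpy path */

    oldVal = *addr;
    newVal = oldVal;
    memcpy( &newVal, new_instr, saved_size );   /* keep the trailing bytes intact */

    /* nonzero on success, zero if the function changed underneath us */
    return __sync_bool_compare_and_swap( addr, oldVal, newVal );
}

On a zero return the caller would regenerate new_instr and saved_instr, exactly as the do/while loop above does, before attempting the swap again.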