Code Example #1
void
myNMHandler( NMRecPtr pNM )
{
    do {
        DVIdle();
    } while( CompareAndSwap( true, false, &(gpFamilyGlobals->nmIsInstalled) ) );

    // until next time...
    NMRemove( pNM );
}
Code Example #2
File: SortStringArray.cpp Project: kanbang/SVN
void CSortStringArray::Sort()
{
	BOOL bNotDone = TRUE;
	while (bNotDone)
	{
		bNotDone = FALSE;
		for (int pos = 0; pos < GetUpperBound(); pos++)
		{
			bNotDone |= CompareAndSwap(pos);
		}
	}
}
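
Unlike the other examples on this page, the CompareAndSwap here is not an atomic primitive: it is a CSortStringArray member that compares two adjacent strings and swaps them when they are out of order, so Sort() is a plain bubble sort that repeats passes until no swap occurs. That helper is not included in the excerpt; the sketch below is an assumed reconstruction (the class shape, the method body, and the MFC GetAt/SetAt/Compare calls are all assumptions, not part of the source).

#include <afxcoll.h>	// MFC collection classes (CStringArray, CString)

// Assumed shape of the class; only Sort() appears in the excerpt above.
class CSortStringArray : public CStringArray
{
public:
	void Sort();
protected:
	BOOL CompareAndSwap(int pos);
};

// Hypothetical helper: compare the elements at pos and pos + 1 and swap them
// when they are out of order.  Returning TRUE tells Sort() that another pass
// over the array is still needed.
BOOL CSortStringArray::CompareAndSwap(int pos)
{
	if (GetAt(pos).Compare(GetAt(pos + 1)) > 0)
	{
		CString temp = GetAt(pos);
		SetAt(pos, GetAt(pos + 1));
		SetAt(pos + 1, temp);
		return TRUE;
	}
	return FALSE;
}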
Code Example #3
File: memobj.c Project: RobertHarper/TILT-Compiler
void
Stacklet_KillReplica(Stacklet_t* stacklet)
{
	assert(stacklet->count > 0);
	if (stacklet->state == Pending)
		Stacklet_Copy(stacklet);
	while (stacklet->state == Copying)
		;
	CompareAndSwap((int*) &stacklet->state, InactiveCopied, ActiveCopied);
	assert(stacklet->state == Inconsistent
		|| stacklet->state == ActiveCopied);
}
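
The TILT runtime examples (#3, #4, #8, and #9) use a value-returning CompareAndSwap: judging from the call sites, it atomically compares *addr against a test value, stores a new value on a match, and returns whatever was in *addr beforehand. The runtime's own definition is not shown on this page; the sketch below expresses those assumed semantics with the GCC/Clang __sync builtin, which the real implementation may not use.

// Assumed semantics (the real runtime may implement this in assembly):
// atomically compare *addr with testValue; if equal, store newValue; in
// either case return the value that was in *addr before the operation.
static int CompareAndSwap(int* addr, int testValue, int newValue)
{
	return __sync_val_compare_and_swap(addr, testValue, newValue);
}

// Typical use, as in Stacklet_Alloc in the next example: claim a free slot
// whose reference count is still 0.
//	if (CompareAndSwap(&Stacklets[i].count, 0, 1) == 0)
//		/* slot i now belongs to this thread */;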
Code Example #4
File: memobj.c Project: RobertHarper/TILT-Compiler
static Stacklet_t*
Stacklet_Alloc(StackChain_t* stackChain)
{
	int i;
	Stacklet_t *res = NULL;
	/*
		Each stacklet contains the primary and replica.  Each one
		starts with a guard page, a C area, and then an ML area.
	*/
	int size = (GuardStackletSize + MLStackletSize + CStackletSize) * kilobyte;	/* for just one of the two: primary and replica */

	assert(stackletOffset == size);
	for (i=0; i<NumStacklet; i++)
		if (CompareAndSwap(&Stacklets[i].count, 0, 1) == 0) {
			res = &Stacklets[i];
			break;
		}
	if (res == NULL)
		DIE("out of stack space");

	res->parent = stackChain;
	res->state = Inconsistent;
	if (!res->mapped) {
		mem_t start = my_mmap(2 * size, PROT_READ | PROT_WRITE);
		mem_t middle = start + size / (sizeof (val_t));

		res->baseExtendedBottom = start +
			(GuardStackletSize * kilobyte) / (sizeof (val_t));
		res->baseBottom = res->baseExtendedBottom +
			(CStackletSize * kilobyte) / (sizeof (val_t));
		res->baseTop = res->baseBottom +
			(MLStackletSize * kilobyte) / (sizeof (val_t));
		assert(res->baseTop == middle);
		/*
			Get some initial room in multiples of 64 bytes; Sparc
			requires at least 68 bytes for the save area.
		*/
		res->baseTop -= (128 / sizeof(val_t));
		my_mprotect(0, (caddr_t) start, GuardStackletSize * kilobyte,
			PROT_NONE);	/* Guard page at bottom of primary */
		my_mprotect(1, (caddr_t) middle, GuardStackletSize * kilobyte,
			PROT_NONE);	/* Guard page at bottom of replica */

		res->callinfoStack = SetCreate(size / (32 * sizeof (val_t)));
		res->mapped = 1;
	}
	res->baseCursor = res->baseTop;
	for (i=0; i<32; i++)
		res->bottomBaseRegs[i] = 0;
	SetReset(res->callinfoStack);
	return res;
}
Code Example #5
void NaClUntrustedThreadSuspend(struct NaClAppThread *natp,
                                int save_registers) {
  Atomic32 old_state;
  Atomic32 suspending_state;

  /*
   * We do not want the thread to enter a NaCl syscall and start
   * taking locks when pthread_kill() takes effect, so we ask the
   * thread to suspend even if it is currently running untrusted code.
   */
  while (1) {
    old_state = natp->suspend_state;
    DCHECK((old_state & NACL_APP_THREAD_SUSPENDING) == 0);
    suspending_state = old_state | NACL_APP_THREAD_SUSPENDING;
    if (CompareAndSwap(&natp->suspend_state, old_state, suspending_state)
        != old_state) {
      continue;  /* Retry */
    }
    break;
  }
  /*
   * Once the thread has NACL_APP_THREAD_SUSPENDING set, it may not
   * change state itself, so there should be no race condition in this
   * check.
   */
  DCHECK(natp->suspend_state == suspending_state);

  if (old_state == NACL_APP_THREAD_UNTRUSTED) {
    /*
     * Allocate register state struct if needed.  This is race-free
     * when we are called by NaClUntrustedThreadsSuspendAll(), since
     * that claims nap->threads_mu.
     */
    if (save_registers && natp->suspended_registers == NULL) {
      natp->suspended_registers = malloc(sizeof(*natp->suspended_registers));
      if (natp->suspended_registers == NULL) {
        NaClLog(LOG_FATAL, "NaClUntrustedThreadSuspend: malloc() failed\n");
      }
    }
    if (pthread_kill(natp->thread.tid, NACL_THREAD_SUSPEND_SIGNAL) != 0) {
      NaClLog(LOG_FATAL, "NaClUntrustedThreadSuspend: "
              "pthread_kill() call failed\n");
    }
    WaitForUntrustedThreadToSuspend(natp);
  }
}
Code Example #6
void NaClAppThreadSetSuspendState(struct NaClAppThread *natp,
                                  enum NaClSuspendState old_state,
                                  enum NaClSuspendState new_state) {
  while (1) {
    Atomic32 state = natp->suspend_state;
    if ((state & NACL_APP_THREAD_SUSPENDING) != 0) {
      /* We have been asked to suspend, so wait. */
      FutexWait(&natp->suspend_state, state);
      continue;  /* Retry */
    }

    CHECK(state == (Atomic32) old_state);
    if (CompareAndSwap(&natp->suspend_state, old_state, new_state)
        != (Atomic32) old_state) {
      continue;  /* Retry */
    }
    break;
  }
}
Code Example #7
void NaClUntrustedThreadResume(struct NaClAppThread *natp) {
  Atomic32 old_state;
  Atomic32 new_state;
  while (1) {
    old_state = natp->suspend_state;
    new_state = old_state & ~(NACL_APP_THREAD_SUSPENDING |
                              NACL_APP_THREAD_SUSPENDED);
    DCHECK((old_state & NACL_APP_THREAD_SUSPENDING) != 0);
    if (CompareAndSwap(&natp->suspend_state, old_state, new_state)
        != old_state) {
      continue;  /* Retry */
    }
    break;
  }

  /*
   * TODO(mseaborn): A refinement would be to wake up the thread only
   * if it actually suspended during the context switch.
   */
  FutexWake(&natp->suspend_state, 1);
}
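
Examples #5, #6, and #7 all follow the same lock-free pattern: read natp->suspend_state, compute the desired new flag word, and retry the CompareAndSwap until it reports that no other thread modified the word in between (the `!= old_state` checks imply a CAS that returns the observed previous value). Below is a stand-alone sketch of that read-modify-retry loop using C++11 atomics; SetFlagBits is a hypothetical name, not part of the NaCl source.

#include <atomic>
#include <cstdint>

// Hypothetical example: atomically set some bits in a shared flag word,
// retrying whenever another thread changes the word between the read and
// the compare-and-swap.
void SetFlagBits(std::atomic<uint32_t>* word, uint32_t bits_to_set)
{
	uint32_t old_state = word->load();
	// On failure, compare_exchange_weak reloads old_state with the value it
	// actually observed, so the next iteration retries with fresh data.
	while (!word->compare_exchange_weak(old_state, old_state | bits_to_set)) {
		// Retry.
	}
}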
Code Example #8
File: memobj.c Project: RobertHarper/TILT-Compiler
StackChain_t*
StackChain_BaseAlloc(Thread_t* t, int n)
{
	int i;
	for (i=0; i<NumStackChain; i++)
		if (CompareAndSwap(&StackChains[i].used, 0, 1) == 0) {
			StackChain_t *res = &StackChains[i];
			/* memBarrier(); */
			assert(res->used == 1);
			res->cursor = 0;
			res->thread = t;
			res->avail = n;
			assert(n > 0);
			res->stacklets = (Stacklet_t**)emalloc(sizeof(Stacklet_t*) * n);
			for (i=0; i < n; i++) {
				res->stacklets[i] = NULL;
			}
			return res;
		}
	DIE("could not allocate new stack chain\n");
	return 0;	/* NOTREACHED */
}
Code Example #9
File: memobj.c Project: RobertHarper/TILT-Compiler
/* Copy primary into replica */
int
Stacklet_Copy(Stacklet_t* stacklet)
{
	StackletState_t state;

	state = CompareAndSwap((int*) &stacklet->state, Pending, Copying);
	if (state == Pending) {
		int i;
		int activeSize = (stacklet->baseTop - stacklet->baseCursor) *
			sizeof(val_t);
		mem_t primaryCursor = stacklet->baseCursor +
			(primaryStackletOffset / sizeof(val_t));
		mem_t replicaCursor = stacklet->baseCursor +
			(replicaStackletOffset / sizeof(val_t));
		volatile reg_t* primaryRegs = &stacklet->bottomBaseRegs
			[primaryStackletOffset == 0 ? 0 : 32];
		volatile reg_t* replicaRegs = &stacklet->bottomBaseRegs
			[primaryStackletOffset == 0 ? 32 : 0];
		assert(stacklet->count > 0);
		assert(stacklet->baseExtendedBottom <= stacklet->baseCursor);
		assert(stacklet->baseCursor <= stacklet->baseTop);
		SetReset(stacklet->callinfoStack);
		stacklet->replicaCursor = stacklet->baseCursor;
		stacklet->replicaRetadd = stacklet->retadd;
		memcpy(replicaCursor, primaryCursor, activeSize);
		for (i=0; i<32; i++)
			replicaRegs[i] = primaryRegs[i];
		stacklet->state = InactiveCopied;
		return 1;
	}
	while (stacklet->state == Copying)
		;
	assert(stacklet->state != Inconsistent);
	assert(stacklet->state != Pending);
	return 0;
}
Code Example #10
File: mach_override.c Project: 0xb1dd1e/ToGL
    mach_error_t
mach_override_ptr(
	void *originalFunctionAddress,
    const void *overrideFunctionAddress,
    void **originalFunctionReentryIsland )
{
	assert( originalFunctionAddress );
	assert( overrideFunctionAddress );
	
	// this addresses overriding such functions as AudioOutputUnitStart()
	// test with modified DefaultOutputUnit project
#if defined(__x86_64__)
    for(;;){
        if(*(uint16_t*)originalFunctionAddress==0x25FF)    // jmp qword near [rip+0x????????]
            originalFunctionAddress=*(void**)((char*)originalFunctionAddress+6+*(int32_t *)((uint16_t*)originalFunctionAddress+1));
        else break;
    }
#elif defined(__i386__)
    for(;;){
        if(*(uint16_t*)originalFunctionAddress==0x25FF)    // jmp *0x????????
            originalFunctionAddress=**(void***)((uint16_t*)originalFunctionAddress+1);
        else break;
    }
#endif

	long	*originalFunctionPtr = (long*) originalFunctionAddress;
	mach_error_t	err = err_none;
	
#if defined(__ppc__) || defined(__POWERPC__)
	//	Ensure first instruction isn't 'mfctr'.
	#define	kMFCTRMask			0xfc1fffff
	#define	kMFCTRInstruction	0x7c0903a6
	
	long	originalInstruction = *originalFunctionPtr;
	if( !err && ((originalInstruction & kMFCTRMask) == kMFCTRInstruction) )
		err = err_cannot_override;
#elif defined(__i386__) || defined(__x86_64__)
	int eatenCount = 0;
	int originalInstructionCount = 0;
	char originalInstructions[kOriginalInstructionsSize];
	uint8_t originalInstructionSizes[kOriginalInstructionsSize];
	uint64_t jumpRelativeInstruction = 0; // JMP

	Boolean overridePossible = eatKnownInstructions ((unsigned char *)originalFunctionPtr, 
										&jumpRelativeInstruction, &eatenCount, 
										originalInstructions, &originalInstructionCount, 
										originalInstructionSizes );
	if (eatenCount + kMaxFixupSizeIncrease > kOriginalInstructionsSize) {
		//printf ("Too many instructions eaten\n");
		overridePossible = false;
	}
	if (!overridePossible) err = err_cannot_override;
	if (err) fprintf(stderr, "err = %x %s:%d\n", err, __FILE__, __LINE__);
#endif
	
	//	Make the original function implementation writable.
	if( !err ) {
		err = vm_protect( mach_task_self(),
				(vm_address_t) originalFunctionPtr, 8, false,
				(VM_PROT_ALL | VM_PROT_COPY) );
		if( err )
			err = vm_protect( mach_task_self(),
					(vm_address_t) originalFunctionPtr, 8, false,
					(VM_PROT_DEFAULT | VM_PROT_COPY) );
	}
	if (err) fprintf(stderr, "err = %x %s:%d\n", err, __FILE__, __LINE__);
	
	//	Allocate and target the escape island to the overriding function.
	BranchIsland	*escapeIsland = NULL;
	if( !err )
		err = allocateBranchIsland( &escapeIsland, originalFunctionAddress );
	if (err) fprintf(stderr, "err = %x %s:%d\n", err, __FILE__, __LINE__);

	
#if defined(__ppc__) || defined(__POWERPC__)
	if( !err )
		err = setBranchIslandTarget( escapeIsland, overrideFunctionAddress, 0 );
	
	//	Build the branch absolute instruction to the escape island.
	long	branchAbsoluteInstruction = 0; // Set to 0 just to silence warning.
	if( !err ) {
		long escapeIslandAddress = ((long) escapeIsland) & 0x3FFFFFF;
		branchAbsoluteInstruction = 0x48000002 | escapeIslandAddress;
	}
#elif defined(__i386__) || defined(__x86_64__)
        if (err) fprintf(stderr, "err = %x %s:%d\n", err, __FILE__, __LINE__);

	if( !err )
		err = setBranchIslandTarget_i386( escapeIsland, overrideFunctionAddress, 0 );
 
	if (err) fprintf(stderr, "err = %x %s:%d\n", err, __FILE__, __LINE__);
	// Build the jump relative instruction to the escape island
#endif


#if defined(__i386__) || defined(__x86_64__)
	if (!err) {
		uint32_t addressOffset = ((char*)escapeIsland - (char*)originalFunctionPtr - 5);
		addressOffset = OSSwapInt32(addressOffset);
		
		jumpRelativeInstruction |= 0xE900000000000000LL; 
		jumpRelativeInstruction |= ((uint64_t)addressOffset & 0xffffffff) << 24;
		jumpRelativeInstruction = OSSwapInt64(jumpRelativeInstruction);		
	}
#endif
	
	//	Optionally allocate & return the reentry island. This may contain relocated
	//  jmp instructions and so has all the same addressing reachability requirements
	//  the escape island has to the original function, except the escape island is
	//  technically our original function.
	BranchIsland	*reentryIsland = NULL;
	if( !err && originalFunctionReentryIsland ) {
		err = allocateBranchIsland( &reentryIsland, escapeIsland);
		if( !err )
			*originalFunctionReentryIsland = reentryIsland;
	}
	
#if defined(__ppc__) || defined(__POWERPC__)	
	//	Atomically:
	//	o If the reentry island was allocated:
	//		o Insert the original instruction into the reentry island.
	//		o Target the reentry island at the 2nd instruction of the
	//		  original function.
	//	o Replace the original instruction with the branch absolute.
	if( !err ) {
		int escapeIslandEngaged = false;
		do {
			if( reentryIsland )
				err = setBranchIslandTarget( reentryIsland,
						(void*) (originalFunctionPtr+1), originalInstruction );
			if( !err ) {
				escapeIslandEngaged = CompareAndSwap( originalInstruction,
										branchAbsoluteInstruction,
										(UInt32*)originalFunctionPtr );
				if( !escapeIslandEngaged ) {
					//	Someone replaced the instruction out from under us,
					//	re-read the instruction, make sure it's still not
					//	'mfctr' and try again.
					originalInstruction = *originalFunctionPtr;
					if( (originalInstruction & kMFCTRMask) == kMFCTRInstruction)
						err = err_cannot_override;
				}
			}
		} while( !err && !escapeIslandEngaged );
	}
#elif defined(__i386__) || defined(__x86_64__)
	// Atomically:
	//	o If the reentry island was allocated:
	//		o Insert the original instructions into the reentry island.
	//		o Target the reentry island at the first non-replaced 
	//        instruction of the original function.
	//	o Replace the original first instructions with the jump relative.
	//
	// Note that on i386, we do not support someone else changing the code under our feet
	if ( !err ) {
		uint32_t offset = (uintptr_t)originalFunctionPtr - (uintptr_t)reentryIsland;
		fixupInstructions(offset, originalInstructions,
					originalInstructionCount, originalInstructionSizes );
	
		if( reentryIsland )
			err = setBranchIslandTarget_i386( reentryIsland,
										 (void*) ((char *)originalFunctionPtr+eatenCount), originalInstructions );
		// try making islands executable before planting the jmp
#if defined(__x86_64__) || defined(__i386__)
        if( !err )
            err = makeIslandExecutable(escapeIsland);
        if( !err && reentryIsland )
            err = makeIslandExecutable(reentryIsland);
#endif
		if ( !err )
			atomic_mov64((uint64_t *)originalFunctionPtr, jumpRelativeInstruction);
	}
#endif
	
	//	Clean up on error.
	if( err ) {
		if( reentryIsland )
			freeBranchIsland( reentryIsland );
		if( escapeIsland )
			freeBranchIsland( escapeIsland );
	}

	return err;
}
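
In the PowerPC path above, CompareAndSwap appears to be the Boolean-returning CAS from CoreServices: it stores branchAbsoluteInstruction only if the first word of the function still equals the instruction that was inspected, and the loop re-reads and re-validates on failure. A stand-alone analogue of that "write only if unchanged" step, sketched with C++11 atomics (the function name is illustrative, not part of mach_override):

#include <atomic>
#include <cstdint>

// Hypothetical example: overwrite a 32-bit word only if it still holds the
// value that was read earlier.  Returns true when the replacement was stored;
// on false the caller should re-read and re-validate, as the PPC loop does.
bool PatchWordIfUnchanged(std::atomic<uint32_t>* word,
                          uint32_t expected_original,
                          uint32_t replacement)
{
	return word->compare_exchange_strong(expected_original, replacement);
}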
Code Example #11
    mach_error_t
__asan_mach_override_ptr_custom(
	void *originalFunctionAddress,
    const void *overrideFunctionAddress,
    void **originalFunctionReentryIsland,
		island_malloc *alloc,
		island_free *dealloc)
{
	assert( originalFunctionAddress );
	assert( overrideFunctionAddress );
	
	// this addresses overriding such functions as AudioOutputUnitStart()
	// test with modified DefaultOutputUnit project
#if defined(__x86_64__)
    for(;;){
        if(*(uint16_t*)originalFunctionAddress==0x25FF)    // jmp qword near [rip+0x????????]
            originalFunctionAddress=*(void**)((char*)originalFunctionAddress+6+*(int32_t *)((uint16_t*)originalFunctionAddress+1));
        else break;
    }
#elif defined(__i386__)
    for(;;){
        if(*(uint16_t*)originalFunctionAddress==0x25FF)    // jmp *0x????????
            originalFunctionAddress=**(void***)((uint16_t*)originalFunctionAddress+1);
        else break;
    }
#endif
#ifdef DEBUG_DISASM
  {
    fprintf(stderr, "Replacing function at %p\n", originalFunctionAddress);
    fprintf(stderr, "First 16 bytes of the function: ");
    unsigned char *orig = (unsigned char *)originalFunctionAddress;
    int i;
    for (i = 0; i < 16; i++) {
       fprintf(stderr, "%x ", (unsigned int) orig[i]);
    }
    fprintf(stderr, "\n");
    fprintf(stderr, 
            "To disassemble, save the following function as disas.c"
            " and run:\n  gcc -c disas.c && gobjdump -d disas.o\n"
            "The first 16 bytes of the original function will start"
            " after four nop instructions.\n");
    fprintf(stderr, "\nvoid foo() {\n  asm volatile(\"nop;nop;nop;nop;\");\n");
    int j = 0;
    for (j = 0; j < 2; j++) {
      fprintf(stderr, "  asm volatile(\".byte ");
      for (i = 8 * j; i < 8 * (j+1) - 1; i++) {
        fprintf(stderr, "0x%x, ", (unsigned int) orig[i]);
      }
      fprintf(stderr, "0x%x;\");\n", (unsigned int) orig[8 * (j+1) - 1]);
    }
    fprintf(stderr, "}\n\n");
  }
#endif

	long	*originalFunctionPtr = (long*) originalFunctionAddress;
	mach_error_t	err = err_none;
	
#if defined(__ppc__) || defined(__POWERPC__)
	//	Ensure first instruction isn't 'mfctr'.
	#define	kMFCTRMask			0xfc1fffff
	#define	kMFCTRInstruction	0x7c0903a6
	
	long	originalInstruction = *originalFunctionPtr;
	if( !err && ((originalInstruction & kMFCTRMask) == kMFCTRInstruction) )
		err = err_cannot_override;
#elif defined(__i386__) || defined(__x86_64__)
	int eatenCount = 0;
	int originalInstructionCount = 0;
	char originalInstructions[kOriginalInstructionsSize];
	uint8_t originalInstructionSizes[kOriginalInstructionsSize];
	uint64_t jumpRelativeInstruction = 0; // JMP

	Boolean overridePossible = eatKnownInstructions ((unsigned char *)originalFunctionPtr, 
										&jumpRelativeInstruction, &eatenCount, 
										originalInstructions, &originalInstructionCount, 
										originalInstructionSizes );
#ifdef DEBUG_DISASM
  if (!overridePossible) fprintf(stderr, "overridePossible = false @%d\n", __LINE__);
#endif
	if (eatenCount > kOriginalInstructionsSize) {
#ifdef DEBUG_DISASM
		fprintf(stderr, "Too many instructions eaten\n");
#endif    
		overridePossible = false;
	}
	if (!overridePossible) err = err_cannot_override;
	if (err) fprintf(stderr, "err = %x %s:%d\n", err, __FILE__, __LINE__);
#endif
	
	//	Make the original function implementation writable.
	if( !err ) {
		err = vm_protect( mach_task_self(),
				(vm_address_t) originalFunctionPtr, 8, false,
				(VM_PROT_ALL | VM_PROT_COPY) );
		if( err )
			err = vm_protect( mach_task_self(),
					(vm_address_t) originalFunctionPtr, 8, false,
					(VM_PROT_DEFAULT | VM_PROT_COPY) );
	}
	if (err) fprintf(stderr, "err = %x %s:%d\n", err, __FILE__, __LINE__);
	
	//	Allocate and target the escape island to the overriding function.
	BranchIsland	*escapeIsland = NULL;
	if( !err )
		err = alloc( (void**)&escapeIsland, sizeof(BranchIsland), originalFunctionAddress );
	if ( err ) fprintf(stderr, "err = %x %s:%d\n", err, __FILE__, __LINE__);
	
#if defined(__ppc__) || defined(__POWERPC__)
	if( !err )
		err = setBranchIslandTarget( escapeIsland, overrideFunctionAddress, 0 );
	
	//	Build the branch absolute instruction to the escape island.
	long	branchAbsoluteInstruction = 0; // Set to 0 just to silence warning.
	if( !err ) {
		long escapeIslandAddress = ((long) escapeIsland) & 0x3FFFFFF;
		branchAbsoluteInstruction = 0x48000002 | escapeIslandAddress;
	}
#elif defined(__i386__) || defined(__x86_64__)
        if (err) fprintf(stderr, "err = %x %s:%d\n", err, __FILE__, __LINE__);

	if( !err )
		err = setBranchIslandTarget_i386( escapeIsland, overrideFunctionAddress, 0 );
 
	if (err) fprintf(stderr, "err = %x %s:%d\n", err, __FILE__, __LINE__);
	// Build the jump relative instruction to the escape island
#endif


#if defined(__i386__) || defined(__x86_64__)
	if (!err) {
		uint32_t addressOffset = ((char*)escapeIsland - (char*)originalFunctionPtr - 5);
		addressOffset = OSSwapInt32(addressOffset);
		
		jumpRelativeInstruction |= 0xE900000000000000LL; 
		jumpRelativeInstruction |= ((uint64_t)addressOffset & 0xffffffff) << 24;
		jumpRelativeInstruction = OSSwapInt64(jumpRelativeInstruction);		
	}
#endif
	
	//	Optionally allocate & return the reentry island. This may contain relocated
	//  jmp instructions and so has all the same addressing reachability requirements
	//  the escape island has to the original function, except the escape island is
	//  technically our original function.
	BranchIsland	*reentryIsland = NULL;
	if( !err && originalFunctionReentryIsland ) {
		err = alloc( (void**)&reentryIsland, sizeof(BranchIsland), escapeIsland);
		if( !err )
			*originalFunctionReentryIsland = reentryIsland;
	}
	
#if defined(__ppc__) || defined(__POWERPC__)	
	//	Atomically:
	//	o If the reentry island was allocated:
	//		o Insert the original instruction into the reentry island.
	//		o Target the reentry island at the 2nd instruction of the
	//		  original function.
	//	o Replace the original instruction with the branch absolute.
	if( !err ) {
		int escapeIslandEngaged = false;
		do {
			if( reentryIsland )
				err = setBranchIslandTarget( reentryIsland,
						(void*) (originalFunctionPtr+1), originalInstruction );
			if( !err ) {
				escapeIslandEngaged = CompareAndSwap( originalInstruction,
										branchAbsoluteInstruction,
										(UInt32*)originalFunctionPtr );
				if( !escapeIslandEngaged ) {
					//	Someone replaced the instruction out from under us,
					//	re-read the instruction, make sure it's still not
					//	'mfctr' and try again.
					originalInstruction = *originalFunctionPtr;
					if( (originalInstruction & kMFCTRMask) == kMFCTRInstruction)
						err = err_cannot_override;
				}
			}
		} while( !err && !escapeIslandEngaged );
	}
#elif defined(__i386__) || defined(__x86_64__)
	// Atomically:
	//	o If the reentry island was allocated:
	//		o Insert the original instructions into the reentry island.
	//		o Target the reentry island at the first non-replaced 
	//        instruction of the original function.
	//	o Replace the original first instructions with the jump relative.
	//
	// Note that on i386, we do not support someone else changing the code under our feet
	if ( !err ) {
		fixupInstructions(originalFunctionPtr, reentryIsland, originalInstructions,
					originalInstructionCount, originalInstructionSizes );
	
		if( reentryIsland )
			err = setBranchIslandTarget_i386( reentryIsland,
										 (void*) ((char *)originalFunctionPtr+eatenCount), originalInstructions );
		// try making islands executable before planting the jmp
#if defined(__x86_64__) || defined(__i386__)
        if( !err )
            err = makeIslandExecutable(escapeIsland);
        if( !err && reentryIsland )
            err = makeIslandExecutable(reentryIsland);
#endif
		if ( !err )
			atomic_mov64((uint64_t *)originalFunctionPtr, jumpRelativeInstruction);
	}
#endif
	
	//	Clean up on error.
	if( err ) {
		if( reentryIsland )
			dealloc( reentryIsland );
		if( escapeIsland )
			dealloc( escapeIsland );
	}

#ifdef DEBUG_DISASM
  {
    fprintf(stderr, "First 16 bytes of the function after slicing: ");
    unsigned char *orig = (unsigned char *)originalFunctionAddress;
    int i;
    for (i = 0; i < 16; i++) {
       fprintf(stderr, "%x ", (unsigned int) orig[i]);
    }
    fprintf(stderr, "\n");
  }
#endif
	return err;
}
Code Example #12
    mach_error_t
mach_override_ptr(
	void *originalFunctionAddress,
    const void *overrideFunctionAddress,
    void **originalFunctionReentryIsland )
{
	assert( originalFunctionAddress );
	assert( overrideFunctionAddress );
	
	long	*originalFunctionPtr = (long*) originalFunctionAddress;
	mach_error_t	err = err_none;
	
#if defined(__ppc__) || defined(__POWERPC__)
	//	Ensure first instruction isn't 'mfctr'.
	#define	kMFCTRMask			0xfc1fffff
	#define	kMFCTRInstruction	0x7c0903a6
	
	long	originalInstruction = *originalFunctionPtr;
	if( !err && ((originalInstruction & kMFCTRMask) == kMFCTRInstruction) )
		err = err_cannot_override;
#elif defined(__i386__) || defined(__x86_64__)
	int eatenCount = 0;
	char originalInstructions[kOriginalInstructionsSize];
	uint64_t jumpRelativeInstruction = 0; // JMP

	Boolean overridePossible = eatKnownInstructions ((unsigned char *)originalFunctionPtr, 
										&jumpRelativeInstruction, &eatenCount, originalInstructions);
	if (eatenCount > kOriginalInstructionsSize) {
		//printf ("Too many instructions eaten\n");
		overridePossible = false;
	}
	if (!overridePossible) err = err_cannot_override;
	if (err) printf("err = %x %d\n", err, __LINE__);
#endif
	
	//	Make the original function implementation writable.
	if( !err ) {
		err = vm_protect( mach_task_self(),
				(vm_address_t) originalFunctionPtr,
				sizeof(long), false, (VM_PROT_ALL | VM_PROT_COPY) );
		if( err )
			err = vm_protect( mach_task_self(),
					(vm_address_t) originalFunctionPtr, sizeof(long), false,
					(VM_PROT_DEFAULT | VM_PROT_COPY) );
	}
	if (err) printf("err = %x %d\n", err, __LINE__);
	
	//	Allocate and target the escape island to the overriding function.
	BranchIsland	*escapeIsland = NULL;
	if( !err )
		err = allocateBranchIsland( &escapeIsland, kAllocateHigh, originalFunctionAddress );
	if (err) printf("err = %x %d\n", err, __LINE__);

	
#if defined(__ppc__) || defined(__POWERPC__)
	if( !err )
		err = setBranchIslandTarget( escapeIsland, overrideFunctionAddress, 0 );
	
	//	Build the branch absolute instruction to the escape island.
	long	branchAbsoluteInstruction = 0; // Set to 0 just to silence warning.
	if( !err ) {
		long escapeIslandAddress = ((long) escapeIsland) & 0x3FFFFFF;
		branchAbsoluteInstruction = 0x48000002 | escapeIslandAddress;
	}
#elif defined(__i386__) || defined(__x86_64__)
        if (err) printf("err = %x %d\n", err, __LINE__);

	if( !err )
		err = setBranchIslandTarget_i386( escapeIsland, overrideFunctionAddress, 0 );
 
	if (err) printf("err = %x %d\n", err, __LINE__);
	// Build the jump relative instruction to the escape island
#endif


#if defined(__i386__) || defined(__x86_64__)
	if (!err) {
		uint32_t addressOffset = ((void*)escapeIsland - (void*)originalFunctionPtr - 5);
		addressOffset = OSSwapInt32(addressOffset);
		
		jumpRelativeInstruction |= 0xE900000000000000LL; 
		jumpRelativeInstruction |= ((uint64_t)addressOffset & 0xffffffff) << 24;
		jumpRelativeInstruction = OSSwapInt64(jumpRelativeInstruction);		
	}
#endif
	
	//	Optionally allocate & return the reentry island.
	BranchIsland	*reentryIsland = NULL;
	if( !err && originalFunctionReentryIsland ) {
		err = allocateBranchIsland( &reentryIsland, kAllocateNormal, NULL);
		if( !err )
			*originalFunctionReentryIsland = reentryIsland;
	}
	
#if defined(__ppc__) || defined(__POWERPC__)	
	//	Atomically:
	//	o If the reentry island was allocated:
	//		o Insert the original instruction into the reentry island.
	//		o Target the reentry island at the 2nd instruction of the
	//		  original function.
	//	o Replace the original instruction with the branch absolute.
	if( !err ) {
		int escapeIslandEngaged = false;
		do {
			if( reentryIsland )
				err = setBranchIslandTarget( reentryIsland,
						(void*) (originalFunctionPtr+1), originalInstruction );
			if( !err ) {
				escapeIslandEngaged = CompareAndSwap( originalInstruction,
										branchAbsoluteInstruction,
										(UInt32*)originalFunctionPtr );
				if( !escapeIslandEngaged ) {
					//	Someone replaced the instruction out from under us,
					//	re-read the instruction, make sure it's still not
					//	'mfctr' and try again.
					originalInstruction = *originalFunctionPtr;
					if( (originalInstruction & kMFCTRMask) == kMFCTRInstruction)
						err = err_cannot_override;
				}
			}
		} while( !err && !escapeIslandEngaged );
	}
#elif defined(__i386__) || defined(__x86_64__)
	// Atomically:
	//	o If the reentry island was allocated:
	//		o Insert the original instructions into the reentry island.
	//		o Target the reentry island at the first non-replaced 
	//        instruction of the original function.
	//	o Replace the original first instructions with the jump relative.
	//
	// Note that on i386, we do not support someone else changing the code under our feet
	if ( !err ) {
		if( reentryIsland )
			err = setBranchIslandTarget_i386( reentryIsland,
										 (void*) ((char *)originalFunctionPtr+eatenCount), originalInstructions );
		if ( !err )
			atomic_mov64((uint64_t *)originalFunctionPtr, jumpRelativeInstruction);
	}
#endif
	
	//	Clean up on error.
	if( err ) {
		if( reentryIsland )
			freeBranchIsland( reentryIsland );
		if( escapeIsland )
			freeBranchIsland( escapeIsland );
	}

#if defined(__x86_64__)
        err = makeIslandExecutable(escapeIsland);
        err = makeIslandExecutable(reentryIsland);
#endif
	
	return err;
}
Code Example #13
OSStatus
PostEventSIH( void* p1, void* p2 )
{
    DVCEventRecordPtr			pEvent = (DVCEventRecordPtr) p1;
    DVEventEntryPtr				pEventEntry;
    DVNotificationEntryPtr		pEntry;
    OSErr						error = noErr;

    // We now have two broad classifications of events - ones that need to be
    // reported ASAP, which are stream related:
    //
    // 		kDVIsochReadComplete
    //		kDVIsochWriteComplete
    //
    // and ones that are device management related, whose notifications will
    // probably generate massive amounts of task-level only Toolbox calls:
    //
    //		kDVDeviceAdded
    //		kDVDeviceRemoved
    //		kDVIsochReadEnabled
    //		kDVIsochReadDisabled
    //		kDVIsochWriteEnabled
    //		kDVIsochWriteDisabled
    //
    // We ship the low-latency notifications to secondary interrupt, while
    // the task level calls we queue and get back to them when someone
    // calls DVIdle().
    //

    // ok, so let's go find out who's waiting for this event

    // go through list looking for the curious
    pEntry = (DVNotificationEntryPtr) gpFamilyGlobals->notificationQueue->qHead;
    while ( pEntry != nil )
    {
        if ( (pEvent->eventHeader.theEvent & pEntry->wantedEvents) != nil )
        {
            // only send notification if it's a global connection id or if
            // the event came from the same deviceID as this notif entry
            if ( (pEntry->deviceID == kDVGlobalEventConnectionID) ||
                    (pEvent->eventHeader.deviceID == pEntry->deviceID) )
            {
                // we currently only support a one-shot notification, like clock callbacks
                pEntry->wantedEvents = nil;

                // make sure the event contains this notification id
                pEvent->eventHeader.notifID = (DVCNotificationID) pEntry;


                // check before calling..
                switch( pEvent->eventHeader.theEvent )
                {
                case kDVIsochReadComplete:
                case kDVIsochWriteComplete:
                    // process event immediately...
                    error = (*pEntry->notifyProc)( pEvent, pEntry->userRefCon );
                    break;

                case kDVDeviceAdded:
                case kDVDeviceRemoved:
                case kDVIsochReadEnabled:
                case kDVIsochReadDisabled:
                case kDVIsochWriteEnabled:
                case kDVIsochWriteDisabled:
                    // queue the event and proc for later processing...

                    // get an entry
                    error = PBDequeueFirst( gpFamilyGlobals->availableDVEvents,
                                            (QElemPtr*) &pEventEntry );

                    // if we don't have any more available event elements,
                    // we just drop the events on the floor

                    // copy the notify proc & refcon
                    if ( error == noErr )
                    {
                        pEventEntry->notifyProc	= pEntry->notifyProc;
                        pEventEntry->userRefCon = pEntry->userRefCon;
                    }

                    // copy the event
                    if ( error == noErr )
                        BlockCopy( pEvent, &(pEventEntry->eventRec), sizeof( DVCEventRecord ) );

                    // queue it
                    if ( error == noErr )
                        PBEnqueue( (QElemPtr) pEventEntry, gpFamilyGlobals->receivedDVEvents );

                    // If we haven't already sent notification
                    // to Notification Mgr to run tasks, do it now...
                    if ( CompareAndSwap( false, true, &(gpFamilyGlobals->nmIsInstalled) ) )
                        NMInstall( &(gpFamilyGlobals->dvNMRec) );

                    break;

                default:
                    break;
                }

            }
        }

        // next entry
        pEntry = (DVNotificationEntryPtr) pEntry->qLink;
    }

    return( error );
}
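
Code Example #13 is the other half of Code Example #1: PostEventSIH installs the Notification Manager task only when it wins the false-to-true swap of nmIsInstalled, and myNMHandler drains events and removes the task after winning the true-to-false swap, so the task is installed at most once per burst of queued events. Below is a simplified stand-alone model of that one-shot latch using C++11 atomics; the stub functions stand in for NMInstall, NMRemove, and DVIdle and are assumptions, not the real Toolbox calls.

#include <atomic>

// Stub stand-ins for NMInstall, NMRemove, and DVIdle, added only to keep the
// sketch self-contained.
static void InstallTask() {}
static void RemoveTask() {}
static void DrainQueuedEvents() {}

static std::atomic<bool> gTaskInstalled(false);

// PostEventSIH side: install the deferred task only on the false -> true edge,
// so a burst of queued events installs it exactly once.
void OnEventQueued()
{
	bool expected = false;
	if (gTaskInstalled.compare_exchange_strong(expected, true))
		InstallTask();
}

// myNMHandler side: drain events, and keep draining for as long as the flag
// keeps being found set (clearing it each time); then remove the task.
void OnTaskRan()
{
	bool expected;
	do {
		DrainQueuedEvents();
		expected = true;
	} while (gTaskInstalled.compare_exchange_strong(expected, false));
	RemoveTask();
}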