Example #1
void statm_page_range(struct task_struct *mach_task, 
		      vm_address_t vm_start, 
		      vm_address_t vm_end,
		      int *pages, 
		      int *shared, 
		      int *dirty, 
		      int *total)
{
  vm_address_t			start_addr;
  mach_port_t			mach_task_port;
  kern_return_t			kr;
  vm_machine_attribute_val_t	val;
  *pages = 0;
  *shared = 0;
  *dirty = 0;
  *total = 0;

  mach_task_port = mach_task->osfmach3.task->mach_task_port;
  start_addr = vm_start;
  /* Hack: 'kswapd' and 'kflushd' have all of memory mapped! Skip them... */
  if ((unsigned)vm_end >= 0x80000000) {
    *pages = *shared = *total = 0;
    return;
  }
  val = MATTR_VAL_GET_INFO;
  kr = vm_machine_attribute(mach_task_port,
			    start_addr, 
			    vm_end - vm_start,
			    MATTR_CACHE, &val);
  if (kr == KERN_SUCCESS) {
    *pages = val >> 16;
    *shared = val & 0xFFFF;
  } else {
    /* The attribute query failed or is unsupported; leave the counts
       at zero.  (The original listing is truncated here; this error
       path and the final total are a minimal reconstruction.)  */
    *pages = 0;
    *shared = 0;
  }
  *total = *pages;
}
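For reference: MATTR_VAL_GET_INFO packs two counters into one word, resident pages in the high 16 bits and shared pages in the low 16 bits, which is what the shifts above decode. A minimal self-contained sketch of the same query run against the calling task (hedged: many Mach hosts do not implement this attribute and simply return an error, which the fallback branch reports):

#include <mach/mach.h>
#include <mach/mach_error.h>
#include <stdio.h>

int main(void)
{
  vm_machine_attribute_val_t val = MATTR_VAL_GET_INFO;
  /* Probe the page holding 'val' itself; align down to a page boundary. */
  vm_address_t addr = ((vm_address_t) &val) & ~((vm_address_t) vm_page_size - 1);

  kern_return_t kr = vm_machine_attribute(mach_task_self(), addr,
                                          vm_page_size, MATTR_CACHE, &val);
  if (kr == KERN_SUCCESS)
    printf("pages=%d shared=%d\n", val >> 16, val & 0xFFFF);
  else
    printf("MATTR_VAL_GET_INFO unsupported here: %s\n", mach_error_string(kr));
  return 0;
}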
Example #2
/*
 * This is currently only implemented on the hppa architecture, so
 * versions for the other architectures are hacked in here.
 */
extern
kern_return_t
vm_flush_cache(
	mach_port_t target_task,
	vm_address_t address,
	vm_size_t size)
{
#if !defined(__m68k__) && !defined(__i386__)
	vm_machine_attribute_val_t value;
	value = MATTR_VAL_ICACHE_FLUSH;
	return(vm_machine_attribute(target_task, address, size, MATTR_CACHE,
				    &value));
#endif
#ifdef __m68k__
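	/* Hedged note: trap #2 is assumed here to be this system's
	   cache-flush trap; the original carries no explanation.  */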
	asm("trap #2");
	return(KERN_SUCCESS);
#endif
#ifdef __i386__
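	/* A taken branch drains the i386 prefetch queue, which is the
	   only instruction-stream serialization this CPU needs here.  */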
	asm("jmp 1f");
	asm("1: nop");
	return(KERN_SUCCESS);
#endif
}
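A hedged usage sketch for the wrapper above: after new instruction bytes are poked into a target task, they must be flushed from its instruction cache before the task resumes. vm_write is the standard Mach call; patch_and_flush is an illustrative name, not from the original:

#include <mach/mach.h>

extern kern_return_t vm_flush_cache(mach_port_t, vm_address_t, vm_size_t);

kern_return_t
patch_and_flush(mach_port_t task, vm_address_t addr,
		const void *code, vm_size_t size)
{
	kern_return_t kr;

	/* Copy the new instruction bytes into the target task.  */
	kr = vm_write(task, addr, (vm_offset_t) code,
		      (mach_msg_type_number_t) size);
	if (kr != KERN_SUCCESS)
		return kr;

	/* Make sure the target's I-cache no longer holds stale bytes.  */
	return vm_flush_cache(task, addr, size);
}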
Example #3
int
mach_xfer_memory (CORE_ADDR memaddr, char *myaddr, int len, int write, 
		  task_t task)
{
  mach_vm_address_t r_start = 0;
  mach_vm_address_t r_end = 0;
  mach_vm_size_t r_size = 0;

  vm_prot_t orig_protection = 0;
  vm_prot_t max_orig_protection = 0;

  CORE_ADDR cur_memaddr;
  char *cur_myaddr;
  int cur_len;

  vm_size_t pagesize = child_get_pagesize ();
  kern_return_t kret;
  int ret;

  /* Check for an out-of-range address: the assignment below truncates
     memaddr if it does not fit in a mach_vm_address_t.  */
  r_start = memaddr;
  if (r_start != memaddr)
    {
      errno = EINVAL;
      return 0;
    }
  
  if (len == 0)
    {
      return 0;
    }
  
  CHECK_FATAL (myaddr != NULL);
  errno = 0;
  
  /* Check for the case where memory is available only at an address
     greater than the address specified.  */
  {
    kret = macosx_get_region_info (task, memaddr, &r_start, &r_size,
				   &orig_protection, &max_orig_protection);
    if (kret != KERN_SUCCESS)
      {
        return 0;
      }
    
    if (r_start > memaddr)
      {
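        /* The gap to the next mapped region is reported to the caller as
           a negative int, so it must fit in one (MINUS_INT_MIN is the
           magnitude of INT_MIN).  */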
        if ((r_start - memaddr) <= MINUS_INT_MIN)
          {
#ifdef DEBUG_MACOSX_MUTILS
            mutils_debug
              ("First available address near 0x%8.8llx is at 0x%8.8llx; returning\n",
               (uint64_t) memaddr, (uint64_t) r_start);
#endif
            return -(r_start - memaddr);
          }
        else
          {
#ifdef DEBUG_MACOSX_MUTILS
            mutils_debug ("First available address near 0x%8.8llx is at 0x%8.8llx"
                          "(too far; returning 0)\n",
                          (uint64_t) memaddr, (uint64_t) r_start);
#endif
            return 0;
          }
      }
  }

  cur_memaddr = memaddr;
  cur_myaddr = myaddr;
  cur_len = len;

  while (cur_len > 0)
    {
      int changed_protections = 0;

      /* We want the inner-most map containing our address, so set
	 the recurse depth to some high value, and call mach_vm_region_recurse.  */
      kret = macosx_get_region_info (task, cur_memaddr, &r_start, &r_size,
				     &orig_protection, &max_orig_protection);
      if (kret != KERN_SUCCESS)
	break;

      if (r_start > cur_memaddr)
        {
#ifdef DEBUG_MACOSX_MUTILS
          mutils_debug
            ("Next available region for address at 0x%8.8llx is 0x%8.8llx\n",
             (uint64_t) cur_memaddr, (uint64_t) r_start);
#endif
          break;
        }

      if (write)
        {
	  /* Keep the execute permission if we modify protections.  */
	  vm_prot_t new_prot = VM_PROT_READ | VM_PROT_WRITE;
	  	  
	  /* Do we need to modify our protections?  */
	  if (orig_protection & VM_PROT_WRITE)
	    {
	      /* We don't need to modify our protections.  */
	      kret = KERN_SUCCESS;
#ifdef DEBUG_MACOSX_MUTILS
	      mutils_debug ("We already have write access to the region "
			    "containing: 0x%8.8llx, skipping permission modification.\n",
			    (uint64_t) cur_memaddr);
#endif
	    }
	  else
	    {
	      changed_protections = 1;
	      mach_vm_size_t prot_size;

	      if (cur_len < r_size - (cur_memaddr - r_start))
		prot_size = cur_len;
	      else
		prot_size = r_size - (cur_memaddr - r_start);

	      kret = macosx_vm_protect (task, r_start, r_size, 
					cur_memaddr, prot_size, new_prot, 0);

	      if (kret != KERN_SUCCESS)
		{
#ifdef DEBUG_MACOSX_MUTILS
		  mutils_debug ("Without COPY failed: %s (0x%lx)\n",
				MACH_ERROR_STRING (kret), kret);
#endif
		  kret = macosx_vm_protect (task, r_start, r_size, 
					    cur_memaddr, prot_size, 
					    VM_PROT_COPY | new_prot, 0);
		}

	      if (kret != KERN_SUCCESS)
		{
#ifdef DEBUG_MACOSX_MUTILS
		  mutils_debug
		    ("Unable to add write access to region at 0x8.8llx: %s (0x%lx)\n",
		     (uint64_t) r_start, MACH_ERROR_STRING (kret), kret);
#endif
		  break;
		}
	    }
        }

      r_end = r_start + r_size;

      CHECK_FATAL (r_start <= cur_memaddr);
      CHECK_FATAL (r_end >= cur_memaddr);
      CHECK_FATAL ((r_start % pagesize) == 0);
      CHECK_FATAL ((r_end % pagesize) == 0);
      CHECK_FATAL (r_end >= (r_start + pagesize));

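      /* Three transfer shapes: an unaligned head up to the next page
	 boundary, a run of whole pages, or a sub-page tail.  */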
      if ((cur_memaddr % pagesize) != 0)
        {
          int max_len = pagesize - (cur_memaddr % pagesize);
          int op_len = cur_len;
          if (op_len > max_len)
            {
              op_len = max_len;
            }
          ret = mach_xfer_memory_remainder (cur_memaddr, cur_myaddr, op_len,
                                            write, task);
        }
      else if (cur_len >= pagesize)
        {
          int max_len = r_end - cur_memaddr;
          int op_len = cur_len;
          if (op_len > max_len)
            {
              op_len = max_len;
            }
          op_len -= (op_len % pagesize);
          ret = mach_xfer_memory_block (cur_memaddr, cur_myaddr, op_len,
                                        write, task);
        }
      else
        {
          ret = mach_xfer_memory_remainder (cur_memaddr, cur_myaddr, cur_len,
                                            write, task);
        }

      if (write)
        {
	  /* This vm_machine_attribute isn't supported on i386,
	     so let's not try.  */
#if defined (TARGET_POWERPC)
	  vm_machine_attribute_val_t flush = MATTR_VAL_CACHE_FLUSH;
          kret = vm_machine_attribute (task, r_start, r_size,
                                       MATTR_CACHE, &flush);
          if (kret != KERN_SUCCESS)
            {
              static int nwarn = 0;
              nwarn++;
              if (nwarn <= MAX_INSTRUCTION_CACHE_WARNINGS)
                {
                  warning
                    ("Unable to flush data/instruction cache for region at 0x%8.8llx: %s",
                     (uint64_t) r_start, MACH_ERROR_STRING (kret));
                }
              if (nwarn == MAX_INSTRUCTION_CACHE_WARNINGS)
                {
                  warning
                    ("Support for flushing the data/instruction cache on this "
		     "machine appears broken");
                  warning ("No further warning messages will be given.");
                }
            }
#endif
	  /* Try and restore permissions on the minimal address range.  */
	  if (changed_protections)
	    {
	      mach_vm_size_t prot_size;
	      if (cur_len < r_size - (cur_memaddr - r_start))
		prot_size = cur_len;
	      else
		prot_size = r_size - (cur_memaddr - r_start);

	      kret = macosx_vm_protect (task, r_start, r_size, 
					cur_memaddr, prot_size, 
					orig_protection, 0);
	      if (kret != KERN_SUCCESS)
		{
		  warning
		    ("Unable to restore original permissions for region at 0x%8.8llx",
		     (uint64_t) r_start);
		}
	    }
        }


      cur_memaddr += ret;
      cur_myaddr += ret;
      cur_len -= ret;

      if (ret == 0)
        {
          break;
        }
    }

  return len - cur_len;
}
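mach_xfer_memory returns the number of bytes actually transferred; that can fall short of len when the request runs into an unmapped gap, and it is negative when readable memory only begins above memaddr (the -(r_start - memaddr) case above). A hedged caller sketch; read_inferior_bytes is an illustrative name, not part of the original source:

static int
read_inferior_bytes (task_t task, CORE_ADDR addr, char *buf, int len)
{
  int done = 0;

  while (done < len)
    {
      int n = mach_xfer_memory (addr + done, buf + done,
				len - done, 0 /* read */, task);
      if (n < 0)
	break;			/* readable memory starts -n bytes higher */
      if (n == 0)
	break;			/* unreadable region or hard error */
      done += n;
    }
  return done;
}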
Example #4
static int
mach_xfer_memory_remainder (CORE_ADDR memaddr, char *myaddr,
                            int len, int write,
                            task_t task)
{
  vm_size_t pagesize = child_get_pagesize ();

  vm_offset_t mempointer;       /* local copy of inferior's memory */
  mach_msg_type_number_t memcopied;     /* for vm_read to use */

  CORE_ADDR pageaddr = memaddr - (memaddr % pagesize);

  kern_return_t kret;

  CHECK_FATAL (((memaddr + len - 1) - ((memaddr + len - 1) % pagesize))
               == pageaddr);

  if (!write)
    {
      kret = mach_vm_read (task, pageaddr, pagesize, &mempointer, &memcopied);
      
      if (kret != KERN_SUCCESS)
	{
#ifdef DEBUG_MACOSX_MUTILS
	  mutils_debug
	    ("Unable to read page for region at 0x%8.8llx with length %lu from inferior: %s (0x%lx)\n",
	     (uint64_t) pageaddr, (unsigned long) len,
	     MACH_ERROR_STRING (kret), kret);
#endif
	  return 0;
	}
      if (memcopied != pagesize)
	{
	  kret = vm_deallocate (mach_task_self (), mempointer, memcopied);
	  if (kret != KERN_SUCCESS)
	    {
	      warning
		("Unable to deallocate memory used by failed read from inferior: %s (0x%lx)",
		 MACH_ERROR_STRING (kret), (unsigned long) kret);
	    }
#ifdef DEBUG_MACOSX_MUTILS
	  mutils_debug
	    ("Unable to read region at 0x%8.8llx with length %lu from inferior: "
	     "vm_read returned %lu bytes instead of %lu\n",
	     (uint64_t) pageaddr, (unsigned long) pagesize,
	     (unsigned long) memcopied, (unsigned long) pagesize);
#endif
	  return 0;
	}
      
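      /* 'mempointer' is an integer vm_offset_t; adding it to a null byte
	 pointer turns it into an address without an integer-cast warning.  */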
      memcpy (myaddr, ((unsigned char *) 0) + mempointer
              + (memaddr - pageaddr), len);
      kret = vm_deallocate (mach_task_self (), mempointer, memcopied);
      if (kret != KERN_SUCCESS)
	{
	  warning
	    ("Unable to deallocate memory used to read from inferior: %s (0x%lx)",
	     MACH_ERROR_STRING (kret), (unsigned long) kret);
	  return 0;
	}
    }
  else
    {
      /* We used to read in a whole page, then modify the page
	 contents, then write that page back out.  I bet we did that
	 so we didn't break up page maps or something like that.
	 However, in Leopard there's a bug in the shared cache
	 implementation, such that if we write into it with whole
	 pages the maximum page protections don't get set properly and
	 we can no longer reset the execute bit.  In 64 bit Leopard
	 apps, the execute bit has to be set or we can't run code from
	 there.

	 If we figure out that not writing whole pages causes problems
	 of its own, then we will have to revisit this.  */

#if defined (TARGET_POWERPC)
      vm_machine_attribute_val_t flush = MATTR_VAL_CACHE_FLUSH;
      /* This vm_machine_attribute only works on PPC, so no reason
	 to keep failing on x86...  The removed read-modify-write code
	 flushed the scratch page at 'mempointer'; that variable is no
	 longer filled in on the write path, so flush the caller's
	 buffer instead.  */

      kret = vm_machine_attribute (mach_task_self (), (vm_address_t) myaddr,
                                   len, MATTR_CACHE, &flush);
#ifdef DEBUG_MACOSX_MUTILS
      if (kret != KERN_SUCCESS)
        {
          mutils_debug
            ("Unable to flush GDB's address space prior to vm_write: %s (0x%lx)\n",
             MACH_ERROR_STRING (kret), kret);
        }
#endif
#endif
      kret =
        mach_vm_write (task, memaddr, (pointer_t) myaddr, len);
      if (kret != KERN_SUCCESS)
        {
#ifdef DEBUG_MACOSX_MUTILS
          mutils_debug
            ("Unable to write region at 0x%8.8llx with length %lu to inferior: %s (0x%lx)\n",
             (uint64_t) memaddr, (unsigned long) len,
             MACH_ERROR_STRING (kret), kret);
#endif
          return 0;
        }
    }

  return len;
}
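The page-rounding scheme at the top of mach_xfer_memory_remainder (round memaddr down to pageaddr, vm_read the whole page, copy from offset memaddr - pageaddr) can be exercised standalone. A hedged demo against the calling task itself, using only the Mach calls that already appear in the example:

#include <mach/mach.h>
#include <mach/mach_vm.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

int
main (void)
{
  char src[] = "hello from a page";
  mach_vm_address_t memaddr = (mach_vm_address_t) (uintptr_t) &src[6];
  mach_vm_address_t pageaddr = memaddr - (memaddr % vm_page_size);

  vm_offset_t mempointer = 0;
  mach_msg_type_number_t memcopied = 0;
  kern_return_t kr = mach_vm_read (mach_task_self (), pageaddr,
				   vm_page_size, &mempointer, &memcopied);
  if (kr != KERN_SUCCESS)
    return 1;

  char out[12] = { 0 };
  /* Copy the unaligned sub-range out of the page-sized local copy.  */
  memcpy (out, (char *) mempointer + (memaddr - pageaddr), sizeof out - 1);
  printf ("%s\n", out);		/* prints "from a page" */

  vm_deallocate (mach_task_self (), mempointer, memcopied);
  return 0;
}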