Example #1
/* 
   Processes an LC_UNIXTHREAD command.
   Returns 0 on success, -1 on any failure.
   The stack is mapped in and returned in *out_stack. 
   The thread's entry point is returned in *out_entry.
*/
static int 
load_unixthread(vki_uint8_t **out_stack_start, vki_uint8_t **out_stack_end, 
                vki_uint8_t **out_entry, struct thread_command *threadcmd)
{
   int err;
   vki_uint8_t *stack_end;
   int customstack;

   err = load_genericthread(&stack_end, &customstack, out_entry, threadcmd);
   if (err) return -1;

   if (!stack_end) {
      print("bad executable (no thread stack)\n");
      return -1;
   }

   if (!customstack) {
      // Map the stack
      vki_size_t stacksize = VG_PGROUNDUP(default_stack_size());
      vm_address_t stackbase = VG_PGROUNDDN(stack_end-stacksize);
      SysRes res;
        
      res = VG_(am_mmap_anon_fixed_client)(stackbase, stacksize, VKI_PROT_READ|VKI_PROT_WRITE|VKI_PROT_EXEC);
      check_mmap(res, stackbase, stacksize, "load_unixthread1");
      if (out_stack_start) *out_stack_start = (vki_uint8_t *)stackbase;
   } else {
      // custom stack - mapped via __UNIXSTACK segment
   }

   if (out_stack_end) *out_stack_end = stack_end;

   return 0;
}
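
The stack placement above is the one non-obvious step: the stack grows downward from stack_end, so its base is stack_end minus the page-rounded default size, itself rounded down to a page boundary. A small standalone sketch of that arithmetic (the page size, the PGROUNDUP/PGROUNDDN macros and the numeric values are illustrative stand-ins, not the Valgrind definitions of VG_PGROUNDUP/VG_PGROUNDDN or default_stack_size()):

#include <stdint.h>
#include <stdio.h>

#define PAGE_SIZE 4096UL
/* Illustrative stand-ins for VG_PGROUNDUP / VG_PGROUNDDN. */
#define PGROUNDUP(x)  (((x) + PAGE_SIZE - 1) & ~(PAGE_SIZE - 1))
#define PGROUNDDN(x)  ((x) & ~(PAGE_SIZE - 1))

int main(void)
{
   uintptr_t stack_end = 0xbffff000UL;            /* hypothetical value from the thread command */
   uintptr_t stacksize = PGROUNDUP(512 * 1024);   /* hypothetical default stack size */
   uintptr_t stackbase = PGROUNDDN(stack_end - stacksize);

   /* The anonymous fixed mapping would then cover [stackbase, stackbase+stacksize). */
   printf("stack: base=%#lx size=%#lx end=%#lx\n",
          (unsigned long)stackbase, (unsigned long)stacksize,
          (unsigned long)stack_end);
   return 0;
}
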
Example #2
void check_mfd_readable(const char *filename, const int lineno, int fd)
{
	char buf[16];
	void *p;

	safe_read(filename, lineno, NULL, 1, fd, buf, sizeof(buf));
	tst_res_(filename, lineno, TPASS, "read(%d, %s, %zu) succeeded", fd,
		buf, sizeof(buf));

	/* verify PROT_READ *is* allowed */
	p = check_mmap(filename, lineno, NULL, MFD_DEF_SIZE, PROT_READ,
			MAP_PRIVATE, fd, 0);

	check_munmap(filename, lineno, p, MFD_DEF_SIZE);

	/* verify MAP_PRIVATE is *always* allowed (even writable) */
	p = check_mmap(filename, lineno, NULL, MFD_DEF_SIZE,
			PROT_READ | PROT_WRITE, MAP_PRIVATE, fd, 0);

	check_munmap(filename, lineno, p, MFD_DEF_SIZE);
}
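
Examples #2 and #3 come from an LTP memfd test and lean on a check_mmap() helper defined in the test's shared code. The sketch below is only an approximation of that helper's likely shape, not the actual LTP implementation: the use of tst_brk_() with TBROK | TERRNO on failure and the exact report text are assumptions; it simply maps the region and reports the outcome through the LTP result functions.

#include <stdint.h>
#include <sys/mman.h>
#include "tst_test.h"	/* LTP test library: tst_res_(), tst_brk_() */

/* Approximate shape of the check_mmap() helper that examples #2 and #3
 * rely on: map, then report through the LTP result machinery. */
void *check_mmap(const char *filename, const int lineno, void *addr,
		 size_t length, int prot, int flags, int fd, off_t offset)
{
	void *p = mmap(addr, length, prot, flags, fd, offset);

	if (p == MAP_FAILED)
		tst_brk_(filename, lineno, TBROK | TERRNO,
			 "mmap(%p, %zu, %d, %d, %d, %jd) failed",
			 addr, length, prot, flags, fd, (intmax_t)offset);

	tst_res_(filename, lineno, TPASS,
		 "mmap(%p, %zu, %d, %d, %d, %jd) succeeded",
		 addr, length, prot, flags, fd, (intmax_t)offset);

	return p;
}
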
Example #3
void check_mfd_writeable(const char *filename, const int lineno, int fd)
{
	void *p;

	/* verify write() succeeds */
	safe_write(filename, lineno, NULL, 1, fd, "\0\0\0\0", 4);
	tst_res_(filename, lineno, TPASS, "write(%d, %s, %d) succeeded", fd,
		"\\0\\0\\0\\0", 4);

	/* verify PROT_READ | PROT_WRITE is allowed */
	p = check_mmap(filename, lineno, NULL, MFD_DEF_SIZE,
			PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);

	*(char *)p = 0;
	check_munmap(filename, lineno, p, MFD_DEF_SIZE);

	/* verify PROT_WRITE is allowed */
	p = check_mmap(filename, lineno, NULL, MFD_DEF_SIZE,
			PROT_WRITE, MAP_SHARED, fd, 0);

	*(char *)p = 0;
	check_munmap(filename, lineno, p, MFD_DEF_SIZE);

	/* verify PROT_READ with MAP_SHARED is allowed and a following
	 * mprotect(PROT_WRITE) allows writing
	 */
	p = check_mmap(filename, lineno, NULL, MFD_DEF_SIZE,
			PROT_READ, MAP_SHARED, fd, 0);

	check_mprotect(filename, lineno, p, MFD_DEF_SIZE,
			PROT_READ | PROT_WRITE);

	*(char *)p = 0;
	check_munmap(filename, lineno, p, MFD_DEF_SIZE);

	/* verify PUNCH_HOLE works */
	check_fallocate(filename, lineno, fd,
			FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE, 0,
			MFD_DEF_SIZE);
}
Example #4
// Pad all the empty spaces in a range of address space to stop interlopers.
void as_pad(void *start, void *end, int padfile)
{
   fillgap_extra extra;
   extra.fillgap_start   = start;
   extra.fillgap_end     = end;
   extra.fillgap_padfile = padfile;

   foreach_map(fillgap, &extra);
	
   if (extra.fillgap_start < extra.fillgap_end) {
      void* res = mmap(extra.fillgap_start, 
                       extra.fillgap_end - extra.fillgap_start,
                       PROT_NONE, MAP_FIXED|MAP_PRIVATE, padfile, 0);
      check_mmap(res, extra.fillgap_start, 
                 extra.fillgap_end - extra.fillgap_start);
   }
}
Example #5
static int fillgap(char *segstart, char *segend, const char *perm, off_t off, 
                   int maj, int min, int ino, void* e)
{
   fillgap_extra* extra = e;

   if (segstart >= extra->fillgap_end)
      return 0;

   if (segstart > extra->fillgap_start) {
      void* res = mmap(extra->fillgap_start, segstart - extra->fillgap_start,
                       PROT_NONE, MAP_FIXED|MAP_PRIVATE, 
                       extra->fillgap_padfile, 0);
      check_mmap(res, extra->fillgap_start, segstart - extra->fillgap_start);
   }
   extra->fillgap_start = segend;
   
   return 1;
}
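
Examples #4 and #5 hand the raw mmap() return value to a check_mmap(res, start, len) helper. Because the padding mappings are requested with MAP_FIXED, the natural check is that the call succeeded and landed exactly at the requested address. A minimal standalone sketch of that check follows; it is an assumption about the helper's behaviour, not the original source:

#include <stdio.h>
#include <stdlib.h>
#include <sys/mman.h>

/* Minimal sketch of a check_mmap(res, start, len) helper as used by
 * as_pad()/fillgap(): with MAP_FIXED, a successful call must return
 * exactly the requested address, so anything else is treated as fatal. */
static void check_mmap(void *res, void *start, size_t len)
{
   if (res == MAP_FAILED) {
      perror("mmap");
      exit(1);
   }
   if (res != start) {
      fprintf(stderr, "mmap: wanted %p..%p, got %p\n",
              start, (char *)start + len, res);
      exit(1);
   }
}
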
Example #6
int main(int argc, char **argv)
{
	unsigned char *zero_mem, *test1_mem, *test2_mem;
	struct sched_param param = { .sched_priority = 1 };
	struct timespec zero = { .tv_sec = 0, .tv_nsec = 0 };
	struct sigaction sa;

	zero_mem = check_mmap(mmap(0, MEMSIZE, PROT_READ,
			      MAP_PRIVATE | MAP_ANONYMOUS, 0, 0));
	test1_mem = check_mmap(mmap(0, MEMSIZE, PROT_READ,
				    MAP_PRIVATE | MAP_ANONYMOUS, 0, 0));

	sigemptyset(&sa.sa_mask);
	sa.sa_sigaction = sigdebug_handler;
	sa.sa_flags = SA_SIGINFO;
	check_unix(sigaction(SIGDEBUG, &sa, NULL));

	check_unix(mlockall(MCL_CURRENT | MCL_FUTURE));

	check_pthread(pthread_setschedparam(pthread_self(), SCHED_FIFO, &param));

	printf("memory read\n");
	check_value("read mem", test1_mem[0], 0);

	pthread_set_mode_np(PTHREAD_WARNSW, 0);
	test2_mem = check_mmap(mmap(0, MEMSIZE, PROT_READ | PROT_WRITE,
				    MAP_PRIVATE | MAP_ANONYMOUS, 0, 0));
	check_unix(mprotect(test2_mem, MEMSIZE,
			    PROT_READ | PROT_WRITE | PROT_EXEC));

	nanosleep(&zero, NULL);
	pthread_set_mode_np(0, PTHREAD_WARNSW);

	printf("memory write after exec enable\n");
	test2_mem[0] = 0xff;

	pthread_set_mode_np(PTHREAD_WARNSW, 0);
	check_unix(mprotect(test1_mem, MEMSIZE, PROT_READ | PROT_WRITE));

	nanosleep(&zero, NULL);
	pthread_set_mode_np(0, PTHREAD_WARNSW);

	printf("memory write after write enable\n");
	test1_mem[0] = 0xff;
	check_value("read zero", zero_mem[0], 0);

	pthread_set_mode_np(PTHREAD_WARNSW, 0);

	test1_mem = check_mmap(mmap(0, MEMSIZE, PROT_NONE,
				    MAP_PRIVATE | MAP_ANONYMOUS, 0, 0));
	check_unix(mprotect(test1_mem, MEMSIZE, PROT_READ | PROT_WRITE));

	printf("memory read/write after access enable\n");
	check_value("read mem", test1_mem[0], 0);
	test1_mem[0] = 0xff;
	check_value("read zero", zero_mem[0], 0);

	fprintf(stderr, "Test OK\n");

	return 0;
}
Example #7
/* 
   Process an LC_SEGMENT command, mapping it into memory if appropriate.
   fd[offset..size) is a Mach-O thin file. 
   Returns 0 on success, -1 on any failure.
   If this segment contains the executable's Mach headers, their 
     loaded address is returned in *text.
   If this segment is a __UNIXSTACK, its start address is returned in 
     *stack_start.
*/
static int
load_segment(int fd, vki_off_t offset, vki_off_t size, 
             vki_uint8_t **text, vki_uint8_t **stack_start, 
             struct SEGMENT_COMMAND *segcmd, const HChar *filename)
{
   SysRes res;
   Addr addr;
   vki_size_t filesize; // page-aligned 
   vki_size_t vmsize;   // page-aligned
   unsigned int prot;

   // GrP fixme mark __UNIXSTACK as SF_STACK
    
   // Don't honour the client's request to map PAGEZERO.  Why not?
   // Because when the kernel loaded the valgrind tool executable,
   // it will have mapped pagezero itself.  So further attempts
   // to map it when loading the client are guaranteed to fail.
#if VG_WORDSIZE == 4
   if (segcmd->vmaddr == 0 && 0 == VG_(strcmp)(segcmd->segname, SEG_PAGEZERO)) {
      if (segcmd->vmsize != 0x1000) {
         print("bad executable (__PAGEZERO is not 4 KB)\n");
         return -1;
      }
      return 0;
   }
#endif
#if VG_WORDSIZE == 8
   if (segcmd->vmaddr == 0 && 0 == VG_(strcmp)(segcmd->segname, SEG_PAGEZERO)) {
      if (segcmd->vmsize != 0x100000000) {
         print("bad executable (__PAGEZERO is not 4 GB)\n");
         return -1;
      }
      return 0;
   }
#endif

   // Record the segment containing the Mach headers themselves
   if (segcmd->fileoff == 0  &&  segcmd->filesize != 0) {
      if (text) *text = (vki_uint8_t *)segcmd->vmaddr;
   }

   // Record the __UNIXSTACK start
   if (0 == VG_(strcmp)(segcmd->segname, SEG_UNIXSTACK)) {
      if (stack_start) *stack_start = (vki_uint8_t *)segcmd->vmaddr;
   }

   // Sanity-check the segment
   if (segcmd->fileoff + segcmd->filesize > size) {
      print("bad executable (invalid segment command)\n");
      return -1;
   }
   if (segcmd->vmsize == 0) {
      return 0;  // nothing to map - ok
   }

   // Get desired memory protection
   // GrP fixme need maxprot too
   prot = (((segcmd->initprot & VM_PROT_READ) ? VKI_PROT_READ : 0) |
           ((segcmd->initprot & VM_PROT_WRITE) ? VKI_PROT_WRITE : 0) |
           ((segcmd->initprot & VM_PROT_EXECUTE) ? VKI_PROT_EXEC : 0));

   // Map the segment    
   filesize = VG_PGROUNDUP(segcmd->filesize);
   vmsize = VG_PGROUNDUP(segcmd->vmsize);
   if (filesize > 0) {
      addr = (Addr)segcmd->vmaddr;
      VG_(debugLog)(2, "ume", "mmap fixed (file) (%#lx, %lu)\n", addr, filesize);
      res = VG_(am_mmap_named_file_fixed_client)(addr, filesize, prot, fd, 
                                                 offset + segcmd->fileoff, 
                                                 filename);
      check_mmap(res, addr, filesize, "load_segment1");
   }

   // Zero-fill the remainder of the segment, if any
   if (segcmd->filesize != filesize) {
      // non-page-aligned part
      // GrP fixme kernel doesn't do this?
      //bzero(segcmd->filesize+(vki_uint8_t *)addr, filesize-segcmd->filesize);
   }
   if (filesize != vmsize) {
      // page-aligned part
      SizeT length = vmsize - filesize;
      addr = (Addr)(filesize + segcmd->vmaddr);
      VG_(debugLog)(2, "ume", "mmap fixed (anon) (%#lx, %lu)\n", addr, length);
      res = VG_(am_mmap_anon_fixed_client)(addr, length, prot);
      check_mmap(res, addr, length, "load_segment2");
   }

   return 0;
}
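
load_segment() maps a segment in up to two pieces: a file-backed mapping covering the page-rounded filesize, then an anonymous zero-filled mapping for the remainder of the page-rounded vmsize. A small standalone sketch of that split with made-up segment numbers (the page size and rounding macro are stand-ins for the Valgrind equivalents, not taken from the source):

#include <stdio.h>

#define PAGE_SIZE 4096UL
#define PGROUNDUP(x) (((x) + PAGE_SIZE - 1) & ~(PAGE_SIZE - 1))   /* stand-in for VG_PGROUNDUP */

int main(void)
{
   /* Hypothetical segment: 0x1800 bytes of file data, 0x5000 bytes of VM. */
   unsigned long vmaddr       = 0x10000UL;
   unsigned long seg_filesize = 0x1800UL;
   unsigned long seg_vmsize   = 0x5000UL;

   unsigned long filesize = PGROUNDUP(seg_filesize);   /* 0x2000: file-backed part   */
   unsigned long vmsize   = PGROUNDUP(seg_vmsize);     /* 0x5000: total segment size */

   printf("file-backed : [%#lx, %#lx)\n", vmaddr, vmaddr + filesize);
   if (filesize != vmsize)
      printf("zero-filled : [%#lx, %#lx)\n", vmaddr + filesize, vmaddr + vmsize);
   return 0;
}
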