Example #1
int main(void)
{
   struct sigaction sigsegv_new, sigsegv_saved;
   int res;

   /* Install own SIGSEGV handler */
   sigsegv_new.sa_handler  = SIGSEGV_handler;
   sigsegv_new.sa_flags    = 0;
   sigsegv_new.sa_restorer = NULL;
   res = sigemptyset( &sigsegv_new.sa_mask );
   assert(res == 0);

   res = sigaction( SIGSEGV, &sigsegv_new, &sigsegv_saved );
   assert(res == 0);

   if (__builtin_setjmp(myjmpbuf) == 0) {
      // Jump to zero; will cause seg fault
      void (*fn)(void) = 0;
      fn();
      fprintf(stderr, "Got here??\n");
   } else  {
      fprintf(stderr, "Signal caught, as expected\n");
   }

   return 0;
}
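This snippet leaves myjmpbuf and SIGSEGV_handler undefined. A minimal sketch of what they presumably look like (the names come from the snippet; the bodies are assumptions): the handler must escape with __builtin_longjmp, whose buffer is a five-word array rather than a jmp_buf.

#include <assert.h>
#include <signal.h>
#include <stdio.h>

static void *myjmpbuf[5];   /* __builtin_setjmp uses a 5-word buffer, not a jmp_buf */

static void SIGSEGV_handler(int signum)
{
   /* Jump back to the __builtin_setjmp call in main, which then returns 1. */
   __builtin_longjmp(myjmpbuf, 1);
}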
Example #2
void
bar (void *x)
{
  if (__builtin_setjmp (x))
    return;
  foo (x);
}
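The companion foo is not shown; presumably it eventually transfers control back through the same buffer, roughly like this (an assumption, not from the source):

void
foo (void *x)
{
  __builtin_longjmp (x, 1);   /* returns to the __builtin_setjmp in bar */
}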
Example #3
void
__sjpopnthrow()
{
    struct eh_context *eh = (*get_eh_context)();
    void ***dhc = &eh->dynamic_handler_chain;
    void (*func)(void *, int);
    void *arg;
    void ***cleanup = (void***)&(*dhc)[1];

    if (cleanup[0])
    {
	double store[200];
	void **buf = (void**)store;
	buf[1] = 0;
	buf[0] = (*dhc);
	if (! __builtin_setjmp(&buf[2]))
	{
	    *dhc = buf;
	    while (cleanup[0])
	    {
		func = (void(*)(void*, int))cleanup[0][1];
		arg = (void*)cleanup[0][2];
		cleanup[0] = (void **)cleanup[0][0];
		(*func)(arg, 2);
	    }
	    *dhc = (void **)buf[0];
	}
	else
	{
	    __terminate();
	}
    }
    *dhc = (void**)(*dhc)[0];
    __sjthrow();
}
Example #4
int
main ()
{
  char *p = (char *) __builtin_alloca (20);

  strcpy (p, "test");

  if (__builtin_setjmp (buf))
    {
      if (strcmp (p, "test") != 0)
	abort ();

      exit (0);
    }

  {
    int *q = (int *) __builtin_alloca (p[2] * sizeof (int));
    int i;
    
    for (i = 0; i < p[2]; i++)
      q[i] = 0;

    while (1)
      sub2 ();
  }
}
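The test relies on buf and sub2 defined elsewhere; a minimal sketch of those externals (an educated guess from the snippet): sub2 jumps back to main, which then checks that the alloca'd string survived the longjmp.

#include <stdlib.h>
#include <string.h>

void *buf[5];                   /* five-word buffer for __builtin_setjmp */

void sub2 (void)
{
  __builtin_longjmp (buf, 1);   /* back to the setjmp in main */
}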
Example #5
/* 
   Test whether a piece of memory is addressable by installing a temporary
   SIGSEGV handler and then trying to touch the memory.  No signal = good,
   signal = bad.
 */
Bool VG_(is_addressable)(Addr p, Int size)
{
   volatile Char * volatile cp = (volatile Char *)p;
   volatile Bool ret;
   vki_ksigaction sa, origsa;
   vki_ksigset_t mask;

   vg_assert(size > 0);

   sa.ksa_handler = segv_handler;
   sa.ksa_flags = 0;
   VG_(ksigfillset)(&sa.ksa_mask);
   VG_(ksigaction)(VKI_SIGSEGV, &sa, &origsa);
   VG_(ksigprocmask)(VKI_SIG_SETMASK, NULL, &mask);

   if (__builtin_setjmp(&segv_jmpbuf) == 0) {
      while(size--)
	 *cp++;
      ret = True;
    } else
      ret = False;

   VG_(ksigaction)(VKI_SIGSEGV, &origsa, NULL);
   VG_(ksigprocmask)(VKI_SIG_SETMASK, &mask, NULL);

   return ret;
}
Example #6
File: body.c  Project: AHelper/gcc
void foo()
{
  int j;

#pragma simd
  for (int i=0; i < 1000; ++i)
    {
      if (c == 6)
	__builtin_setjmp (jmpbuf); /* { dg-error "setjmp" } */
      a[i] = b[i];
    }

#pragma simd
  for (int i=0; i < 1000; ++i)
    {
      if (c==5)
	break; /* { dg-error "break statement " } */
    }

#pragma simd
  for (int i=0; i < 1000; ++i)
    {
#pragma omp for /* { dg-error "OpenMP constructs may not" } */
      for (j=0; j < 1000; ++j)
	a[i] = b[i];
    }
}
Example #7
static int is_addressable(void *p, size_t size)
{
   volatile char * volatile cp = (volatile char *)p;
   volatile int ret;
   struct sigaction sa, origsa;
   sigset_t mask;
   
   sa.sa_handler = segv_handler;
   sa.sa_flags = 0;
   sigfillset(&sa.sa_mask);
   sigaction(SIGSEGV, &sa, &origsa);
   sigprocmask(SIG_SETMASK, NULL, &mask);

   if (__builtin_setjmp(segv_jmpbuf) == 0) {
      while(size--)
	 *cp++;
      ret = 1;
    } else
      ret = 0;

   sigaction(SIGSEGV, &origsa, NULL);
   sigprocmask(SIG_SETMASK, &mask, NULL);

   return ret;
}
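The handler and buffer are assumed here; a plausible definition (an assumption, not from the source) is a handler that simply longjmps straight back, so a faulting access makes __builtin_setjmp return nonzero and the function reports the memory as not addressable.

#include <signal.h>

static void *segv_jmpbuf[5];

static void segv_handler(int sig)
{
   __builtin_longjmp(segv_jmpbuf, 1);
}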
Example #8
int
foo (int x)
{
  int y = __builtin_setjmp (buf);
  while (x != 3 && x && x && x != 2)
    x = bar ();
  return y;
}
Example #9
// Scan a block of memory between [start, start+len).  This range may
// be bogus, inaccessible, or otherwise strange; we deal with it.  For each
// valid aligned word, we assume it's a pointer to a chunk and push the
// chunk onto the mark stack if so.
static void
lc_scan_memory(Addr start, SizeT len, Bool is_prior_definite, Int clique)
{
   Addr ptr = VG_ROUNDUP(start,     sizeof(Addr));
   Addr end = VG_ROUNDDN(start+len, sizeof(Addr));
   vki_sigset_t sigmask;

   if (VG_DEBUG_LEAKCHECK)
      VG_(printf)("scan %#lx-%#lx (%lu)\n", start, end, len);

   VG_(sigprocmask)(VKI_SIG_SETMASK, NULL, &sigmask);
   VG_(set_fault_catcher)(scan_all_valid_memory_catcher);

   // We might be in the middle of a page.  Do a cheap check to see if
   // it's valid;  if not, skip onto the next page.
   if (!VG_(am_is_valid_for_client)(ptr, sizeof(Addr), VKI_PROT_READ))
      ptr = VG_PGROUNDUP(ptr+1);        // First page is bad - skip it.

   while (ptr < end) {
      Addr addr;

      // Skip invalid chunks.
      if ( ! MC_(is_within_valid_secondary)(ptr) ) {
         ptr = VG_ROUNDUP(ptr+1, SM_SIZE);
         continue;
      }

      // Look to see if this page seems reasonable.
      if ((ptr % VKI_PAGE_SIZE) == 0) {
         if (!VG_(am_is_valid_for_client)(ptr, sizeof(Addr), VKI_PROT_READ)) {
            ptr += VKI_PAGE_SIZE;      // Bad page - skip it.
            continue;
         }
      }

      if (__builtin_setjmp(memscan_jmpbuf) == 0) {
         if ( MC_(is_valid_aligned_word)(ptr) ) {
            lc_scanned_szB += sizeof(Addr);
            addr = *(Addr *)ptr;
            // If we get here, the scanned word is in valid memory.  Now
            // let's see if its contents point to a chunk.
            lc_push_if_a_chunk_ptr(addr, clique, is_prior_definite);
         } else if (0 && VG_DEBUG_LEAKCHECK) {
            VG_(printf)("%#lx not valid\n", ptr);
         }
         ptr += sizeof(Addr);
      } else {
         // We need to restore the signal mask, because we were
         // longjmped out of a signal handler.
         VG_(sigprocmask)(VKI_SIG_SETMASK, &sigmask, NULL);

         ptr = VG_PGROUNDUP(ptr+1);     // Bad page - skip it.
      }
   }

   VG_(sigprocmask)(VKI_SIG_SETMASK, &sigmask, NULL);
   VG_(set_fault_catcher)(NULL);
}
Example #10
int foo()
{
  __complex__ int i = 0;

  if (__builtin_setjmp(buf))
  {
    i = 1;
    bar();
  }

  return i == 0;
}
Example #11
void
__sjpopnthrow ()
{
  struct eh_context *eh = (*get_eh_context) ();
  void ***dhc = &eh->dynamic_handler_chain;
  void (*func)(void *, int);
  void *arg;
  void ***cleanup;

  /* The cleanup chain is one word into the buffer.  Get the cleanup
     chain.  */
  cleanup = (void***)&(*dhc)[1];

  /* If there are any cleanups in the chain, run them now.  */
  if (cleanup[0])
    {
      double store[200];
      void **buf = (void**)store;
      buf[1] = 0;
      buf[0] = (*dhc);

      /* try { */
#ifdef DONT_USE_BUILTIN_SETJMP
      if (! setjmp (&buf[2]))
#else
      if (! __builtin_setjmp (&buf[2]))
#endif
	{
	  *dhc = buf;
	  while (cleanup[0])
	    {
	      func = (void(*)(void*, int))cleanup[0][1];
	      arg = (void*)cleanup[0][2];

	      /* Update this before running the cleanup.  */
	      cleanup[0] = (void **)cleanup[0][0];

	      (*func)(arg, 2);
	    }
	  *dhc = buf[0];
	}
      /* catch (...) */
      else
	{
	  __terminate ();
	}
    }

  /* Then we pop the top element off the dynamic handler chain.  */
  *dhc = (void**)(*dhc)[0];

  __sjthrow ();
}
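The index arithmetic above implies a layout for the chain: the handler buffer holds the previous chain head in word 0, the cleanup list head in word 1, and the jump buffer from word 2 on, while each cleanup node is (next, function, argument). An illustrative reconstruction of a cleanup node (not a declaration from the source):

/* Illustrative only, reconstructed from the cleanup[0][0..2] indexing. */
struct cleanup_node_sketch {
   struct cleanup_node_sketch *next;    /* [0] rest of the cleanup list */
   void (*fn)(void *, int);             /* [1] cleanup function         */
   void *arg;                           /* [2] its argument             */
};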
Example #12
/* Scan a block of memory between [start, start+len).  This range may
   be bogus, inaccessible, or otherwise strange; we deal with it.

   If clique != -1, it means we're gathering leaked memory into
   cliques, and clique is the index of the current clique leader. */
static void _lc_scan_memory(Addr start, SizeT len, Int clique)
{
   Addr ptr = VG_ROUNDUP(start, sizeof(Addr));
   Addr end = VG_ROUNDDN(start+len, sizeof(Addr));
   vki_sigset_t sigmask;

   if (VG_DEBUG_LEAKCHECK)
      VG_(printf)("scan %p-%p\n", start, start+len);
   VG_(sigprocmask)(VKI_SIG_SETMASK, NULL, &sigmask);
   VG_(set_fault_catcher)(scan_all_valid_memory_catcher);

   lc_scanned += end-ptr;

   if (!VG_(is_client_addr)(ptr) ||
       !VG_(is_addressable)(ptr, sizeof(Addr), VKI_PROT_READ))
      ptr = VG_PGROUNDUP(ptr+1);	/* first page bad */

   while (ptr < end) {
      Addr addr;

      /* Skip invalid chunks */
      if (!(*lc_is_within_valid_secondary)(ptr)) {
	 ptr = VG_ROUNDUP(ptr+1, SECONDARY_SIZE);
	 continue;
      }

      /* Look to see if this page seems reasonable */
      if ((ptr % VKI_PAGE_SIZE) == 0) {
	 if (!VG_(is_client_addr)(ptr) ||
	     !VG_(is_addressable)(ptr, sizeof(Addr), VKI_PROT_READ))
	    ptr += VKI_PAGE_SIZE; /* bad page - skip it */
      }

      if (__builtin_setjmp(memscan_jmpbuf) == 0) {
	 if ((*lc_is_valid_aligned_word)(ptr)) {
	    addr = *(Addr *)ptr;
	    _lc_markstack_push(addr, clique);
	 } else if (0 && VG_DEBUG_LEAKCHECK)
	    VG_(printf)("%p not valid\n", ptr);
	 ptr += sizeof(Addr);
      } else {
	 /* We need to restore the signal mask, because we were
	    longjmped out of a signal handler. */
	 VG_(sigprocmask)(VKI_SIG_SETMASK, &sigmask, NULL);

	 ptr = VG_PGROUNDUP(ptr+1);	/* bad page - skip it */
      }
   }

   VG_(sigprocmask)(VKI_SIG_SETMASK, &sigmask, NULL);
   VG_(set_fault_catcher)(NULL);
}
Example #13
real
obj1val_ASL(ASL *a, int i, real *X, fint *nerror)
{
	ASL_fg *asl;
	Jmp_buf err_jmp0;
	cde *d;
	expr *e1;
	expr_v *V;
	int ij;
	ograd *gr;
	real f;

	NNOBJ_chk(a, i, "obj1val");
	asl = (ASL_fg*)a;
	if (nerror && *nerror >= 0) {
		err_jmp = &err_jmp0;
		ij = __builtin_setjmp(err_jmp0.jb);
		if (ij) {
			*nerror = err_jmp0.err;
			f = 0.;
			goto done;
			}
		}
	want_deriv = want_derivs;
	errno = 0;	/* in case f77 set errno opening files */
	if (!asl->i.x_known)
		x0_check_ASL(asl,X);
	if (!asl->i.noxval)
		asl->i.noxval = (int*)M1zapalloc(n_obj*sizeof(int));
	co_index = -(i + 1);
	if (!(x0kind & ASL_have_objcom)) {
		if (ncom0 > combc)
			comeval_ASL(asl, combc, ncom0);
		if (comc1 < ncom1)
			com1eval_ASL(asl, comc1, ncom1);
		x0kind |= ASL_have_objcom;
		}
	d = obj_de + i;
	gr = Ograd[i];
	e1 = d->e;
	f = (*e1->op)(e1 C_ASL);
	asl->i.noxval[i] = asl->i.nxval;
	if (asl->i.vmap || asl->i.vscale)
		for(V = var_e; gr; gr = gr->next)
			f += gr->coef * V[gr->varno].v;
	else
		for(; gr; gr = gr->next)
			f += gr->coef * X[gr->varno];
 done:
	err_jmp = 0;
	return f;
	}
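The recurring ASL error-recovery pattern here (err_jmp = &err_jmp0, then __builtin_setjmp on err_jmp0.jb, with the error code read back after the jump) suggests a wrapper roughly like the sketch below. This is a guess from usage, not the actual ASL header definition:

/* Sketch only; the real definition lives in the AMPL/ASL headers.  jb must
   be a five-word buffer for __builtin_setjmp, and err carries the error
   code read back as err_jmp0.err. */
typedef struct Jmp_buf_sketch {
	void *jb[5];
	long err;	/* fint in the snippet; plain long here to stay self-contained */
} Jmp_buf_sketch;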
Example #14
int execute(int cmd) /* { dg-warning "'l1-cache-size' parameter is not a power of two 3" } */
{
  int last = 0;

  if (__builtin_setjmp (buf) == 0)
    while (1)
      {
	last = 1;
	raise0 ();
      }

  if (last == 0)
    return 0;
  else
    return cmd;
}
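Here buf and raise0 come from elsewhere in the test; presumably raise0 longjmps back so that __builtin_setjmp returns nonzero with last already set. A minimal sketch (assumed, not from the source):

void *buf[5];

void raise0 (void)
{
  __builtin_longjmp (buf, 1);   /* re-enter execute() after the setjmp */
}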
Example #15
int execute(int cmd)
{
  int last = 0;

  if (__builtin_setjmp (buf) == 0)
    while (1)
      {
        last = 1;
        raise ();
      }

  if (last == 0)
    return 0;
  else
    return cmd;
}
Example #16
__attribute__((noinline, noclone)) int
foo (int x)
{
  int a = 0;

  if (__builtin_setjmp (&jmp_buf) == 0)
    {
      while (1)
	{
	  a = 1;
	  bar ();  /* OK if baz () instead */
	}
    }
  else
    {
      if (a == 0)
	return 0;
      else
	return x;
    }
}
Example #17
void
x1known_ASL(ASL *asl, real *X, fint *nerror)
{
	Jmp_buf err_jmp0;
	int ij;

	ASL_CHECK(asl, ASL_read_fg, "x1known");
	if (asl->i.xknown_ignore)
		return;
	if (nerror && *nerror >= 0) {
		err_jmp = &err_jmp0;
		ij = __builtin_setjmp(err_jmp0.jb);
		if (ij) {
			*nerror = err_jmp0.err;
			goto done;
			}
		}
	errno = 0;	/* in case f77 set errno opening files */
	x0_check_ASL((ASL_fg*)asl, X);
	asl->i.x_known = 1;
 done:
	err_jmp = 0;
	}
Example #18
int main(void)
{
   struct sigaction sigsegv_new, sigsegv_saved;
   int res;

   /* Install own SIGSEGV handler */
   sigsegv_new.sa_handler  = SIGSEGV_handler;
   sigsegv_new.sa_flags    = 0;
#if !defined(_AIX) && !defined(__APPLE__)
   sigsegv_new.sa_restorer = NULL;
#endif
   res = sigemptyset( &sigsegv_new.sa_mask );
   assert(res == 0);

   res = sigaction( SIGSEGV, &sigsegv_new, &sigsegv_saved );
   assert(res == 0);

   if (__builtin_setjmp(myjmpbuf) == 0) {
      // Jump to zero; will cause seg fault
#if defined(__powerpc64__) || defined(_AIX)
      unsigned long int fake_fndescr[3];
      fake_fndescr[0] = 0;
      fake_fndescr[1] = 0;
      fake_fndescr[2] = 0;
      ((void(*)(void)) fake_fndescr) ();
#else
      void (*fn)(void) = 0;
      fn();
#endif
      fprintf(stderr, "Got here??\n");
   } else  {
      fprintf(stderr, "Signal caught, as expected\n");
   }

   return 0;
}
Example #19
/* Safely (avoiding SIGSEGV / SIGBUS) scan the entire valid address
   space and pass the addresses and values of all addressible,
   defined, aligned words to notify_word.  This is the basis for the
   leak detector.  Returns the number of calls made to notify_word.

   Addresses are validated 3 ways.  First we enquire whether (addr >>
   16) denotes a 64k chunk in use, by asking is_valid_64k_chunk().  If
   so, we decide for ourselves whether each x86-level (4 K) page in
   the chunk is safe to inspect.  If yes, we enquire with
   is_valid_address() whether or not each of the 1024 word-locations
   on the page is valid.  Only if so are that address and its contents
   passed to notify_word.

   This is all to avoid duplication of this machinery between the
   memcheck and addrcheck skins.  
*/
static
UInt vg_scan_all_valid_memory ( Bool is_valid_64k_chunk ( UInt ),
                                Bool is_valid_address ( Addr ),
                                void (*notify_word)( Addr, UInt ) )
{
   /* All volatile, because some gccs seem paranoid about longjmp(). */
   volatile Bool anyValid;
   volatile Addr pageBase, addr;
   volatile UInt res, numPages, page, primaryMapNo;
   volatile UInt page_first_word, nWordsNotified;

   vki_ksigaction sigbus_saved;
   vki_ksigaction sigbus_new;
   vki_ksigaction sigsegv_saved;
   vki_ksigaction sigsegv_new;
   vki_ksigset_t  blockmask_saved;
   vki_ksigset_t  unblockmask_new;

   /* Temporarily install a new sigsegv and sigbus handler, and make
      sure SIGBUS, SIGSEGV and SIGTERM are unblocked.  (Perhaps the
      first two can never be blocked anyway?)  */

   sigbus_new.ksa_handler = vg_scan_all_valid_memory_sighandler;
   sigbus_new.ksa_flags = VKI_SA_ONSTACK | VKI_SA_RESTART;
   sigbus_new.ksa_restorer = NULL;
   res = VG_(ksigemptyset)( &sigbus_new.ksa_mask );
   sk_assert(res == 0);

   sigsegv_new.ksa_handler = vg_scan_all_valid_memory_sighandler;
   sigsegv_new.ksa_flags = VKI_SA_ONSTACK | VKI_SA_RESTART;
   sigsegv_new.ksa_restorer = NULL;
   res = VG_(ksigemptyset)( &sigsegv_new.ksa_mask );
   sk_assert(res == 0+0);

   res =  VG_(ksigemptyset)( &unblockmask_new );
   res |= VG_(ksigaddset)( &unblockmask_new, VKI_SIGBUS );
   res |= VG_(ksigaddset)( &unblockmask_new, VKI_SIGSEGV );
   res |= VG_(ksigaddset)( &unblockmask_new, VKI_SIGTERM );
   sk_assert(res == 0+0+0);

   res = VG_(ksigaction)( VKI_SIGBUS, &sigbus_new, &sigbus_saved );
   sk_assert(res == 0+0+0+0);

   res = VG_(ksigaction)( VKI_SIGSEGV, &sigsegv_new, &sigsegv_saved );
   sk_assert(res == 0+0+0+0+0);

   res = VG_(ksigprocmask)( VKI_SIG_UNBLOCK, &unblockmask_new, &blockmask_saved );
   sk_assert(res == 0+0+0+0+0+0);

   /* The signal handlers are installed.  Actually do the memory scan. */
   numPages = 1 << (32-VKI_BYTES_PER_PAGE_BITS);
   sk_assert(numPages == 1048576);
   sk_assert(4096 == (1 << VKI_BYTES_PER_PAGE_BITS));

   nWordsNotified = 0;

   for (page = 0; page < numPages; page++) {

      /* Base address of this 4k page. */
      pageBase = page << VKI_BYTES_PER_PAGE_BITS;

      /* Skip if this page is in an unused 64k chunk. */
      primaryMapNo = pageBase >> 16;
      if (!is_valid_64k_chunk(primaryMapNo))
         continue;

      /* Next, establish whether or not we want to consider any
         locations on this page.  We need to do so before actually
         prodding it, because prodding it when in fact it is not
         needed can cause a page fault which under some rare
         circumstances can cause the kernel to extend the stack
         segment all the way down to here, which is seriously bad.
         Hence: */
      anyValid = False;
      for (addr = pageBase; addr < pageBase+VKI_BYTES_PER_PAGE; addr += 4) {
         if (is_valid_address(addr)) {
            anyValid = True;
            break;
         }
      }

      if (!anyValid)
         continue;  /* nothing interesting here .. move to the next page */

      /* Ok, we have to prod cautiously at the page and see if it
         explodes or not. */
      if (__builtin_setjmp(memscan_jmpbuf) == 0) {
         /* try this ... */
         page_first_word = * (volatile UInt*)pageBase;
         /* we get here if we didn't get a fault */
         /* Scan the page */
         for (addr = pageBase; addr < pageBase+VKI_BYTES_PER_PAGE; addr += 4) {
            if (is_valid_address(addr)) {
               nWordsNotified++;
               notify_word ( addr, *(UInt*)addr );
	    }
         }
      } else {
         /* We get here if reading the first word of the page caused a
            fault, which in turn caused the signal handler to longjmp.
            Ignore this page. */
         if (0)
         VG_(printf)(
            "vg_scan_all_valid_memory_sighandler: ignoring page at %p\n",
            (void*)pageBase 
         );
      }
   }

   /* Restore signal state to whatever it was before. */
   res = VG_(ksigaction)( VKI_SIGBUS, &sigbus_saved, NULL );
   sk_assert(res == 0 +0);

   res = VG_(ksigaction)( VKI_SIGSEGV, &sigsegv_saved, NULL );
   sk_assert(res == 0 +0 +0);

   res = VG_(ksigprocmask)( VKI_SIG_SETMASK, &blockmask_saved, NULL );
   sk_assert(res == 0 +0 +0 +0);

   return nWordsNotified;
}
Example #20
void
obj1grd_ASL(ASL *a, int i, real *X, real *G, fint *nerror)
{
	ASL_fg *asl;
	Jmp_buf err_jmp0;
	cde *d;
	fint ne0;
	int ij, j, *vmi, xksave, *z;
	ograd *gr, **gr0;
	real *Adjoints, *vscale;
	size_t L;
	static char who[] = "obj1grd";

	NNOBJ_chk(a, i, who);
	asl = (ASL_fg*)a;
	if (!want_derivs)
		No_derivs_ASL(who);
	ne0 = -1;
	if (nerror && (ne0 = *nerror) >= 0) {
		err_jmp = &err_jmp0;
		ij = __builtin_setjmp(err_jmp0.jb);
		if (ij) {
			*nerror = err_jmp0.err;
			goto done;
			}
		}
	errno = 0;	/* in case f77 set errno opening files */
	if (!asl->i.x_known)
		x0_check_ASL(asl,X);
	if (!asl->i.noxval || asl->i.noxval[i] != asl->i.nxval) {
		xksave = asl->i.x_known;
		asl->i.x_known = 1;
		obj1val_ASL(a, i, X, nerror);
		asl->i.x_known = xksave;
		if (ne0 >= 0 && *nerror)
			goto done;
		}
	if (asl->i.Derrs)
		deriv_errchk_ASL(a, nerror, -(i+1), 1);
	if (f_b)
		funnelset_ASL(asl, f_b);
	if (f_o)
		funnelset_ASL(asl, f_o);
	Adjoints = adjoints;
	d = obj_de + i;
	gr0 = Ograd + i;
	for(gr = *gr0; gr; gr = gr->next)
		Adjoints[gr->varno] = gr->coef;
	if ((L = d->zaplen)) {
		memset(adjoints_nv1, 0, L);
		derprop(d->d);
		}
	if (zerograds) {	/* sparse gradients */
		z = zerograds[i];
		while((i = *z++) >= 0)
			G[i] = 0;
		}
	gr = *gr0;
	vmi = 0;
	if (asl->i.vmap)
		vmi = get_vminv_ASL(a);
	if ((vscale = asl->i.vscale)) {
		if (vmi)
			for(; gr; gr = gr->next) {
				j = vmi[i = gr->varno];
				G[j] = Adjoints[i] * vscale[j];
				}
		else
			for(; gr; gr = gr->next) {
				i = gr->varno;
				G[i] = Adjoints[i] * vscale[i];
				}
		}
	else if (vmi)
		for(; gr; gr = gr->next) {
			i = gr->varno;
			G[vmi[i]] = Adjoints[i];
			}
	else
		for(; gr; gr = gr->next) {
			i = gr->varno;
			G[i] = Adjoints[i];
			}
 done:
	err_jmp = 0;
	}
Example #21
int main(void) {
  __builtin_setjmp(0);
  jumpaway(0);
}
Example #22
void
foo (void)
{
  __builtin_setjmp (jmpbuf);
}
Example #23
Bool VG_(machine_get_hwcaps)( void )
{
   vg_assert(hwcaps_done == False);
   hwcaps_done = True;

   // Whack default settings into vai, so that we only need to fill in
   // any interesting bits.
   LibVEX_default_VexArchInfo(&vai);

#if defined(VGA_x86)
   { Bool have_sse1, have_sse2;
     UInt eax, ebx, ecx, edx;

     if (!VG_(has_cpuid)())
        /* we can't do cpuid at all.  Give up. */
        return False;

     VG_(cpuid)(0, &eax, &ebx, &ecx, &edx);
     if (eax < 1)
        /* we can't ask for cpuid(x) for x > 0.  Give up. */
        return False;

     /* get capabilities bits into edx */
     VG_(cpuid)(1, &eax, &ebx, &ecx, &edx);

     have_sse1 = (edx & (1<<25)) != 0; /* True => have sse insns */
     have_sse2 = (edx & (1<<26)) != 0; /* True => have sse2 insns */

     if (have_sse2 && have_sse1) {
        va          = VexArchX86;
        vai.hwcaps  = VEX_HWCAPS_X86_SSE1;
        vai.hwcaps |= VEX_HWCAPS_X86_SSE2;
        VG_(machine_x86_have_mxcsr) = 1;
        return True;
     }

     if (have_sse1) {
        va          = VexArchX86;
        vai.hwcaps  = VEX_HWCAPS_X86_SSE1;
        VG_(machine_x86_have_mxcsr) = 1;
        return True;
     }

     va         = VexArchX86;
     vai.hwcaps = 0; /*baseline - no sse at all*/
     VG_(machine_x86_have_mxcsr) = 0;
     return True;
   }

#elif defined(VGA_amd64)
   vg_assert(VG_(has_cpuid)());
   va         = VexArchAMD64;
   vai.hwcaps = 0; /*baseline - SSE2 */
   return True;

#elif defined(VGA_ppc32)
   { /* ppc32 doesn't seem to have a sane way to find out what insn
        sets the CPU supports.  So we have to arse around with
        SIGILLs.  Yuck. */
     vki_sigset_t         saved_set, tmp_set;
     struct vki_sigaction saved_act, tmp_act;

     volatile Bool have_F, have_V, have_FX, have_GX;
     Int r;

     VG_(sigemptyset)(&tmp_set);
     VG_(sigaddset)(&tmp_set, VKI_SIGILL);

     r = VG_(sigprocmask)(VKI_SIG_UNBLOCK, &tmp_set, &saved_set);
     vg_assert(r == 0);

     r = VG_(sigaction)(VKI_SIGILL, NULL, &saved_act);
     vg_assert(r == 0);
     tmp_act = saved_act;

     /* NODEFER: signal handler does not return (from the kernel's point of
        view), hence if it is to successfully catch a signal more than once,
        we need the NODEFER flag. */
     tmp_act.sa_flags &= ~VKI_SA_RESETHAND;
     tmp_act.sa_flags &= ~VKI_SA_SIGINFO;
     tmp_act.sa_flags |=  VKI_SA_NODEFER;

     /* standard FP insns */
     have_F = True;
     tmp_act.ksa_handler = handler_sigill;
     r = VG_(sigaction)(VKI_SIGILL, &tmp_act, NULL);
     vg_assert(r == 0);
     if (__builtin_setjmp(env_sigill)) {
        have_F = False;
     } else {
        __asm__ __volatile__(".long 0xFC000090"); /*fmr 0,0 */
     }

     /* Altivec insns */
     have_V = True;
     tmp_act.ksa_handler = handler_sigill;
     r = VG_(sigaction)(VKI_SIGILL, &tmp_act, NULL);
     vg_assert(r == 0);
     if (__builtin_setjmp(env_sigill)) {
        have_V = False;
     } else {
        /* Unfortunately some older assemblers don't speak Altivec (or
           choose not to), so to be safe we directly emit the 32-bit
           word corresponding to "vor 0,0,0".  This fixes a build
           problem that happens on Debian 3.1 (ppc32), and probably
           various other places. */
        __asm__ __volatile__(".long 0x10000484"); /*vor 0,0,0*/
     }

     /* General-Purpose optional (fsqrt, fsqrts) */
     have_FX = True;
     tmp_act.ksa_handler = handler_sigill;
     r = VG_(sigaction)(VKI_SIGILL, &tmp_act, NULL);
     vg_assert(r == 0);
     if (__builtin_setjmp(env_sigill)) {
        have_FX = False;
     } else {
        __asm__ __volatile__(".long 0xFC00002C"); /*fsqrt 0,0 */
     }

     /* Graphics optional (stfiwx, fres, frsqrte, fsel) */
     have_GX = True;
     tmp_act.ksa_handler = handler_sigill;
     r = VG_(sigaction)(VKI_SIGILL, &tmp_act, NULL);
     vg_assert(r == 0);
     if (__builtin_setjmp(env_sigill)) {
        have_GX = False;
     } else {
        __asm__ __volatile__(".long 0xFC000034"); /* frsqrte 0,0 */
     }

     r = VG_(sigaction)(VKI_SIGILL, &saved_act, NULL);
     vg_assert(r == 0);
     r = VG_(sigprocmask)(VKI_SIG_SETMASK, &saved_set, NULL);
     vg_assert(r == 0);
     /*
        VG_(printf)("F %d V %d FX %d GX %d\n", 
                    (Int)have_F, (Int)have_V, (Int)have_FX, (Int)have_GX);
     */
     /* Make FP a prerequisite for VMX (bogusly so), and for FX and GX. */
     if (have_V && !have_F)
        have_V = False;
     if (have_FX && !have_F)
        have_FX = False;
     if (have_GX && !have_F)
        have_GX = False;

     VG_(machine_ppc32_has_FP)  = have_F ? 1 : 0;
     VG_(machine_ppc32_has_VMX) = have_V ? 1 : 0;

     va = VexArchPPC32;

     vai.hwcaps = 0;
     if (have_F)  vai.hwcaps |= VEX_HWCAPS_PPC32_F;
     if (have_V)  vai.hwcaps |= VEX_HWCAPS_PPC32_V;
     if (have_FX) vai.hwcaps |= VEX_HWCAPS_PPC32_FX;
     if (have_GX) vai.hwcaps |= VEX_HWCAPS_PPC32_GX;

     /* But we're not done yet: VG_(machine_ppc32_set_clszB) must be
        called before we're ready to go. */
     return True;
   }

#elif defined(VGA_ppc64)
   { /* Same idiocy as for ppc32 - arse around with SIGILLs. */
     vki_sigset_t         saved_set, tmp_set;
     struct vki_sigaction saved_act, tmp_act;

     volatile Bool have_F, have_V, have_FX, have_GX;

     VG_(sigemptyset)(&tmp_set);
     VG_(sigaddset)(&tmp_set, VKI_SIGILL);

     VG_(sigprocmask)(VKI_SIG_UNBLOCK, &tmp_set, &saved_set);

     VG_(sigaction)(VKI_SIGILL, NULL, &saved_act);
     tmp_act = saved_act;

     /* NODEFER: signal handler does not return (from the kernel's point of
        view), hence if it is to successfully catch a signal more than once,
        we need the NODEFER flag. */
     tmp_act.sa_flags &= ~VKI_SA_RESETHAND;
     tmp_act.sa_flags &= ~VKI_SA_SIGINFO;
     tmp_act.sa_flags |=  VKI_SA_NODEFER;

     /* standard FP insns */
     have_F = True;
     tmp_act.ksa_handler = handler_sigill;
     VG_(sigaction)(VKI_SIGILL, &tmp_act, NULL);
     if (__builtin_setjmp(env_sigill)) {
        have_F = False;
     } else {
        __asm__ __volatile__("fmr 0,0");
     }

     /* Altivec insns */
     have_V = True;
     tmp_act.ksa_handler = handler_sigill;
     VG_(sigaction)(VKI_SIGILL, &tmp_act, NULL);
     if (__builtin_setjmp(env_sigill)) {
        have_V = False;
     } else {
        __asm__ __volatile__(".long 0x10000484"); /*vor 0,0,0*/
     }

     /* General-Purpose optional (fsqrt, fsqrts) */
     have_FX = True;
     tmp_act.ksa_handler = handler_sigill;
     VG_(sigaction)(VKI_SIGILL, &tmp_act, NULL);
     if (__builtin_setjmp(env_sigill)) {
        have_FX = False;
     } else {
        __asm__ __volatile__(".long 0xFC00002C"); /*fsqrt 0,0*/
     }

     /* Graphics optional (stfiwx, fres, frsqrte, fsel) */
     have_GX = True;
     tmp_act.ksa_handler = handler_sigill;
     VG_(sigaction)(VKI_SIGILL, &tmp_act, NULL);
     if (__builtin_setjmp(env_sigill)) {
        have_GX = False;
     } else {
        __asm__ __volatile__(".long 0xFC000034"); /*frsqrte 0,0*/
     }

     VG_(sigaction)(VKI_SIGILL, &saved_act, NULL);
     VG_(sigprocmask)(VKI_SIG_SETMASK, &saved_set, NULL);
     /*
     if (0)
        VG_(printf)("F %d V %d FX %d GX %d\n", 
                    (Int)have_F, (Int)have_V, (Int)have_FX, (Int)have_GX);
     */
     /* on ppc64, if we don't even have FP, just give up. */
     if (!have_F)
        return False;

     VG_(machine_ppc64_has_VMX) = have_V ? 1 : 0;

     va = VexArchPPC64;

     vai.hwcaps = 0;
     if (have_V)  vai.hwcaps |= VEX_HWCAPS_PPC64_V;
     if (have_FX) vai.hwcaps |= VEX_HWCAPS_PPC64_FX;
     if (have_GX) vai.hwcaps |= VEX_HWCAPS_PPC64_GX;

     /* But we're not done yet: VG_(machine_ppc64_set_clszB) must be
        called before we're ready to go. */
     return True;
   }

#else
#  error "Unknown arch"
#endif
}
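The probing idiom above, stripped to its essentials: install a SIGILL handler that longjmps, try the instruction, and record whether it trapped. A self-contained sketch of the same technique for ppc (hypothetical names, minimal error handling; SA_NODEFER matters because the handler never returns, so the kernel would otherwise leave SIGILL blocked after the first catch):

#include <signal.h>
#include <string.h>

static void *probe_jmpbuf[5];

static void sigill_handler(int sig)
{
   __builtin_longjmp(probe_jmpbuf, 1);
}

/* Returns 1 if the Altivec encoding "vor 0,0,0" executes without SIGILL. */
static int have_altivec(void)
{
   struct sigaction sa, saved;
   volatile int ok = 1;

   memset(&sa, 0, sizeof(sa));
   sa.sa_handler = sigill_handler;
   sa.sa_flags   = SA_NODEFER;       /* handler longjmps; never returns normally */
   sigemptyset(&sa.sa_mask);
   sigaction(SIGILL, &sa, &saved);

   if (__builtin_setjmp(probe_jmpbuf) == 0)
      __asm__ __volatile__(".long 0x10000484");   /* vor 0,0,0 */
   else
      ok = 0;                       /* instruction trapped: not supported */

   sigaction(SIGILL, &saved, NULL); /* restore the previous handler */
   return ok;
}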
Example #24
Bool VG_(machine_get_hwcaps)( void )
{
   vg_assert(hwcaps_done == False);
   hwcaps_done = True;

   // Whack default settings into vai, so that we only need to fill in
   // any interesting bits.
   LibVEX_default_VexArchInfo(&vai);

#if defined(VGA_x86)
   { Bool have_sse1, have_sse2;
     UInt eax, ebx, ecx, edx;

     if (!VG_(has_cpuid)())
        /* we can't do cpuid at all.  Give up. */
        return False;

     VG_(cpuid)(0, &eax, &ebx, &ecx, &edx);
     if (eax < 1)
        /* we can't ask for cpuid(x) for x > 0.  Give up. */
        return False;

     /* get capabilities bits into edx */
     VG_(cpuid)(1, &eax, &ebx, &ecx, &edx);

     have_sse1 = (edx & (1<<25)) != 0; /* True => have sse insns */
     have_sse2 = (edx & (1<<26)) != 0; /* True => have sse2 insns */

     if (have_sse2 && have_sse1) {
        va          = VexArchX86;
        vai.subarch = VexSubArchX86_sse2;
        VG_(machine_x86_have_mxcsr) = 1;
        return True;
     }

     if (have_sse1) {
        va          = VexArchX86;
        vai.subarch = VexSubArchX86_sse1;
        VG_(machine_x86_have_mxcsr) = 1;
        return True;
     }

     va          = VexArchX86;
     vai.subarch = VexSubArchX86_sse0;
     VG_(machine_x86_have_mxcsr) = 0;
     return True;
   }

#elif defined(VGA_amd64)
   vg_assert(VG_(has_cpuid)());
   va          = VexArchAMD64;
   vai.subarch = VexSubArch_NONE;
   return True;

#elif defined(VGA_ppc32)
   { /* ppc32 doesn't seem to have a sane way to find out what insn
        sets the CPU supports.  So we have to arse around with
        SIGILLs.  Yuck. */
     vki_sigset_t         saved_set, tmp_set;
     struct vki_sigaction saved_act, tmp_act;

     volatile Bool have_fp, have_vmx;

     VG_(sigemptyset)(&tmp_set);
     VG_(sigaddset)(&tmp_set, VKI_SIGILL);

     VG_(sigprocmask)(VKI_SIG_UNBLOCK, &tmp_set, &saved_set);

     VG_(sigaction)(VKI_SIGILL, NULL, &saved_act);
     tmp_act = saved_act;

     tmp_act.sa_flags &= ~VKI_SA_RESETHAND;
     tmp_act.sa_flags &= ~VKI_SA_SIGINFO;

     tmp_act.ksa_handler = handler_sigill;
     VG_(sigaction)(VKI_SIGILL, &tmp_act, NULL);

     have_fp = True;
     if (__builtin_setjmp(env_sigill)) {
        have_fp = False;
     } else {
        __asm__ __volatile__("fmr 0,0");
     }

     tmp_act.ksa_handler = handler_sigill;
     VG_(sigaction)(VKI_SIGILL, &tmp_act, NULL);

     have_vmx = True;
     if (__builtin_setjmp(env_sigill)) {
        have_vmx = False;
     } else {
        __asm__ __volatile__("vor 0,0,0");
     }

     VG_(sigaction)(VKI_SIGILL, &saved_act, NULL);
     VG_(sigprocmask)(VKI_SIG_SETMASK, &saved_set, NULL);

     /* VG_(printf)("FP %d VMX %d\n", (Int)have_fp, (Int)have_vmx); */

     /* We can only support 3 cases, not 4 (vmx but no fp).  So make
	fp a prerequisite for vmx. */
     if (have_vmx && !have_fp)
        have_vmx = False;

     VG_(machine_ppc32_has_FP)  = have_fp  ? 1 : 0;
     VG_(machine_ppc32_has_VMX) = have_vmx ? 1 : 0;

     va = VexArchPPC32;

     if (have_fp == False && have_vmx == False) {
        vai.subarch = VexSubArchPPC32_I;
     }
     else if (have_fp == True && have_vmx == False) {
        vai.subarch = VexSubArchPPC32_FI;
     }
     else if (have_fp == True && have_vmx == True) {
        vai.subarch = VexSubArchPPC32_VFI;
     } else {
        /* this can't happen. */
        vg_assert2(0, "VG_(machine_get_hwcaps)(ppc32)");
     }

     /* But we're not done yet: VG_(machine_ppc32_set_clszB) must be
        called before we're ready to go. */
     return True;
   }

#elif defined(VGA_ppc64)
   { vki_sigset_t         saved_set, tmp_set;
     struct vki_sigaction saved_act, tmp_act;

     volatile Bool have_vmx;

     VG_(sigemptyset)(&tmp_set);
     VG_(sigaddset)(&tmp_set, VKI_SIGILL);

     VG_(sigprocmask)(VKI_SIG_UNBLOCK, &tmp_set, &saved_set);

     VG_(sigaction)(VKI_SIGILL, NULL, &saved_act);
     tmp_act = saved_act;

     tmp_act.sa_flags &= ~VKI_SA_RESETHAND;
     tmp_act.sa_flags &= ~VKI_SA_SIGINFO;

     tmp_act.ksa_handler = handler_sigill;
     VG_(sigaction)(VKI_SIGILL, &tmp_act, NULL);

     have_vmx = True;
     if (__builtin_setjmp(env_sigill)) {
        have_vmx = False;
     } else {
        __asm__ __volatile__("vor 0,0,0");
     }

     VG_(sigaction)(VKI_SIGILL, &saved_act, NULL);
     VG_(sigprocmask)(VKI_SIG_SETMASK, &saved_set, NULL);

     /* VG_(printf)("VMX %d\n", (Int)have_vmx); */

     VG_(machine_ppc64_has_VMX) = have_vmx ? 1 : 0;

     va = VexArchPPC64;
     vai.subarch = have_vmx ? VexSubArchPPC64_VFI : VexSubArchPPC64_FI;

     /* But we're not done yet: VG_(machine_ppc64_set_clszB) must be
        called before we're ready to go. */
     return True;
   }

#else
#  error "Unknown arch"
#endif
}
Example #25
void
__sjthrow ()
{
  struct eh_context *eh = (*get_eh_context) ();
  void ***dhc = &eh->dynamic_handler_chain;
  void *jmpbuf;
  void (*func)(void *, int);
  void *arg;
  void ***cleanup;

  /* The cleanup chain is one word into the buffer.  Get the cleanup
     chain.  */
  cleanup = (void***)&(*dhc)[1];

  /* If there are any cleanups in the chain, run them now.  */
  if (cleanup[0])
    {
      double store[200];
      void **buf = (void**)store;
      buf[1] = 0;
      buf[0] = (*dhc);

      /* try { */
#ifdef DONT_USE_BUILTIN_SETJMP
      if (! setjmp (&buf[2]))
#else
      if (! __builtin_setjmp (&buf[2]))
#endif
	{
	  *dhc = buf;
	  while (cleanup[0])
	    {
	      func = (void(*)(void*, int))cleanup[0][1];
	      arg = (void*)cleanup[0][2];

	      /* Update this before running the cleanup.  */
	      cleanup[0] = (void **)cleanup[0][0];

	      (*func)(arg, 2);
	    }
	  *dhc = buf[0];
	}
      /* catch (...) */
      else
	{
	  __terminate ();
	}
    }
  
  /* We must call terminate if we try and rethrow an exception, when
     there is no exception currently active and when there are no
     handlers left.  */
  if (! eh->info || (*dhc)[0] == 0)
    __terminate ();
    
  /* Find the jmpbuf associated with the top element of the dynamic
     handler chain.  The jumpbuf starts two words into the buffer.  */
  jmpbuf = &(*dhc)[2];

  /* Then we pop the top element off the dynamic handler chain.  */
  *dhc = (void**)(*dhc)[0];

  /* And then we jump to the handler.  */

#ifdef DONT_USE_BUILTIN_SETJMP
  longjmp (jmpbuf, 1);
#else
  __builtin_longjmp (jmpbuf, 1);
#endif


}
Example #26
Bool VG_(machine_get_hwcaps)( void )
{
   vg_assert(hwcaps_done == False);
   hwcaps_done = True;

   // Whack default settings into vai, so that we only need to fill in
   // any interesting bits.
   LibVEX_default_VexArchInfo(&vai);

#if defined(VGA_x86)
   { Bool have_sse1, have_sse2, have_cx8;
     UInt eax, ebx, ecx, edx;

     if (!VG_(has_cpuid)())
        /* we can't do cpuid at all.  Give up. */
        return False;

     VG_(cpuid)(0, &eax, &ebx, &ecx, &edx);
     if (eax < 1)
        /* we can't ask for cpuid(x) for x > 0.  Give up. */
        return False;

     /* get capabilities bits into edx */
     VG_(cpuid)(1, &eax, &ebx, &ecx, &edx);

     have_sse1 = (edx & (1<<25)) != 0; /* True => have sse insns */
     have_sse2 = (edx & (1<<26)) != 0; /* True => have sse2 insns */

     /* cmpxchg8b is a minimum requirement now; if we don't have it we
        must simply give up.  But all CPUs since Pentium-I have it, so
        that doesn't seem like much of a restriction. */
     have_cx8 = (edx & (1<<8)) != 0; /* True => have cmpxchg8b */
     if (!have_cx8)
        return False;

     if (have_sse2 && have_sse1) {
        va          = VexArchX86;
        vai.hwcaps  = VEX_HWCAPS_X86_SSE1;
        vai.hwcaps |= VEX_HWCAPS_X86_SSE2;
        VG_(machine_x86_have_mxcsr) = 1;
        return True;
     }

     if (have_sse1) {
        va          = VexArchX86;
        vai.hwcaps  = VEX_HWCAPS_X86_SSE1;
        VG_(machine_x86_have_mxcsr) = 1;
        return True;
     }

     va         = VexArchX86;
     vai.hwcaps = 0; /*baseline - no sse at all*/
     VG_(machine_x86_have_mxcsr) = 0;
     return True;
   }

#elif defined(VGA_amd64)
   { Bool have_sse1, have_sse2, have_sse3, have_cx8, have_cx16;
     UInt eax, ebx, ecx, edx;

     if (!VG_(has_cpuid)())
        /* we can't do cpuid at all.  Give up. */
        return False;

     VG_(cpuid)(0, &eax, &ebx, &ecx, &edx);
     if (eax < 1)
        /* we can't ask for cpuid(x) for x > 0.  Give up. */
        return False;

     /* get capabilities bits into edx */
     VG_(cpuid)(1, &eax, &ebx, &ecx, &edx);

     have_sse1 = (edx & (1<<25)) != 0; /* True => have sse insns */
     have_sse2 = (edx & (1<<26)) != 0; /* True => have sse2 insns */
     have_sse3 = (ecx & (1<<0)) != 0;  /* True => have sse3 insns */

     /* cmpxchg8b is a minimum requirement now; if we don't have it we
        must simply give up.  But all CPUs since Pentium-I have it, so
        that doesn't seem like much of a restriction. */
     have_cx8 = (edx & (1<<8)) != 0; /* True => have cmpxchg8b */
     if (!have_cx8)
        return False;

     /* on amd64 we tolerate older cpus, which don't have cmpxchg16b */
     have_cx16 = (ecx & (1<<13)) != 0; /* True => have cmpxchg16b */

     va         = VexArchAMD64;
     vai.hwcaps = (have_sse3 ? VEX_HWCAPS_AMD64_SSE3 : 0)
                  | (have_cx16 ? VEX_HWCAPS_AMD64_CX16 : 0);
     return True;
   }

#elif defined(VGA_ppc32)
   {
     /* Find out which subset of the ppc32 instruction set is supported by
        verifying whether various ppc32 instructions generate a SIGILL
        or a SIGFPE. An alternative approach is to check the AT_HWCAP and
        AT_PLATFORM entries in the ELF auxiliary table -- see also
        the_iifii.client_auxv in m_main.c.
      */
     vki_sigset_t          saved_set, tmp_set;
     vki_sigaction_fromK_t saved_sigill_act, saved_sigfpe_act;
     vki_sigaction_toK_t     tmp_sigill_act,   tmp_sigfpe_act;

     volatile Bool have_F, have_V, have_FX, have_GX;
     Int r;

     /* This is a kludge.  Really we ought to back-convert saved_act
        into a toK_t using VG_(convert_sigaction_fromK_to_toK), but
        since that's a no-op on all ppc32 platforms so far supported,
        it's not worth the typing effort.  At least include most basic
        sanity check: */
     vg_assert(sizeof(vki_sigaction_fromK_t) == sizeof(vki_sigaction_toK_t));

     VG_(sigemptyset)(&tmp_set);
     VG_(sigaddset)(&tmp_set, VKI_SIGILL);
     VG_(sigaddset)(&tmp_set, VKI_SIGFPE);

     r = VG_(sigprocmask)(VKI_SIG_UNBLOCK, &tmp_set, &saved_set);
     vg_assert(r == 0);

     r = VG_(sigaction)(VKI_SIGILL, NULL, &saved_sigill_act);
     vg_assert(r == 0);
     tmp_sigill_act = saved_sigill_act;

     r = VG_(sigaction)(VKI_SIGFPE, NULL, &saved_sigfpe_act);
     vg_assert(r == 0);
     tmp_sigfpe_act = saved_sigfpe_act;

     /* NODEFER: signal handler does not return (from the kernel's point of
        view), hence if it is to successfully catch a signal more than once,
        we need the NODEFER flag. */
     tmp_sigill_act.sa_flags &= ~VKI_SA_RESETHAND;
     tmp_sigill_act.sa_flags &= ~VKI_SA_SIGINFO;
     tmp_sigill_act.sa_flags |=  VKI_SA_NODEFER;
     tmp_sigill_act.ksa_handler = handler_unsup_insn;
     r = VG_(sigaction)(VKI_SIGILL, &tmp_sigill_act, NULL);
     vg_assert(r == 0);

     tmp_sigfpe_act.sa_flags &= ~VKI_SA_RESETHAND;
     tmp_sigfpe_act.sa_flags &= ~VKI_SA_SIGINFO;
     tmp_sigfpe_act.sa_flags |=  VKI_SA_NODEFER;
     tmp_sigfpe_act.ksa_handler = handler_unsup_insn;
     r = VG_(sigaction)(VKI_SIGFPE, &tmp_sigfpe_act, NULL);
     vg_assert(r == 0);

     /* standard FP insns */
     have_F = True;
     if (__builtin_setjmp(env_unsup_insn)) {
        have_F = False;
     } else {
        __asm__ __volatile__(".long 0xFC000090"); /*fmr 0,0 */
     }

     /* Altivec insns */
     have_V = True;
     if (__builtin_setjmp(env_unsup_insn)) {
        have_V = False;
     } else {
        /* Unfortunately some older assemblers don't speak Altivec (or
           choose not to), so to be safe we directly emit the 32-bit
           word corresponding to "vor 0,0,0".  This fixes a build
           problem that happens on Debian 3.1 (ppc32), and probably
           various other places. */
        __asm__ __volatile__(".long 0x10000484"); /*vor 0,0,0*/
     }

     /* General-Purpose optional (fsqrt, fsqrts) */
     have_FX = True;
     if (__builtin_setjmp(env_unsup_insn)) {
        have_FX = False;
     } else {
        __asm__ __volatile__(".long 0xFC00002C"); /*fsqrt 0,0 */
     }

     /* Graphics optional (stfiwx, fres, frsqrte, fsel) */
     have_GX = True;
     if (__builtin_setjmp(env_unsup_insn)) {
        have_GX = False;
     } else {
        __asm__ __volatile__(".long 0xFC000034"); /* frsqrte 0,0 */
     }

     r = VG_(sigaction)(VKI_SIGILL, &saved_sigill_act, NULL);
     vg_assert(r == 0);
     r = VG_(sigaction)(VKI_SIGFPE, &saved_sigfpe_act, NULL);
     vg_assert(r == 0);
     r = VG_(sigprocmask)(VKI_SIG_SETMASK, &saved_set, NULL);
     vg_assert(r == 0);
     VG_(debugLog)(1, "machine", "F %d V %d FX %d GX %d\n", 
                    (Int)have_F, (Int)have_V, (Int)have_FX, (Int)have_GX);
     /* Make FP a prerequisite for VMX (bogusly so), and for FX and GX. */
     if (have_V && !have_F)
        have_V = False;
     if (have_FX && !have_F)
        have_FX = False;
     if (have_GX && !have_F)
        have_GX = False;

     VG_(machine_ppc32_has_FP)  = have_F ? 1 : 0;
     VG_(machine_ppc32_has_VMX) = have_V ? 1 : 0;

     va = VexArchPPC32;

     vai.hwcaps = 0;
     if (have_F)  vai.hwcaps |= VEX_HWCAPS_PPC32_F;
     if (have_V)  vai.hwcaps |= VEX_HWCAPS_PPC32_V;
     if (have_FX) vai.hwcaps |= VEX_HWCAPS_PPC32_FX;
     if (have_GX) vai.hwcaps |= VEX_HWCAPS_PPC32_GX;

     /* But we're not done yet: VG_(machine_ppc32_set_clszB) must be
        called before we're ready to go. */
     return True;
   }

#elif defined(VGA_ppc64)
   {
     /* Same instruction set detection algorithm as for ppc32. */
     vki_sigset_t          saved_set, tmp_set;
     vki_sigaction_fromK_t saved_sigill_act, saved_sigfpe_act;
     vki_sigaction_toK_t     tmp_sigill_act,   tmp_sigfpe_act;

     volatile Bool have_F, have_V, have_FX, have_GX;
     Int r;

     /* This is a kludge.  Really we ought to back-convert saved_act
        into a toK_t using VG_(convert_sigaction_fromK_to_toK), but
        since that's a no-op on all ppc64 platforms so far supported,
        it's not worth the typing effort.  At least include most basic
        sanity check: */
     vg_assert(sizeof(vki_sigaction_fromK_t) == sizeof(vki_sigaction_toK_t));

     VG_(sigemptyset)(&tmp_set);
     VG_(sigaddset)(&tmp_set, VKI_SIGILL);
     VG_(sigaddset)(&tmp_set, VKI_SIGFPE);

     r = VG_(sigprocmask)(VKI_SIG_UNBLOCK, &tmp_set, &saved_set);
     vg_assert(r == 0);

     r = VG_(sigaction)(VKI_SIGILL, NULL, &saved_sigill_act);
     vg_assert(r == 0);
     tmp_sigill_act = saved_sigill_act;

     VG_(sigaction)(VKI_SIGFPE, NULL, &saved_sigfpe_act);
     tmp_sigfpe_act = saved_sigfpe_act;

     /* NODEFER: signal handler does not return (from the kernel's point of
        view), hence if it is to successfully catch a signal more than once,
        we need the NODEFER flag. */
     tmp_sigill_act.sa_flags &= ~VKI_SA_RESETHAND;
     tmp_sigill_act.sa_flags &= ~VKI_SA_SIGINFO;
     tmp_sigill_act.sa_flags |=  VKI_SA_NODEFER;
     tmp_sigill_act.ksa_handler = handler_unsup_insn;
     VG_(sigaction)(VKI_SIGILL, &tmp_sigill_act, NULL);

     tmp_sigfpe_act.sa_flags &= ~VKI_SA_RESETHAND;
     tmp_sigfpe_act.sa_flags &= ~VKI_SA_SIGINFO;
     tmp_sigfpe_act.sa_flags |=  VKI_SA_NODEFER;
     tmp_sigfpe_act.ksa_handler = handler_unsup_insn;
     VG_(sigaction)(VKI_SIGFPE, &tmp_sigfpe_act, NULL);

     /* standard FP insns */
     have_F = True;
     if (__builtin_setjmp(env_unsup_insn)) {
        have_F = False;
     } else {
        __asm__ __volatile__("fmr 0,0");
     }

     /* Altivec insns */
     have_V = True;
     if (__builtin_setjmp(env_unsup_insn)) {
        have_V = False;
     } else {
        __asm__ __volatile__(".long 0x10000484"); /*vor 0,0,0*/
     }

     /* General-Purpose optional (fsqrt, fsqrts) */
     have_FX = True;
     if (__builtin_setjmp(env_unsup_insn)) {
        have_FX = False;
     } else {
        __asm__ __volatile__(".long 0xFC00002C"); /*fsqrt 0,0*/
     }

     /* Graphics optional (stfiwx, fres, frsqrte, fsel) */
     have_GX = True;
     if (__builtin_setjmp(env_unsup_insn)) {
        have_GX = False;
     } else {
        __asm__ __volatile__(".long 0xFC000034"); /*frsqrte 0,0*/
     }

     VG_(sigaction)(VKI_SIGILL, &saved_sigill_act, NULL);
     VG_(sigaction)(VKI_SIGFPE, &saved_sigfpe_act, NULL);
     VG_(sigprocmask)(VKI_SIG_SETMASK, &saved_set, NULL);
     VG_(debugLog)(1, "machine", "F %d V %d FX %d GX %d\n", 
                    (Int)have_F, (Int)have_V, (Int)have_FX, (Int)have_GX);
     /* on ppc64, if we don't even have FP, just give up. */
     if (!have_F)
        return False;

     VG_(machine_ppc64_has_VMX) = have_V ? 1 : 0;

     va = VexArchPPC64;

     vai.hwcaps = 0;
     if (have_V)  vai.hwcaps |= VEX_HWCAPS_PPC64_V;
     if (have_FX) vai.hwcaps |= VEX_HWCAPS_PPC64_FX;
     if (have_GX) vai.hwcaps |= VEX_HWCAPS_PPC64_GX;

     /* But we're not done yet: VG_(machine_ppc64_set_clszB) must be
        called before we're ready to go. */
     return True;
   }

#else
#  error "Unknown arch"
#endif
}
Example #27
void emu_loop(bool reset)
{
    #if OS_HAS_PAGEFAULT_HANDLER
        os_exception_frame_t seh_frame = { NULL, NULL };
        os_faulthandler_arm(&seh_frame);
    #endif

    if(reset)
    {
        reset:
        emu_reset();
    }

    gdbstub_reset();

    addr_cache_flush();
    flush_translations();

    sched_update_next_event(0);

    exiting = false;

// clang segfaults with that, for an iOS build :(
#ifndef NO_SETJMP
    // Workaround for LLVM bug #18974
    while(__builtin_setjmp(restart_after_exception)){};
#endif

    while (!exiting) {
        sched_process_pending_events();
        while (!exiting && cycle_count_delta < 0) {
            if (cpu_events & EVENT_RESET) {
                gui_status_printf("Reset");
                goto reset;
            }

            if (cpu_events & (EVENT_FIQ | EVENT_IRQ)) {
                // Align PC in case the interrupt occurred immediately after a jump
                if (arm.cpsr_low28 & 0x20)
                    arm.reg[15] &= ~1;
                else
                    arm.reg[15] &= ~3;

                if (cpu_events & EVENT_WAITING)
                    arm.reg[15] += 4; // Skip over wait instruction

                arm.reg[15] += 4;
                cpu_exception((cpu_events & EVENT_FIQ) ? EX_FIQ : EX_IRQ);
            }
            cpu_events &= ~EVENT_WAITING;

            if (arm.cpsr_low28 & 0x20)
                cpu_thumb_loop();
            else
                cpu_arm_loop();
        }
    }

    #if OS_HAS_PAGEFAULT_HANDLER
        os_faulthandler_unarm(&seh_frame);
    #endif
}
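The loop assumes some error path elsewhere in the emulator longjmps to restart_after_exception, restarting the interpreter loop. A sketch of that side (the function name is hypothetical; the buffer declaration is an assumption):

extern void *restart_after_exception[5];   /* the __builtin_setjmp buffer above */

void bail_to_emu_loop(void)                /* hypothetical name */
{
    __builtin_longjmp(restart_after_exception, 1);   /* lands back in the while() */
}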