int main()
{
	/*
	 * We would typically use this function in an embedded application where
	 * memory is a critical resource. If we have some complex calculation, we
	 * may want it to be folded if it involves constants, but need to call a
	 * function if it does not.
	 */
	int a = 10;
	int const b = 10;
	if(__builtin_constant_p(a))
		printf("The a is constant\n");
	if(__builtin_constant_p(10))
		printf("The 10 is constant\n");
	if(__builtin_constant_p(b))
		printf("The const value is constant\n");
	/*
	 * This is an acceptable initializer even if EXPRESSION is not a
	 * constant expression. GCC must be more conservative about evaluating
	 * the built-in in this case, because it has no opportunity to perform
	 * optimization. (A short sketch of both patterns follows this example.)
	 */
	buddy_constant_inline(10);
	buddy_constant_no_inline(10);
	return 0;
}
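Both comments in this example paraphrase the GCC manual's motivation for __builtin_constant_p: fold a calculation at compile time when its operands are constants, call a function otherwise, and guard its use inside static initializers. A minimal sketch of the two patterns, in which SCALE, OFFSET, scale() and runtime_value are hypothetical names introduced only for illustration:

#define SCALE  3
#define OFFSET 7
extern int scale (int x);   /* out-of-line fallback for non-constant input */

/* Folds to a constant when X is a literal, otherwise calls scale(). */
#define SCALE_VALUE(X) \
	(__builtin_constant_p (X) ? ((X) * SCALE + OFFSET) : scale (X))

extern int runtime_value;

/* Acceptable static initializer either way: when the operand cannot be
 * folded, the built-in conservatively evaluates to 0 and -1 is stored. */
static const int table[] = {
	__builtin_constant_p (16 * 4)        ? (16 * 4)      : -1, /* 64 */
	__builtin_constant_p (runtime_value) ? runtime_value : -1, /* -1 */
};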
Example #2
int
fn1 (void)
{
  if (__builtin_constant_p ()) /* { dg-error "7:not enough" } */
    return 0;
  if (__builtin_constant_p (1, 2)) /* { dg-error "7:too many" } */
    return 1;
  if (__builtin_isfinite ()) /* { dg-error "7:not enough" } */
    return 3;
  if (__builtin_isfinite (1, 2)) /* { dg-error "7:too many" } */
    return 4;
  if (__builtin_isless (0)) /* { dg-error "7:not enough" } */
    return 5;
  if (__builtin_isless (1, 2, 3)) /* { dg-error "7:too many" } */
    return 6;
  if (__builtin_fpclassify (1, 2, 3, 4, 5)) /* { dg-error "7:not enough" } */
    return 7;
  if (__builtin_fpclassify (1, 2, 3, 4, 5, r, 6)) /* { dg-error "7:too many" } */
    return 8;
  if (__builtin_assume_aligned (p)) /* { dg-error "7:too few" } */
    return 9;
  if (__builtin_assume_aligned (p, r, p, p)) /* { dg-error "7:too many" } */
    return 10;
  if (__builtin_add_overflow ()) /* { dg-error "7:not enough" } */
    return 11;
  if (__builtin_add_overflow (1, 2, 3, &r)) /* { dg-error "7:too many" } */
    return 12;
  return -1;
}
Example #3
int main() {
    int i = 5;
    printf("__builtin_constant_p(i) is %d\n", __builtin_constant_p(i));
    printf("__builtin_constant_p(PREDEFINED_VAL) is %d\n", __builtin_constant_p(PREDEFINED_VAL));
    printf("__builtin_constant_p(100) is %d\n", __builtin_constant_p(100));

    return 0;
}
Example #4
File: pr36513.c Project: sylvestre/gcc
int main1 ()
{
  char *s, t;
  (__extension__ (__builtin_constant_p (t) 
		  && !__builtin_constant_p (s) 
		  && (t) == '\0' 
		  ? (char *) __rawmemchr (s, t) 
		  : __builtin_strchr (s, t)));
  return 0;
}
Example #5
static void
__bb_init_prg ()
{
  const char *p;

  {
    unsigned long l;

    (__extension__ (__builtin_constant_p (p) && __builtin_constant_p (l)
		    ? 5 : 2));
  }

}
Example #6
main()
{
    char x = *(char *)(long)"hi";

    printf("endian: %d\n", (*(unsigned short *)"\xff\x00" < 0x100));

    _Static_assert(
        __builtin_constant_p(*(char *)(long)"hi"),
        "word casts should be constant");

    _Static_assert(
        !__builtin_constant_p(*(unsigned short *)"\xff\x00"),
        "endian dependent cast/deref result");
}
Example #7
void direct2indirect(void)
{
    struct item_head *p_le_ih;
    struct item_head ind_ih;
    unsigned int unfm_ptr;

    if (__builtin_expect(32, 0)) __asm__ ("break");

    set_le_ih_k_type (&ind_ih);

    if (__builtin_constant_p(p_le_ih) ? 1 : 2) {
        (__builtin_constant_p(__builtin_constant_p(1) == 1));
      boo(&ind_ih, (char *)&unfm_ptr);
    }
}
Example #8
File: cache_arc700.c Project: 3CTO/linux
/*
 * Common Helper for Line Operations on {I,D}-Cache
 */
static inline void __cache_line_loop(unsigned long paddr, unsigned long vaddr,
				     unsigned long sz, const int cacheop)
{
	unsigned int aux_cmd, aux_tag;
	int num_lines;
	const int full_page_op = __builtin_constant_p(sz) && sz == PAGE_SIZE;

	if (cacheop == OP_INV_IC) {
		aux_cmd = ARC_REG_IC_IVIL;
#if (CONFIG_ARC_MMU_VER > 2)
		aux_tag = ARC_REG_IC_PTAG;
#endif
	}
	else {
		/* d$ cmd: INV (discard or wback-n-discard) OR FLUSH (wback) */
		aux_cmd = cacheop & OP_INV ? ARC_REG_DC_IVDL : ARC_REG_DC_FLDL;
#if (CONFIG_ARC_MMU_VER > 2)
		aux_tag = ARC_REG_DC_PTAG;
#endif
	}

	/* Ensure we properly floor/ceil the non-line aligned/sized requests
	 * and have @paddr - aligned to cache line and integral @num_lines.
	 * This however can be avoided for page sized since:
	 *  -@paddr will be cache-line aligned already (being page aligned)
	 *  -@sz will be integral multiple of line size (being page sized).
	 */
	if (!full_page_op) {
		sz += paddr & ~CACHE_LINE_MASK;
		paddr &= CACHE_LINE_MASK;
		vaddr &= CACHE_LINE_MASK;
	}

	num_lines = DIV_ROUND_UP(sz, L1_CACHE_BYTES);

#if (CONFIG_ARC_MMU_VER <= 2)
	/* MMUv2 and before: paddr contains stuffed vaddrs bits */
	paddr |= (vaddr >> PAGE_SHIFT) & 0x1F;
#else
	/* if V-P const for loop, PTAG can be written once outside loop */
	if (full_page_op)
		write_aux_reg(aux_tag, paddr);
#endif

	while (num_lines-- > 0) {
#if (CONFIG_ARC_MMU_VER > 2)
		/* MMUv3, cache ops require paddr separately */
		if (!full_page_op) {
			write_aux_reg(aux_tag, paddr);
			paddr += L1_CACHE_BYTES;
		}

		write_aux_reg(aux_cmd, vaddr);
		vaddr += L1_CACHE_BYTES;
#else
		write_aux_reg(aux_cmd, paddr);
		paddr += L1_CACHE_BYTES;
#endif
	}
}
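The floor/ceil step in the !full_page_op branch above is plain mask arithmetic: grow sz by the offset of paddr within its cache line, floor paddr to the line boundary, then round the line count up. A standalone sketch of the same arithmetic with made-up numbers; LINE_SZ, LINE_MASK and the values are illustrative, assume a 64-byte line, and are not taken from the ARC code:

#include <stdio.h>

#define LINE_SZ   64UL
#define LINE_MASK (~(LINE_SZ - 1))   /* stand-in for CACHE_LINE_MASK */
#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

int main(void)
{
	unsigned long paddr = 0x1010;  /* 16 bytes into a 64-byte line   */
	unsigned long sz    = 100;     /* neither aligned nor line-sized */

	sz    += paddr & ~LINE_MASK;   /* grow by the leading offset: 116 */
	paddr &= LINE_MASK;            /* floor the start: 0x1000         */

	/* ceil to whole lines: DIV_ROUND_UP(116, 64) == 2 */
	printf("start %#lx, %lu lines\n", paddr, DIV_ROUND_UP(sz, LINE_SZ));
	return 0;
}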
Example #9
File: cache.c Project: DIGImend/linux
/*
 * In HS38x (MMU v4), although icache is VIPT, only paddr is needed for cache
 * maintenance ops (in IVIL reg), as long as icache doesn't alias.
 *
 * For Aliasing icache, vaddr is also needed (in IVIL), while paddr is
 * specified in PTAG (similar to MMU v3)
 */
static inline
void __cache_line_loop_v4(unsigned long paddr, unsigned long vaddr,
			  unsigned long sz, const int cacheop)
{
	unsigned int aux_cmd;
	int num_lines;
	const int full_page_op = __builtin_constant_p(sz) && sz == PAGE_SIZE;

	if (cacheop == OP_INV_IC) {
		aux_cmd = ARC_REG_IC_IVIL;
	} else {
		/* d$ cmd: INV (discard or wback-n-discard) OR FLUSH (wback) */
		aux_cmd = cacheop & OP_INV ? ARC_REG_DC_IVDL : ARC_REG_DC_FLDL;
	}

	/* Ensure we properly floor/ceil the non-line aligned/sized requests
	 * and have @paddr - aligned to cache line and integral @num_lines.
	 * This however can be avoided for page sized since:
	 *  -@paddr will be cache-line aligned already (being page aligned)
	 *  -@sz will be integral multiple of line size (being page sized).
	 */
	if (!full_page_op) {
		sz += paddr & ~CACHE_LINE_MASK;
		paddr &= CACHE_LINE_MASK;
	}

	num_lines = DIV_ROUND_UP(sz, L1_CACHE_BYTES);

	while (num_lines-- > 0) {
		write_aux_reg(aux_cmd, paddr);
		paddr += L1_CACHE_BYTES;
	}
}
Example #10
File: pr16746c.c Project: ChrisG0x20/gdb
int
main (int argc, char **argv)
{
  if (__builtin_constant_p (argc))
    foobar ();
  return 0;
}
Example #11
File: memset-1.c Project: 0day-ci/gcc
extern inline __attribute__((gnu_inline, always_inline, artificial)) void *
memset (void *dest, int ch, size_t len)
{
  if (__builtin_constant_p (len) && len == 0)
    {
      warn_memset_zero_len ();	/* { dg-warning "memset used with constant zero" } */
      return dest;
    }
  return __builtin_memset (dest, ch, len);
}
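Because the wrapper above is always_inline, a call with a literal zero length lets __builtin_constant_p (len) && len == 0 fold to true and reach the compile-time diagnostic, while a length known only at run time falls through to __builtin_memset. A hypothetical caller, assuming the wrapper and size_t are already in scope; clear_buffers is an illustrative name:

void
clear_buffers (void *buf, size_t runtime_len)
{
  memset (buf, 0, 0);            /* constant zero length: warning branch  */
  memset (buf, 0, runtime_len);  /* non-constant length: __builtin_memset */
}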
Example #12
static inline int digitalReadSafe(uint8_t pin) {
  if(!__builtin_constant_p(pin)) {
    return digitalRead(pin);
  }
  else {
    if(!DIGITALIO_NO_MIX_ANALOGWRITE)
      noAnalogWrite(pin);
    return digitalReadFast(pin);
  }
}
Example #13
File: pr42998.c Project: 0day-ci/gcc
void ndisc_fill_addr_option(unsigned char *opt, int data_len,
			    unsigned short addr_type) 
{
  int pad;
  if (addr_type == 32)
    pad = 2;
  else
    pad = 0;
  __builtin_memset(opt + 2, 0, pad);
  opt += pad;
  __builtin_constant_p(data_len) ? foo (opt+2) : bar (opt+2);
}
Example #14
int main(int argc, char **argv) {
  int a;
  a = __builtin_bswap32(a);
  a = __builtin_bswap64(a);
  a = __builtin_constant_p(1);
  a = __builtin_constant_p("string");
  char *b = __builtin_strchr("string", 's');
  a = __builtin_expect(1, a);
  a = __builtin_strlen("string");
  a = __builtin_strcmp("string1", "string2");
  a = __builtin_offsetof(struct point, y);
  char c[100];
  b = __builtin_strcpy(c, "a");
  b = __builtin_strncpy(c, "a", 1);
  a = __builtin_ctzl(a);
  varargsfn(0);
  __builtin_prefetch(b);
  __builtin_prefetch(b, 1);
  __builtin_prefetch(b, 1, 1);
  return a;
}
Example #15
static inline void pinModeSafe(uint8_t pin, uint8_t mode) {
  if(!__builtin_constant_p(pin)) {
    pinMode(pin, mode);
  }
  else {
    if((mode == INPUT || mode == INPUT_PULLUP) && !DIGITALIO_NO_MIX_ANALOGWRITE)
      noAnalogWrite(pin);

    const bool write_is_atomic = DIGITALIO_NO_INTERRUPT_SAFETY
      || (__builtin_constant_p(mode)
          && mode == OUTPUT
          && _directionIsAtomic(pin));
    if(write_is_atomic) {
      pinModeFast(pin, mode);
    }
    else {
      ATOMIC_BLOCK(ATOMIC_RESTORESTATE)
      {
        pinModeFast(pin, mode);
      }
    }
  }
}
Example #16
int do_something (int size)
{
  if (__builtin_constant_p (size))
    switch (size)
      {
	case 1:do_something1 (); break;
	case 2:do_something2 (); break;
	case 5:do_something1 ();  do_something1 ();
	case 3:do_something3 (); break;
	case 4:do_something4 (); break;
      }
  else
    do_something_big (size);
}
Example #17
File: 20000720-1.c Project: 0day-ci/gcc
static int
dead(unsigned short *v, char *w, unsigned char *x, int y, int z)
{
  int i = 0;
  unsigned short j = *v;

  while (y > 0) {
    ((baz)x)->a = j;
    ((baz)x)->b = 0;
    ((baz)x)->c = 0;
    ((baz)x)->d = 0;
    __builtin_constant_p(i) ? foo(x, w, i) : bar(x, w, i);
  }
  return z - y;
}
Example #18
File: cache.c Project: srgmzk/linux
static inline
void __cache_line_loop_v3(unsigned long paddr, unsigned long vaddr,
			  unsigned long sz, const int op)
{
	unsigned int aux_cmd, aux_tag;
	int num_lines;
	const int full_page = __builtin_constant_p(sz) && sz == PAGE_SIZE;

	if (op == OP_INV_IC) {
		aux_cmd = ARC_REG_IC_IVIL;
		aux_tag = ARC_REG_IC_PTAG;
	} else {
		aux_cmd = op & OP_INV ? ARC_REG_DC_IVDL : ARC_REG_DC_FLDL;
		aux_tag = ARC_REG_DC_PTAG;
	}

	/* Ensure we properly floor/ceil the non-line aligned/sized requests
Example #19
File: cache.c Project: DIGImend/linux
static inline
void __cache_line_loop_v3(unsigned long paddr, unsigned long vaddr,
			  unsigned long sz, const int op)
{
	unsigned int aux_cmd, aux_tag;
	int num_lines;
	const int full_page = __builtin_constant_p(sz) && sz == PAGE_SIZE;

	if (op == OP_INV_IC) {
		aux_cmd = ARC_REG_IC_IVIL;
		aux_tag = ARC_REG_IC_PTAG;
	} else {
		aux_cmd = op & OP_INV ? ARC_REG_DC_IVDL : ARC_REG_DC_FLDL;
		aux_tag = ARC_REG_DC_PTAG;
	}

	/* Ensure we properly floor/ceil the non-line aligned/sized requests
	 * and have @paddr - aligned to cache line and integral @num_lines.
	 * This however can be avoided for page sized since:
	 *  -@paddr will be cache-line aligned already (being page aligned)
	 *  -@sz will be integral multiple of line size (being page sized).
	 */
	if (!full_page) {
		sz += paddr & ~CACHE_LINE_MASK;
		paddr &= CACHE_LINE_MASK;
		vaddr &= CACHE_LINE_MASK;
	}
	num_lines = DIV_ROUND_UP(sz, L1_CACHE_BYTES);

	/*
	 * MMUv3, cache ops require paddr in PTAG reg
	 * if V-P const for loop, PTAG can be written once outside loop
	 */
	if (full_page)
		write_aux_reg(aux_tag, paddr);

	while (num_lines-- > 0) {
		if (!full_page) {
			write_aux_reg(aux_tag, paddr);
			paddr += L1_CACHE_BYTES;
		}

		write_aux_reg(aux_cmd, vaddr);
		vaddr += L1_CACHE_BYTES;
	}
}
Example #20
static INLINE uint32_t EXTRACT (value_t v, int idx)
{
    if (__builtin_constant_p (idx) && idx == 0)
        return FIRST (v);

    switch (idx) {
    case 0:
        return FIRST (v);
    case 1:
        return FIRST (__builtin_ia32_pshufd (v, 0x55));
    case 2:
        return FIRST (__builtin_ia32_pshufd (v, 0xaa));
    case 3:
        return FIRST (__builtin_ia32_pshufd (v, 0xff));
    }
    abort();
}
Example #21
File: kasan.c Project: mkrufky/linux
static __always_inline bool memory_is_poisoned(unsigned long addr, size_t size)
{
	if (__builtin_constant_p(size)) {
		switch (size) {
		case 1:
			return memory_is_poisoned_1(addr);
		case 2:
		case 4:
		case 8:
			return memory_is_poisoned_2_4_8(addr, size);
		case 16:
			return memory_is_poisoned_16(addr);
		default:
			BUILD_BUG();
		}
	}

	return memory_is_poisoned_n(addr, size);
}
Example #22
static inline void digitalWriteSafe(uint8_t pin, uint8_t value) {
  if(!__builtin_constant_p(pin)) {
    digitalWrite(pin, value);
  }
  else {
    if(!DIGITALIO_NO_MIX_ANALOGWRITE)
      noAnalogWrite(pin);

    if(DIGITALIO_NO_INTERRUPT_SAFETY || _outputIsAtomic(pin)) {
      digitalWriteFast(pin, value);
    }
    else {
      ATOMIC_BLOCK(ATOMIC_RESTORESTATE)
      {
        digitalWriteFast(pin, value);
      }
    }
  }
}
Example #23
/***********************************************************
 * Machine specific helper for per line I-Cache invalidate.
 */
static void __ic_line_inv_vaddr(unsigned long paddr, unsigned long vaddr,
				unsigned long sz)
{
	unsigned long flags;
	int num_lines;

	/*
	 * Ensure we properly floor/ceil the non-line aligned/sized requests:
	 * However page sized flushes can be compile time optimised.
	 *  -@paddr will be cache-line aligned already (being page aligned)
	 *  -@sz will be integral multiple of line size (being page sized).
	 */
	if (!(__builtin_constant_p(sz) && sz == PAGE_SIZE)) {
		sz += paddr & ~ICACHE_LINE_MASK;
		paddr &= ICACHE_LINE_MASK;
		vaddr &= ICACHE_LINE_MASK;
	}

	num_lines = DIV_ROUND_UP(sz, ARC_ICACHE_LINE_LEN);

#if (CONFIG_ARC_MMU_VER <= 2)
	/* bits 17:13 of vaddr go as bits 4:0 of paddr */
	paddr |= (vaddr >> PAGE_SHIFT) & 0x1F;
#endif

	local_irq_save(flags);
	while (num_lines-- > 0) {
#if (CONFIG_ARC_MMU_VER > 2)
		/* tag comes from phy addr */
		write_aux_reg(ARC_REG_IC_PTAG, paddr);

		/* index bits come from vaddr */
		write_aux_reg(ARC_REG_IC_IVIL, vaddr);
		vaddr += ARC_ICACHE_LINE_LEN;
#else
		/* paddr contains stuffed vaddrs bits */
		write_aux_reg(ARC_REG_IC_IVIL, paddr);
#endif
		paddr += ARC_ICACHE_LINE_LEN;
	}
	local_irq_restore(flags);
}
Example #24
void
pmap_tlb_info_init(struct pmap_tlb_info *ti)
{
#ifdef MULTIPROCESSOR
	if (ti == &pmap_tlb0_info) {
#endif /* MULTIPROCESSOR */
		KASSERT(ti == &pmap_tlb0_info);
		mutex_init(ti->ti_lock, MUTEX_DEFAULT, IPL_SCHED);
		if (!CPUISMIPSNN || !__builtin_constant_p(MIPS_TLB_NUM_PIDS)) {
			ti->ti_asid_max = mips_options.mips_num_tlb_entries - 1;
			ti->ti_asids_free = ti->ti_asid_max;
			ti->ti_asid_mask = ti->ti_asid_max;
			/*
			 * Now figure out what mask we need to focus on
			 * asid_max.
			 */
			while ((ti->ti_asid_mask + 1) & ti->ti_asid_mask) {
				ti->ti_asid_mask |= ti->ti_asid_mask >> 1;
			}
		}
Example #25
/*
 * Per Line Operation on D-Cache
 * Doesn't deal with type-of-op/IRQ-disabling/waiting-for-flush-to-complete
 * Its sole purpose is to help gcc generate ZOL
 * (aliasing VIPT dcache flushing needs both vaddr and paddr)
 */
static inline void __dc_line_loop(unsigned long paddr, unsigned long vaddr,
				  unsigned long sz, const int aux_reg)
{
	int num_lines;

	/* Ensure we properly floor/ceil the non-line aligned/sized requests
	 * and have @paddr - aligned to cache line and integral @num_lines.
	 * This however can be avoided for page sized since:
	 *  -@paddr will be cache-line aligned already (being page aligned)
	 *  -@sz will be integral multiple of line size (being page sized).
	 */
	if (!(__builtin_constant_p(sz) && sz == PAGE_SIZE)) {
		sz += paddr & ~DCACHE_LINE_MASK;
		paddr &= DCACHE_LINE_MASK;
		vaddr &= DCACHE_LINE_MASK;
	}

	num_lines = DIV_ROUND_UP(sz, ARC_DCACHE_LINE_LEN);

#if (CONFIG_ARC_MMU_VER <= 2)
	paddr |= (vaddr >> PAGE_SHIFT) & 0x1F;
#endif

	while (num_lines-- > 0) {
#if (CONFIG_ARC_MMU_VER > 2)
		/*
		 * Just as for I$, in MMU v3, D$ ops also require
		 * "tag" bits in DC_PTAG, "index" bits in FLDL,IVDL ops
		 */
		write_aux_reg(ARC_REG_DC_PTAG, paddr);

		write_aux_reg(aux_reg, vaddr);
		vaddr += ARC_DCACHE_LINE_LEN;
#else
		/* paddr contains stuffed vaddrs bits */
		write_aux_reg(aux_reg, paddr);
#endif
		paddr += ARC_DCACHE_LINE_LEN;
	}
}
Example #26
/**
 * radix_tree_find_next_bit - find the next set bit in a memory region
 *
 * @addr: The address to base the search on
 * @size: The bitmap size in bits
 * @offset: The bitnumber to start searching at
 *
 * Unrollable variant of find_next_bit() for constant size arrays.
 * Tail bits starting from size to roundup(size, BITS_PER_LONG) must be zero.
 * Returns next bit offset, or size if nothing found.
 */
static __always_inline unsigned long
radix_tree_find_next_bit(const unsigned long *addr,
			 unsigned long size, unsigned long offset)
{
	if (!__builtin_constant_p(size))
		return find_next_bit(addr, size, offset);

	if (offset < size) {
		unsigned long tmp;

		addr += offset / BITS_PER_LONG;
		tmp = *addr >> (offset % BITS_PER_LONG);
		if (tmp)
			return __ffs(tmp) + offset;
		offset = (offset + BITS_PER_LONG) & ~(BITS_PER_LONG - 1);
		while (offset < size) {
			tmp = *++addr;
			if (tmp)
				return __ffs(tmp) + offset;
			offset += BITS_PER_LONG;
		}
	}

	return size;
}
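The kernel-doc above explains the reason for the __builtin_constant_p(size) guard: a caller passing a compile-time constant size keeps the helper on the unrollable scan path, while a runtime size defers to the generic find_next_bit(). A minimal usage sketch under that assumption; MY_BITS, my_map and first_set_from are hypothetical names used only for illustration:

#define MY_BITS 128   /* hypothetical fixed-size bitmap */

static unsigned long my_map[MY_BITS / BITS_PER_LONG];

static unsigned long first_set_from(unsigned long start)
{
	/* size is the literal MY_BITS, so after inlining the
	 * __builtin_constant_p(size) test is true and the loop
	 * over the words of my_map can be unrolled. */
	return radix_tree_find_next_bit(my_map, MY_BITS, start);
}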
Example #27
File: const-eval.c Project: aosm/clang
// FIXME: Turn into EVAL_EXPR test once we have more folding.
_Complex float g16 = (1.0f + 1.0fi);

// ?: in constant expressions.
int g17[(3?:1) - 2]; 

EVAL_EXPR(18, ((int)((void*)10 + 10)) == 20 ? 1 : -1);

struct s {
  int a[(int)-1.0f]; // expected-error {{array size is negative}}
};

EVAL_EXPR(19, ((int)&*(char*)10 == 10 ? 1 : -1));

EVAL_EXPR(20, __builtin_constant_p(*((int*) 10), -1, 1));

EVAL_EXPR(21, (__imag__ 2i) == 2 ? 1 : -1);

EVAL_EXPR(22, (__real__ (2i+3)) == 3 ? 1 : -1);

int g23[(int)(1.0 / 1.0)] = { 1 };
int g24[(int)(1.0 / 1.0)] = { 1 , 2 }; // expected-warning {{excess elements in array initializer}}
int g25[(int)(1.0 + 1.0)], g26 = sizeof(g25);

EVAL_EXPR(26, (_Complex double)0 ? -1 : 1)
EVAL_EXPR(27, (_Complex int)0 ? -1 : 1)
EVAL_EXPR(28, (_Complex double)1 ? 1 : -1)
EVAL_EXPR(29, (_Complex int)1 ? 1 : -1)

Example #28
File: bcp-1.c Project: alpine9000/BitOS
int opt2(void) { return __builtin_constant_p("hi"[0]); }
Example #29
File: bcp-1.c Project: alpine9000/BitOS
int good2(void) { return __builtin_constant_p((1234 + 45) & ~7); }
Example #30
File: bcp-1.c Project: alpine9000/BitOS
int good1(void) { return __builtin_constant_p("hi"); }