/*
 * MS blocks are 16K aligned.
 * Cardtables are 4K aligned, at least.
 * This means that the cardtable of a given block is 32 bytes aligned.
 */
/*
 * Return a pointer to the first non-zero card byte in the block's card
 * table, or one-past-the-end (card_data + CARDS_PER_BLOCK) when every
 * card is clear.  Scans a machine word at a time for speed.
 */
static guint8* initial_skip_card (guint8 *card_data)
{
	/* Alignment (see header comment) lets us read the card bytes as
	 * full machine words. */
	mword *cards = (mword*)card_data;
	mword card;
	int i;
	/* Word-at-a-time scan: stop at the first word with any card set. */
	for (i = 0; i < CARD_WORDS_PER_BLOCK; ++i) {
		card = cards [i];
		if (card)
			break;
	}
	/* No marked card in the whole block. */
	if (i == CARD_WORDS_PER_BLOCK)
		return card_data + CARDS_PER_BLOCK;
#if defined(__i386__) && defined(__GNUC__)
	/* ffs gives the 1-based bit index of the lowest set bit; /8 turns
	 * that into the byte offset of the first non-zero card within the
	 * 4-byte word (little-endian byte order). */
	return card_data + i * 4 + (__builtin_ffs (card) - 1) / 8;
#elif defined(__x86_64__) && defined(__GNUC__)
	/* Same as above for 8-byte words. */
	return card_data + i * 8 + (__builtin_ffsll (card) - 1) / 8;
#else
	/* Portable fallback: byte scan starting at the word that was seen
	 * to be non-zero. */
	for (i = i * SIZEOF_VOID_P; i < CARDS_PER_BLOCK; ++i) {
		if (card_data [i])
			return &card_data [i];
	}
	/* NOTE(review): unreachable in practice — the word scan above
	 * already found a non-zero byte in this range. */
	return card_data;
#endif
}
/*
 * Accumulate the attack set of every bishop in `bishops`.
 *
 * Iterates over the set bits of `bishops` (one bit per bishop), extracts
 * each source square via ffsll (1-based index of the lowest set bit),
 * clears that bit, and ORs in the per-square attack computation.
 *
 * Returns the union of attacks from all bishop squares.
 */
BitBoard Bishops::getAttacksFrom(BitBoard bishops, BitBoard targets, BitBoard friendlies) {
    BitBoard result(0LL);
    while (0LL != bishops) {
        const Square source(__builtin_ffsll(bishops) - 1);
        /* Clear the processed bit.  Use an unsigned literal here:
         * `1LL << 63` (square index 63) shifts into the sign bit of a
         * signed long long, which is undefined behavior; `1ULL` is
         * well-defined for every square 0..63 and yields the same bit
         * pattern. */
        bishops &= ~(1ULL << source);
        result |= getAttacksFrom(source, targets, friendlies);
    }
    return result;
}
// NOTE(review): relies on `ll`, `MOD`, and `range()` declared elsewhere in
// the file; the exact quantity being summed depends on range()'s contract,
// which is not visible here — the comments below describe only what the
// bit manipulation demonstrably does.
ll s(ll x){
    if (x==0) return 0;
    // 0-based positions of the highest and lowest set bits of x.
    int leftmostone=63-__builtin_clzll(x);
    int rightmostone=__builtin_ffsll(x)-1;
    if (leftmostone==rightmostone){
        // Exactly one bit set (x is a power of two): recurse on x-1.
        return (rightmostone+s(x-1)) % MOD;
    }
    else{
        // Clear the lowest set bit of x.
        ll y = x^(1ll<<rightmostone);
        // Count of zero bits strictly between the lowest and highest set bits.
        int middlezeros=leftmostone-rightmostone+1-__builtin_popcountll(x);
        // Combine the recursion on y with an external range() term — see
        // NOTE above; the +2 offset's meaning depends on range()'s bounds.
        return (s(y)+range(rightmostone,leftmostone+middlezeros+2)) % MOD;
    }
}
/*
 * Wait for any signal in `wait_mask` to be delivered to `thread`.
 *
 * NOTE(review): this excerpt is truncated — the function body continues
 * past the visible text (it ends mid-branch).  Comments below cover only
 * the visible fast path.
 */
int signal_wait(struct thread* thread, uint64_t wait_mask)
{
	int retval = 0;
	struct thread *sleeping = 0; // set if thread should go to sleep.
	{
		struct process* process = thread->process;
		/* Lock order: process-wide signal state first, then the
		 * thread's own — presumably the locking convention used by
		 * the senders as well; verify against the delivery path. */
		SPIN_GUARD_RAW(process->signal.lock);
		SPIN_GUARD_RAW(thread->signal.lock);
		// is a waited signal already pending?
		// ffsll yields the 1-based index of the lowest pending+waited
		// signal, or 0 when none match — lower signal numbers win.
		int process_signum = __builtin_ffsll(process->signal.pending_mask & wait_mask);
		int thread_signum = __builtin_ffsll(thread->signal.pending_mask & wait_mask);
		/* Prefer the process-level signal when it exists and its number
		 * is not higher than the thread-level one. */
		if (process_signum && (!thread_signum || process_signum < thread_signum)) {
			int signum = process_signum - 1;  /* back to 0-based */
			uint64_t sigbit = 1ull << (signum%SIGNAL_LIMIT);
			struct process_signal_info* sig = process->signal.sig + signum;
			/* Consume one queued instance of this signal; clear the
			 * pending bit only when the queue drains. */
			fifo_item_t *fi = fifo_pop(&sig->pending);
			if (fifo_empty(&sig->pending))
				process->signal.pending_mask &= ~sigbit;
			struct signal_pending* pending = fifo_container(fi, struct signal_pending, item);
			/* Record the taken signal on the thread and free the
			 * queued payload after copying its value out. */
			thread->signal.wait_mask = 0;
			thread->signal.wait_signum = retval = signum;
			thread->signal.wait_sigval = pending->sigval;
			heap_free(pending);
		} else if (thread_signum) {
/*
 * NOTE(review): this excerpt is truncated — the #if ladder and the
 * function body are not closed within the visible text.
 */
static Int msb(Int inp USES_REGS) /* calculate the most significant bit for an integer */
{
	/* the obvious solution: do it by using binary search */
	Int out = 0;
	/* Negative input is a domain error for msb/1. */
	if (inp < 0) {
		return Yap_ArithError(DOMAIN_ERROR_NOT_LESS_THAN_ZERO, MkIntegerTerm(inp),
			"msb/1 received %d", inp);
	}
#if HAVE__BUILTIN_FFSLL
	/* NOTE(review): ffsll returns the 1-based index of the LEAST
	 * significant set bit, which looks inconsistent with this
	 * function's stated purpose (most significant bit) — the two agree
	 * only for powers of two.  __builtin_clzll-based math would give
	 * the MSB; confirm intent against callers before changing. */
	out = __builtin_ffsll(inp);
#elif HAVE_FFSLL
	/* Same concern as above for the libc ffsll fallback. */
	out = ffsll(inp);
#else
	if (inp==0)
		return 0L;
#if SIZEOF_INT_P == 8
	/* Manual binary search: fold the upper 32 bits down, accumulating
	 * the bit offset. */
	if (inp & ((CELL)0xffffffffLL << 32)) {inp >>= 32; out += 32;}
/* Compile-time checks that the compiler constant-folds GCC bit builtins:
 * each predicate must fold to true, otherwise the array gets size -1 and
 * compilation fails.  BITSIZE is a macro defined earlier in the file. */
char parity3[__builtin_parity(0xb822) == 0 ? 1 : -1];
char parity4[__builtin_parity(0xb823) == 1 ? 1 : -1];
char parity5[__builtin_parity(0xb824) == 0 ? 1 : -1];
char parity6[__builtin_parity(0xb825) == 1 ? 1 : -1];
char parity7[__builtin_parity(0xb826) == 1 ? 1 : -1];
char parity8[__builtin_parity(~0) == 0 ? 1 : -1];
char parity9[__builtin_parityl(1L << (BITSIZE(long) - 1)) == 1 ? 1 : -1];
char parity10[__builtin_parityll(1LL << (BITSIZE(long long) - 1)) == 1 ? 1 : -1];
/* ffs: 1-based index of the least significant set bit; 0 for input 0. */
char ffs1[__builtin_ffs(0) == 0 ? 1 : -1];
char ffs2[__builtin_ffs(1) == 1 ? 1 : -1];
char ffs3[__builtin_ffs(0xfbe71) == 1 ? 1 : -1];
char ffs4[__builtin_ffs(0xfbe70) == 5 ? 1 : -1];
char ffs5[__builtin_ffs(1U << (BITSIZE(int) - 1)) == BITSIZE(int) ? 1 : -1];
char ffs6[__builtin_ffsl(0x10L) == 5 ? 1 : -1];
char ffs7[__builtin_ffsll(0x100LL) == 9 ? 1 : -1];
#undef BITSIZE
// GCC misc stuff
extern int f();
/* Type-compatibility and byte-swap builtins must also fold at
 * translation time (the f() arms are never evaluated when they do). */
int h0 = __builtin_types_compatible_p(int, float);
//int h1 = __builtin_choose_expr(1, 10, f());
//int h2 = __builtin_expect(0, 0);
int h3 = __builtin_bswap16(0x1234) == 0x3412 ? 1 : f();
int h4 = __builtin_bswap32(0x1234) == 0x34120000 ? 1 : f();
int h5 = __builtin_bswap64(0x1234) == 0x3412000000000000 ? 1 : f();
/* Redeclaration checks that __builtin_expect's result type is long. */
extern long int bi0;
extern __typeof__(__builtin_expect(0, 0)) bi0;
static int process_request(struct channel_packet *pkt, struct channel_info *chan_info) { uint64_t core_mask; if (chan_info == NULL) return -1; if (rte_atomic32_cmpset(&(chan_info->status), CHANNEL_MGR_CHANNEL_CONNECTED, CHANNEL_MGR_CHANNEL_PROCESSING) == 0) return -1; if (pkt->command == CPU_POWER) { core_mask = get_pcpus_mask(chan_info, pkt->resource_id); if (core_mask == 0) { RTE_LOG(ERR, CHANNEL_MONITOR, "Error get physical CPU mask for " "channel '%s' using vCPU(%u)\n", chan_info->channel_path, (unsigned)pkt->unit); return -1; } if (__builtin_popcountll(core_mask) == 1) { unsigned core_num = __builtin_ffsll(core_mask) - 1; switch (pkt->unit) { case(CPU_POWER_SCALE_MIN): power_manager_scale_core_min(core_num); break; case(CPU_POWER_SCALE_MAX): power_manager_scale_core_max(core_num); break; case(CPU_POWER_SCALE_DOWN): power_manager_scale_core_down(core_num); break; case(CPU_POWER_SCALE_UP): power_manager_scale_core_up(core_num); break; default: break; } } else { switch (pkt->unit) { case(CPU_POWER_SCALE_MIN): power_manager_scale_mask_min(core_mask); break; case(CPU_POWER_SCALE_MAX): power_manager_scale_mask_max(core_mask); break; case(CPU_POWER_SCALE_DOWN): power_manager_scale_mask_down(core_mask); break; case(CPU_POWER_SCALE_UP): power_manager_scale_mask_up(core_mask); break; default: break; } } } /* Return is not checked as channel status may have been set to DISABLED * from management thread */ rte_atomic32_cmpset(&(chan_info->status), CHANNEL_MGR_CHANNEL_PROCESSING, CHANNEL_MGR_CHANNEL_CONNECTED); return 0; }
/* Find-first-set for a long long: the 1-based index of the least
   significant set bit of X, or 0 when X has no bits set.  Thin libc-style
   wrapper over the GCC builtin. */
int
ffsll (long long x)
{
  const int pos = __builtin_ffsll (x);
  return pos;
}
/* 1-based index of the least significant set bit of X, or 0 when X is 0.
   Implemented via count-trailing-zeros: for non-zero X,
   ffs(X) == ctz(X) + 1.  The explicit zero guard is required because
   __builtin_ctzll(0) is undefined. */
inline unsigned int
ffsll (unsigned long long x)
{
  if (x == 0)
    return 0;
  return (unsigned int) __builtin_ctzll (x) + 1;
}