int Statistics::IncRecvCommands(const std::string& server, int64& seq) { ServerStatistics& stat = m_server_stats[server]; seq = atomic_add_uint64(&stat.stat_numcommands, 1); atomic_add_uint64(&stat.instantaneous_ops, 1); if (stat.ops_limit > 0 && stat.instantaneous_ops >= stat.ops_limit) { return ERR_OVERLOAD; } return 0; }
/*
 * Ad-hoc smoke test for the atomic add/sub helpers: exercises the 64-bit
 * and 32-bit variants once each and prints "new value,returned value".
 *
 * Fix: the original used %ld for uint64_t and %d for uint32_t. A mismatched
 * printf conversion specifier is undefined behavior, and %ld is outright
 * wrong on ILP32 targets where long is 32 bits. Print 64-bit values through
 * an explicit cast to unsigned long long with %llu, and 32-bit unsigned
 * values with %u.
 */
int main(int argc, char const *argv[])
{
    uint64_t i64 = 10, c64;
    uint32_t i32 = 10, c32;

    c64 = atomic_add_uint64(&i64, 20);
    printf("%llu,%llu\n", (unsigned long long)i64, (unsigned long long)c64);

    i64 = 20;
    c64 = atomic_sub_uint64(&i64, 5);
    printf("%llu,%llu\n", (unsigned long long)i64, (unsigned long long)c64);

    c32 = atomic_add_uint32(&i32, 20);
    printf("%u,%u\n", i32, c32);

    i32 = 20;
    c32 = atomic_sub_uint32(&i32, 5);
    printf("%u,%u\n", i32, c32);
    return 0;
}
/*
 * Interrupt dispatcher.
 *
 * Reads the CIU IP2 summary/enable registers for the current CPU, masks
 * every pending source, then services the allowed ones from the highest
 * spl level down, re-enabling serviced sources afterwards.  Sources masked
 * by the current spl are left masked for splx() to re-enable.
 */
uint32_t
obio_iointr(uint32_t hwpend, struct trap_frame *frame)
{
	struct cpu_info *ci = curcpu();
	int cpuid = cpu_number();
	uint64_t imr, isr, mask;
	int ipl;
	int bit;
	struct intrhand *ih;
	int rc;
	/* Per-CPU CIU register offsets for the IP2 interrupt line. */
	uint64_t sum0 = CIU_IP2_SUM0(cpuid);
	uint64_t en0 = CIU_IP2_EN0(cpuid);

	isr = bus_space_read_8(&obio_tag, obio_h, sum0);
	imr = bus_space_read_8(&obio_tag, obio_h, en0);
	bit = 63;	/* scan sources from the highest bit down */

	/* Only consider sources that are both pending and enabled. */
	isr &= imr;
	if (isr == 0)
		return 0;	/* not for us */

	/*
	 * Mask all pending interrupts.
	 */
	bus_space_write_8(&obio_tag, obio_h, en0, imr & ~isr);

	/*
	 * If interrupts are spl-masked, mask them and wait for splx()
	 * to reenable them when necessary.
	 */
	if ((mask = isr & obio_imask[cpuid][frame->ipl]) != 0) {
		isr &= ~mask;
		imr &= ~mask;
	}

	/*
	 * Now process allowed interrupts.
	 */
	if (isr != 0) {
		int lvl, bitno;
		uint64_t tmpisr;

		/* Snapshot the current ipl; the asm fences keep the
		   read/writes of ci_ipl ordered on this CPU. */
		__asm__ (".set noreorder\n");
		ipl = ci->ci_ipl;
		__asm__ ("sync\n\t.set reorder\n");

		/* Service higher level interrupts first */
		for (lvl = NIPLS - 1; lvl != IPL_NONE; lvl--) {
			/* Sources that become serviceable exactly at this
			   level (masked at lvl-1 but not at lvl). */
			tmpisr = isr & (obio_imask[cpuid][lvl] ^ obio_imask[cpuid][lvl - 1]);
			if (tmpisr == 0)
				continue;
			for (bitno = bit, mask = 1UL << bitno; mask != 0;
			    bitno--, mask >>= 1) {
				if ((tmpisr & mask) == 0)
					continue;

				/* Walk every handler chained on this source. */
				rc = 0;
				for (ih = (struct intrhand *)obio_intrhand[bitno];
				    ih != NULL; ih = ih->ih_next) {
#ifdef MULTIPROCESSOR
					u_int32_t sr;
#endif
					splraise(ih->ih_level);
#ifdef MULTIPROCESSOR
					/* Low-priority handlers run with IPIs
					   enabled and under the kernel lock. */
					if (ih->ih_level < IPL_IPI) {
						sr = getsr();
						ENABLEIPI();
						if (ipl < IPL_SCHED)
							__mp_lock(&kernel_lock);
					}
#endif
					if ((*ih->ih_fun)(ih->ih_arg) != 0) {
						rc = 1;
						atomic_add_uint64(&ih->ih_count.ec_count, 1);
					}
#ifdef MULTIPROCESSOR
					if (ih->ih_level < IPL_IPI) {
						if (ipl < IPL_SCHED)
							__mp_unlock(&kernel_lock);
						setsr(sr);
					}
#endif
					/* Restore the ipl we entered with. */
					__asm__ (".set noreorder\n");
					ci->ci_ipl = ipl;
					__asm__ ("sync\n\t.set reorder\n");
				}
				if (rc == 0)
					printf("spurious crime interrupt %d\n", bitno);

				/* Mark this source handled; stop early once
				   every bit of this level is done. */
				isr ^= mask;
				if ((tmpisr ^= mask) == 0)
					break;
			}
		}

		/*
		 * Reenable interrupts which have been serviced.
		 */
		bus_space_write_8(&obio_tag, obio_h, en0, imr);
	/*
	 * NOTE(review): as captured here, the `if (isr != 0)` block is never
	 * closed and control can fall off the end of this non-void function.
	 * The upstream OpenBSD dispatcher this mirrors closes the block and
	 * ends with `return hwpend;` — confirm this chunk was not truncated.
	 */
}
/*
 * Grow this object's estimated memory footprint by `delta` bytes and mirror
 * the change into the shared total-memory counter.
 *
 * The local estimate is updated with a plain (non-atomic) add, while the
 * shared total referenced by m_total_mem_size_ref goes through the atomic
 * helper — presumably the local field is only touched from one thread;
 * confirm against callers.
 */
void AddEstimateMemSize(uint32 delta) { m_estimate_mem_size += delta; atomic_add_uint64(m_total_mem_size_ref, delta); }
/*
 * Interrupt dispatcher.
 *
 * Macro-templated variant: the including site defines INTR_FUNCTIONNAME,
 * INTR_LOCAL_DECLS, INTR_GETMASKS (which must set isr, imr and bit),
 * INTR_MASKPENDING, INTR_IMASK, INTR_HANDLER, INTR_SPURIOUS,
 * INTR_MASKRESTORE and optionally INTR_HANDLER_SKIP, to bind this generic
 * dispatch loop to a particular interrupt controller.
 */
uint32_t
INTR_FUNCTIONNAME(uint32_t hwpend, struct trap_frame *frame)
{
	struct cpu_info *ci = curcpu();
	uint64_t imr, isr, mask;
	int ipl;
	int bit;
	struct intrhand *ih;
	int rc, ret;
	INTR_LOCAL_DECLS

	/* Controller-specific: load pending (isr), enabled (imr), and the
	   top scan bit. */
	INTR_GETMASKS;

	/* Only consider sources that are both pending and enabled. */
	isr &= imr;
	if (isr == 0)
		return 0;	/* not for us */

	/*
	 * Mask all pending interrupts.
	 */
	INTR_MASKPENDING;

	/*
	 * If interrupts are spl-masked, mask them and wait for splx()
	 * to reenable them when necessary.
	 */
	if ((mask = isr & INTR_IMASK(frame->ipl)) != 0) {
		isr &= ~mask;
		imr &= ~mask;
	}

	/*
	 * Now process allowed interrupts.
	 */
	if (isr != 0) {
		int lvl, bitno;
		uint64_t tmpisr;

		/* Snapshot the current ipl; the asm fences keep the
		   read/writes of ci_ipl ordered on this CPU. */
		__asm__ (".set noreorder\n");
		ipl = ci->ci_ipl;
		__asm__ ("sync\n\t.set reorder\n");

		/* Service higher level interrupts first */
		for (lvl = NIPLS - 1; lvl != IPL_NONE; lvl--) {
			/* Sources that become serviceable exactly at this
			   level (masked at lvl-1 but not at lvl). */
			tmpisr = isr & (INTR_IMASK(lvl) ^ INTR_IMASK(lvl - 1));
			if (tmpisr == 0)
				continue;
			for (bitno = bit, mask = 1UL << bitno; mask != 0;
			    bitno--, mask >>= 1) {
				if ((tmpisr & mask) == 0)
					continue;

				/* Walk every handler chained on this source. */
				rc = 0;
				for (ih = INTR_HANDLER(bitno); ih != NULL;
				    ih = ih->ih_next) {
#ifdef MULTIPROCESSOR
					u_int32_t sr;
#endif
#if defined(INTR_HANDLER_SKIP)
					/* Optional per-handler filter. */
					if (INTR_HANDLER_SKIP(ih) != 0)
						continue;
#endif
					splraise(ih->ih_level);
#ifdef MULTIPROCESSOR
					/* Low-priority handlers run with IPIs
					   enabled and under the kernel lock. */
					if (ih->ih_level < IPL_IPI) {
						sr = getsr();
						ENABLEIPI();
						if (ipl < IPL_SCHED)
							__mp_lock(&kernel_lock);
					}
#endif
					ret = (*ih->ih_fun)(ih->ih_arg);
					if (ret != 0) {
						rc = 1;
						atomic_add_uint64(&ih->ih_count.ec_count, 1);
					}
#ifdef MULTIPROCESSOR
					if (ih->ih_level < IPL_IPI) {
						if (ipl < IPL_SCHED)
							__mp_unlock(&kernel_lock);
						setsr(sr);
					}
#endif
					/* Restore the ipl we entered with. */
					__asm__ (".set noreorder\n");
					ci->ci_ipl = ipl;
					__asm__ ("sync\n\t.set reorder\n");

					/* ret == 1: handler claimed the
					   interrupt exclusively; stop the
					   chain walk. */
					if (ret == 1)
						break;
				}
				if (rc == 0)
					INTR_SPURIOUS(bitno);

				/* Mark this source handled; stop early once
				   every bit of this level is done. */
				isr ^= mask;
				if ((tmpisr ^= mask) == 0)
					break;
			}
		}

		/*
		 * Reenable interrupts which have been serviced.
		 */
		INTR_MASKRESTORE;
	/*
	 * NOTE(review): as captured here, the `if (isr != 0)` block is never
	 * closed and control can fall off the end of this non-void function.
	 * The upstream OpenBSD template closes the block and ends with
	 * `return hwpend;` — confirm this chunk was not truncated.
	 */
}
/*
 * Record a newly accepted client connection: bumps both the gauge of
 * currently connected clients and the lifetime connection total, each via
 * the atomic helper so concurrent acceptors do not lose updates.
 */
void ServerStat::IncAcceptedClient() { atomic_add_uint64(&connected_clients, 1); atomic_add_uint64(&stat_numconnections, 1); }
/*
 * Count one received command, both in the lifetime total and in the
 * per-period slot ring used for rate reporting.  `now` modulo
 * kMaxPeriodSlot selects the current slot — presumably `now` is a
 * non-negative timestamp/tick kept current elsewhere; confirm, since a
 * negative value would yield a negative index.
 */
void ServerStat::IncRecvCommands() { atomic_add_uint64(&stat_numcommands, 1); atomic_add_uint64(&(stat_period_numcommands[now % kMaxPeriodSlot]), 1); }
/*
 * Atomically count one refused connection for `server`.
 */
void Statistics::IncRefusedConnection(const std::string& server)
{
    /* operator[] creates the per-server entry on first use. */
    atomic_add_uint64(&m_server_stats[server].refused_connections, 1);
}
/*
 * Atomically add `v` to the count of connections received by `server`.
 */
void Statistics::IncAcceptedClient(const std::string& server, int v)
{
    /* operator[] creates the per-server entry on first use. */
    atomic_add_uint64(&m_server_stats[server].connections_received, v);
}