/*
 * Attach an application context pointer to a queue.
 *
 * @param handle   Queue handle to update
 * @param context  Opaque application pointer stored in the queue params
 * @return 0 (always succeeds)
 *
 * NOTE(review): store barriers bracket the write so the new context
 * becomes visible to other cores — presumably readers pair with these;
 * confirm against the queue read path.
 */
int odp_queue_set_context(odp_queue_t handle, void *context)
{
	queue_entry_t *qentry = queue_to_qentry(handle);

	odp_sync_stores();
	qentry->s.param.context = context;
	odp_sync_stores();

	return 0;
}
/*
 * Release a ticket lock.
 *
 * Only the current lock owner may call this, which means no other
 * thread can be writing 'cur_ticket' concurrently. A relaxed load
 * followed by a store-release of the incremented value is therefore
 * sufficient — no atomic read-modify-write needed.
 *
 * @param ticketlock  Lock to release
 */
void odp_ticketlock_unlock(odp_ticketlock_t *ticketlock)
{
	uint32_t ticket = odp_atomic_load_u32(&ticketlock->cur_ticket);

	odp_atomic_store_rel_u32(&ticketlock->cur_ticket, ticket + 1);

#if defined __OCTEON__
	/* SYNCW to flush the write buffer */
	odp_sync_stores();
#endif
}
/*
 * Create a timer backed by a fixed-size global slot table.
 *
 * @param name        Unused
 * @param pool        Buffer pool stored in the claimed timer slot
 * @param resolution  Unused — slot gets the compile-time RESOLUTION_NS
 * @param min_tmo     Unused
 * @param max_tmo     Unused — slot gets the compile-time MAX_TICKS
 * @return            Timer handle (slot index + 1, so 0 can mean invalid),
 *                    or ODP_TIMER_INVALID if the table is full
 */
odp_timer_t odp_timer_create(const char *name, odp_buffer_pool_t pool,
			     uint64_t resolution, uint64_t min_tmo,
			     uint64_t max_tmo)
{
	uint32_t id;

	(void) name;
	(void) resolution;
	(void) min_tmo;
	(void) max_tmo;

	/* Cheap early-out; not authoritative under concurrency. */
	if (odp_timer.num_timers >= NUM_TIMERS)
		return ODP_TIMER_INVALID;

	/* Atomically claim a slot index. Re-check the bound afterwards:
	 * concurrent callers may have raced past the check above.
	 * NOTE(review): a losing racer leaves num_timers incremented
	 * past NUM_TIMERS; benign since later calls fail the first
	 * check, but the counter no longer reflects live timers. */
	id = odp_atomic_fetch_inc_int(&odp_timer.num_timers);

	if (id >= NUM_TIMERS)
		return ODP_TIMER_INVALID;

	odp_timer.timer[id].pool = pool;
	odp_timer.timer[id].resolution_ns = RESOLUTION_NS;
	odp_timer.timer[id].max_ticks = MAX_TICKS;

	/* Flush the slot's fields before publishing it via 'active',
	 * so readers that observe active == 1 see initialized data. */
	odp_sync_stores();

	odp_timer.timer[id].active = 1;

	/* Handles are 1-based. */
	return id + 1;
}
/*
 * Periodic performance-statistics timeout handler.
 *
 * Re-arms itself (while OFP_STAT_COMPUTE_PERF is set), sums the
 * per-core fast-path RX packet counters, and updates the smoothed
 * packets-per-second estimate in shared memory.
 *
 * @param arg  Unused timer argument
 */
static void ofp_perf_tmo(void *arg)
{
	uint64_t pps, value = 0;
	int core;

	(void)arg;

	/* Re-arm first so sampling jitter doesn't accumulate. */
	if (ofp_stat_flags & OFP_STAT_COMPUTE_PERF)
		ofp_timer_start(US_PER_SEC/PROBES, ofp_perf_tmo, NULL, 0);

	odp_sync_stores();

	for (core = 0; core < odp_cpu_count(); core++)
		value += shm_stat->ofp_packet_statistics.per_core[core].rx_fp;

	/* Unsigned subtraction yields the per-interval delta even when
	 * the running counter wraps: (value - prev) mod 2^64 is exact.
	 * The previous explicit wrap branch computed
	 * UINT64_MAX - prev + value, which is off by one packet. */
	pps = value - shm_stat->ofp_perf_stat.rx_prev_sum;

	/* Smooth: average the new interval's rate with the old estimate. */
	shm_stat->ofp_perf_stat.rx_fp_pps =
		(shm_stat->ofp_perf_stat.rx_fp_pps + pps * PROBES) / 2;
	shm_stat->ofp_perf_stat.rx_prev_sum = value;
}