TEST(AtomicTest, test_add_sub_u64) {
  atomic_u64_t data;
  atomic_store_u64(&data, 0);
  EXPECT_EQ((unsigned)0, atomic_load_u64(&data));

  uint64_t val = atomic_add_u64(&data, 0xffff);
  EXPECT_EQ((unsigned)0xffff, atomic_load_u64(&data));
  EXPECT_EQ((unsigned)0xffff, val);

  val = atomic_add_u64(&data, 0xffff);
  EXPECT_EQ((unsigned)(2 * 0xffff), atomic_load_u64(&data));
  EXPECT_EQ((unsigned)(2 * 0xffff), val);

  val = atomic_add_u64(&data, 0xffff);
  EXPECT_EQ((unsigned)(3 * 0xffff), atomic_load_u64(&data));
  EXPECT_EQ((unsigned)(3 * 0xffff), val);
  EXPECT_NE((unsigned)(3 * 0xfff0), val);

  val = atomic_sub_u64(&data, 0xffff);
  EXPECT_EQ((unsigned)(2 * 0xffff), val);

  val = atomic_sub_u64(&data, 0);
  EXPECT_EQ((unsigned)(2 * 0xffff), val);

  val = atomic_sub_u64(&data, 0xffff);
  EXPECT_EQ((unsigned)(1 * 0xffff), val);

  val = atomic_sub_u64(&data, 0xffff);
  EXPECT_EQ((unsigned)0, val);
}
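/*
 * A minimal sketch of the u64 helpers exercised by the test above, assuming
 * GCC/Clang __atomic builtins and sequentially consistent ordering; the real
 * definitions live elsewhere. The test expects atomic_add_u64/atomic_sub_u64
 * to return the *updated* value, hence the add_fetch/sub_fetch forms. The
 * typedef is an assumption made only so this sketch is self-contained.
 */
#include <stdint.h>

typedef volatile uint64_t atomic_u64_t;

static inline void atomic_store_u64(atomic_u64_t *ptr, uint64_t val)
  { __atomic_store_n(ptr, val, __ATOMIC_SEQ_CST); }

static inline uint64_t atomic_load_u64(atomic_u64_t *ptr)
  { return __atomic_load_n(ptr, __ATOMIC_SEQ_CST); }

static inline uint64_t atomic_add_u64(atomic_u64_t *ptr, uint64_t val)
  { return __atomic_add_fetch(ptr, val, __ATOMIC_SEQ_CST); }

static inline uint64_t atomic_sub_u64(atomic_u64_t *ptr, uint64_t val)
  { return __atomic_sub_fetch(ptr, val, __ATOMIC_SEQ_CST); }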
static inline ssize_t _process_msg0
  ( timeshift_t *ts, timeshift_file_t *tsf, streaming_message_t **smp )
{
  int i;
  ssize_t err;
  streaming_start_t *ss;
  streaming_message_t *sm = *smp;
  if (sm->sm_type == SMT_START) {
    err = 0;
    timeshift_index_data_t *ti = calloc(1, sizeof(timeshift_index_data_t));
    ti->pos  = tsf->size;
    ti->data = sm;
    *smp     = NULL;
    TAILQ_INSERT_TAIL(&tsf->sstart, ti, link);

    /* Update video index */
    ss = sm->sm_data;
    for (i = 0; i < ss->ss_num_components; i++)
      if (SCT_ISVIDEO(ss->ss_components[i].ssc_type))
        ts->vididx = ss->ss_components[i].ssc_index;
  } else if (sm->sm_type == SMT_SIGNAL_STATUS)
    err = timeshift_write_sigstat(tsf, sm->sm_time, sm->sm_data);
  else if (sm->sm_type == SMT_PACKET) {
    err = timeshift_write_packet(tsf, sm->sm_time, sm->sm_data);
    if (err > 0) {
      th_pkt_t *pkt = sm->sm_data;

      /* Index video iframes */
      if (pkt->pkt_componentindex == ts->vididx &&
          pkt->pkt_frametype      == PKT_I_FRAME) {
        timeshift_index_iframe_t *ti = calloc(1, sizeof(timeshift_index_iframe_t));
        ti->pos  = tsf->size;
        ti->time = sm->sm_time;
        TAILQ_INSERT_TAIL(&tsf->iframes, ti, link);
      }
    }
  } else if (sm->sm_type == SMT_MPEGTS)
    err = timeshift_write_mpegts(tsf, sm->sm_time, sm->sm_data);
  else
    err = 0; /* OK */

  if (err > 0) {
    tsf->last  = sm->sm_time;
    tsf->size += err;
    atomic_add_u64(&timeshift_total_size, err);
    if (tsf->ram)
      atomic_add_u64(&timeshift_total_ram_size, err);
  }

  return err;
}
/*
 * Close file
 */
void timeshift_filemgr_close ( timeshift_file_t *tsf )
{
  ssize_t r = timeshift_write_eof(tsf);
  if (r > 0) {
    tsf->size += r;
    atomic_add_u64(&timeshift_total_size, r);
    if (tsf->ram)
      atomic_add_u64(&timeshift_total_ram_size, r);
  }
  if (tsf->wfd >= 0)
    close(tsf->wfd);
  tsf->wfd = -1;
}
static int timer_stress_worker(void* void_arg) {
  timer_stress_args* args = reinterpret_cast<timer_stress_args*>(void_arg);
  while (!atomic_load(&args->timer_stress_done)) {
    timer_t t = TIMER_INITIAL_VALUE(t);
    zx_duration_t timer_duration = rand_duration(ZX_MSEC(5));

    // Set a timer, then switch to a different CPU to ensure we race with it.
    arch_disable_ints();
    uint timer_cpu = arch_curr_cpu_num();
    const Deadline deadline = Deadline::no_slack(current_time() + timer_duration);
    timer_set(&t, deadline, timer_stress_cb, void_arg);
    thread_set_cpu_affinity(get_current_thread(), ~cpu_num_to_mask(timer_cpu));
    DEBUG_ASSERT(arch_curr_cpu_num() != timer_cpu);
    arch_enable_ints();

    // We're now running on something other than timer_cpu.
    atomic_add_u64(&args->num_set, 1);

    // Sleep for the timer duration so that this thread's timer_cancel races with the timer
    // callback. We want to race to ensure there are no synchronization or memory visibility
    // issues.
    thread_sleep_relative(timer_duration);

    timer_cancel(&t);
  }
  return 0;
}
/*
 * Remove file
 */
void timeshift_filemgr_remove
  ( timeshift_t *ts, timeshift_file_t *tsf, int force )
{
  if (tsf->wfd >= 0)
    close(tsf->wfd);
  assert(tsf->rfd < 0);
#if ENABLE_TRACE
  if (tsf->path)
    tvhdebug("timeshift", "ts %d remove %s", ts->id, tsf->path);
  else
    tvhdebug("timeshift", "ts %d RAM segment remove time %li",
             ts->id, (long)tsf->time);
#endif
  TAILQ_REMOVE(&ts->files, tsf, link);
  atomic_add_u64(&timeshift_total_size, -tsf->size);
  if (tsf->ram)
    atomic_add_u64(&timeshift_total_ram_size, -tsf->size);
  timeshift_reaper_remove(tsf);
}
/*
 * Remove file
 */
void timeshift_filemgr_remove
  ( timeshift_t *ts, timeshift_file_t *tsf, int force )
{
  if (tsf->fd != -1)
    close(tsf->fd);
  tvhlog(LOG_DEBUG, "timeshift", "ts %d remove %s", ts->id, tsf->path);
  TAILQ_REMOVE(&ts->files, tsf, link);
  atomic_add_u64(&timeshift_total_size, -tsf->size);
  timeshift_reaper_remove(tsf);
}
static inline ssize_t _process_msg0
  ( timeshift_t *ts, timeshift_file_t *tsf, streaming_message_t *sm )
{
  ssize_t err;
  if (sm->sm_type == SMT_START) {
    err = 0;
    _handle_sstart(ts, tsf, streaming_msg_clone(sm));
  } else if (sm->sm_type == SMT_SIGNAL_STATUS)
    err = timeshift_write_sigstat(tsf, sm->sm_time, sm->sm_data);
  else if (sm->sm_type == SMT_PACKET) {
    err = timeshift_write_packet(tsf, sm->sm_time, sm->sm_data);
    if (err > 0) {
      th_pkt_t *pkt = sm->sm_data;

      /* Index video iframes */
      if (pkt->pkt_componentindex == ts->vididx &&
          pkt->pkt_frametype      == PKT_I_FRAME) {
        timeshift_index_iframe_t *ti = calloc(1, sizeof(timeshift_index_iframe_t));
        memoryinfo_alloc(&timeshift_memoryinfo, sizeof(*ti));
        ti->pos  = tsf->size;
        ti->time = sm->sm_time;
        TAILQ_INSERT_TAIL(&tsf->iframes, ti, link);
      }
    }
  } else if (sm->sm_type == SMT_MPEGTS) {
    err = timeshift_write_mpegts(tsf, sm->sm_time, sm->sm_data);
  } else
    err = 0; /* OK */

  if (err > 0) {
    tsf->last  = sm->sm_time;
    tsf->size += err;
    atomic_add_u64(&timeshift_total_size, err);
    if (tsf->ram)
      atomic_add_u64(&timeshift_total_ram_size, err);
  }

  return err;
}
static void timer_stress_cb(struct timer* t, zx_time_t now, void* void_arg) {
  timer_stress_args* args = reinterpret_cast<timer_stress_args*>(void_arg);
  atomic_add_u64(&args->num_fired, 1);
}
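// A minimal sketch of the shared argument struct that timer_stress_worker and
// timer_stress_cb above dereference; the exact field types are assumptions
// inferred from the atomic_load/atomic_add_u64 calls, not the kernel's actual
// definition.
struct timer_stress_args {
  volatile int timer_stress_done;  // set non-zero to stop the worker loop
  volatile uint64_t num_set;       // timers armed by the worker
  volatile uint64_t num_fired;     // callbacks observed to run
};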