rcl_ret_t rcl_timer_get_time_until_next_call(const rcl_timer_t * timer, int64_t * time_until_next_call) { RCL_CHECK_ARGUMENT_FOR_NULL(timer, RCL_RET_INVALID_ARGUMENT); RCL_CHECK_ARGUMENT_FOR_NULL(time_until_next_call, RCL_RET_INVALID_ARGUMENT); RCL_CHECK_FOR_NULL_WITH_MSG(timer->impl, "timer is invalid", return RCL_RET_TIMER_INVALID); rcl_steady_time_point_t now; rcl_ret_t ret = rcl_steady_time_point_now(&now); if (ret != RCL_RET_OK) { return ret; // rcl error state should already be set. } uint64_t period = rcl_atomic_load_uint64_t(&timer->impl->period); *time_until_next_call = (rcl_atomic_load_uint64_t(&timer->impl->last_call_time) + period) - now.nanoseconds; return RCL_RET_OK; }
/**
 * Retrieve the timer's period in nanoseconds.
 *
 * \param[in] timer handle to a valid, initialized timer
 * \param[out] period the timer's period, loaded atomically
 * \return RCL_RET_OK on success,
 *         RCL_RET_INVALID_ARGUMENT if either pointer is NULL, or
 *         RCL_RET_TIMER_INVALID if the timer is not initialized.
 */
rcl_ret_t
rcl_timer_get_period(const rcl_timer_t * timer, uint64_t * period)
{
  RCL_CHECK_ARGUMENT_FOR_NULL(timer, RCL_RET_INVALID_ARGUMENT);
  RCL_CHECK_ARGUMENT_FOR_NULL(period, RCL_RET_INVALID_ARGUMENT);
  RCL_CHECK_FOR_NULL_WITH_MSG(timer->impl, "timer is invalid", return RCL_RET_TIMER_INVALID);
  uint64_t current_period = rcl_atomic_load_uint64_t(&timer->impl->period);
  *period = current_period;
  return RCL_RET_OK;
}
/**
 * Globally initialize rcl: install the allocator, copy argc/argv, and assign
 * a new unique instance id.
 *
 * Not thread-safe with respect to rcl_shutdown() (see the race note below).
 *
 * \param[in] argc number of strings in argv
 * \param[in] argv command line arguments; may be NULL only when argc is 0
 * \param[in] allocator allocator used for all global rcl allocations; its
 *   allocate and deallocate members must be set
 * \return RCL_RET_OK on success,
 *         RCL_RET_INVALID_ARGUMENT on NULL argv (with argc > 0) or an
 *           incomplete allocator,
 *         RCL_RET_ALREADY_INIT if rcl is already initialized,
 *         RCL_RET_BAD_ALLOC if an allocation fails, or
 *         RCL_RET_ERROR on other failures (e.g. instance id exhaustion).
 */
rcl_ret_t
rcl_init(int argc, char ** argv, rcl_allocator_t allocator)
{
  rcl_ret_t fail_ret = RCL_RET_ERROR;
  if (argc > 0) {
    RCL_CHECK_ARGUMENT_FOR_NULL(argv, RCL_RET_INVALID_ARGUMENT);
  }
  RCL_CHECK_FOR_NULL_WITH_MSG(
    allocator.allocate, "invalid allocator, allocate not set", return RCL_RET_INVALID_ARGUMENT);
  RCL_CHECK_FOR_NULL_WITH_MSG(
    allocator.deallocate, "invalid allocator, deallocate not set",
    return RCL_RET_INVALID_ARGUMENT);
  if (rcl_atomic_exchange_bool(&__rcl_is_initialized, true)) {
    RCL_SET_ERROR_MSG("rcl_init called while already initialized");
    return RCL_RET_ALREADY_INIT;
  }
  // There is a race condition between the time __rcl_is_initialized is set true,
  // and when the allocator is set, in which rcl_shutdown() could get rcl_ok() as
  // true and try to use the allocator, but it isn't set yet...
  // A very unlikely race condition, but it is possible I think.
  // I've documented that rcl_init() and rcl_shutdown() are not thread-safe with each other.
  __rcl_allocator = allocator;  // Set the new allocator.
  // TODO(wjwwood): Remove rcl specific command line arguments.
  // For now just copy the argc and argv.
  __rcl_argc = argc;
  __rcl_argv = NULL;
  if (argc > 0) {
    // Only allocate for a non-zero argc; allocate(0) may legitimately return
    // NULL and would otherwise be misreported as an allocation failure.
    __rcl_argv = (char **)__rcl_allocator.allocate(sizeof(char *) * argc, __rcl_allocator.state);
    if (!__rcl_argv) {
      RCL_SET_ERROR_MSG("allocation failed");
      fail_ret = RCL_RET_BAD_ALLOC;
      goto fail;
    }
    // Zero the pointer array (sizeof(char *) per slot, matching the allocation)
    // so cleanup can safely free a partially filled array on failure.
    memset(__rcl_argv, 0, sizeof(char *) * argc);
    for (int i = 0; i < argc; ++i) {
      // Allocate strlen + 1 and copy the terminator too, so each stored
      // string is NUL-terminated like the original argv entries.
      size_t arg_size = strlen(argv[i]) + 1;
      __rcl_argv[i] = (char *)__rcl_allocator.allocate(arg_size, __rcl_allocator.state);
      if (!__rcl_argv[i]) {
        RCL_SET_ERROR_MSG("allocation failed");
        fail_ret = RCL_RET_BAD_ALLOC;
        goto fail;
      }
      memcpy(__rcl_argv[i], argv[i], arg_size);
    }
  }
  rcl_atomic_store(&__rcl_instance_id, ++__rcl_next_unique_id);
  if (rcl_atomic_load_uint64_t(&__rcl_instance_id) == 0) {
    // Roll over occurred.
    __rcl_next_unique_id--;  // roll back to avoid the next call succeeding.
    RCL_SET_ERROR_MSG("unique rcl instance ids exhausted");
    goto fail;
  }
  return RCL_RET_OK;
fail:
  __clean_up_init();
  return fail_ret;
}
/**
 * Calculate the elapsed time since the timer was last called.
 *
 * \param[in] timer handle to a valid, initialized timer
 * \param[out] time_since_last_call nanoseconds elapsed since the last call
 * \return RCL_RET_OK on success,
 *         RCL_RET_INVALID_ARGUMENT if either pointer is NULL,
 *         RCL_RET_TIMER_INVALID if the timer is not initialized, or
 *         an error code propagated from the steady time source.
 */
rcl_ret_t
rcl_timer_get_time_since_last_call(
  const rcl_timer_t * timer, rcl_time_point_value_t * time_since_last_call)
{
  RCL_CHECK_ARGUMENT_FOR_NULL(timer, RCL_RET_INVALID_ARGUMENT);
  RCL_CHECK_ARGUMENT_FOR_NULL(time_since_last_call, RCL_RET_INVALID_ARGUMENT);
  RCL_CHECK_FOR_NULL_WITH_MSG(timer->impl, "timer is invalid", return RCL_RET_TIMER_INVALID);
  rcl_time_point_value_t current_time;
  rcl_ret_t time_ret = rcl_steady_time_now(&current_time);
  if (RCL_RET_OK != time_ret) {
    // rcl error state should already be set by rcl_steady_time_now().
    return time_ret;
  }
  uint64_t last_call = rcl_atomic_load_uint64_t(&timer->impl->last_call_time);
  *time_since_last_call = current_time - last_call;
  return RCL_RET_OK;
}
/// Return the current rcl instance id via an atomic load of the global counter.
uint64_t
rcl_get_instance_id()
{
  uint64_t current_id = rcl_atomic_load_uint64_t(&__rcl_instance_id);
  return current_id;
}