void CameraTrigger::start() { if ((_trigger_mode == TRIGGER_MODE_INTERVAL_ALWAYS_ON || _trigger_mode == TRIGGER_MODE_DISTANCE_ALWAYS_ON) && _camera_interface->has_power_control() && !_camera_interface->is_powered_on()) { // If in always-on mode and the interface supports it, enable power to the camera toggle_power(); enable_keep_alive(true); } else { enable_keep_alive(false); } // enable immediately if configured that way if (_trigger_mode == TRIGGER_MODE_INTERVAL_ALWAYS_ON) { // enable and start triggering _trigger_enabled = true; update_intervalometer(); } else if (_trigger_mode == TRIGGER_MODE_DISTANCE_ALWAYS_ON) { // just enable, but do not fire. actual trigger is based on distance covered _trigger_enabled = true; } // start to monitor at high rate for trigger enable command work_queue(LPWORK, &_work, (worker_t)&CameraTrigger::cycle_trampoline, this, USEC2TICK(1)); }
void RenderMeshComponent::RenderPrep(const CameraInterface& camera) { for (int pass = 0; pass < RenderPass_Count; pass++) { pass_render_list_[pass].clear(); } for (auto iter = component_data_.begin(); iter != component_data_.end(); ++iter) { RenderMeshData* rendermesh_data = GetComponentData(iter->entity); TransformData* transform_data = Data<TransformData>(iter->entity); float max_cos = cos(camera.viewport_angle()); vec3 camera_facing = camera.facing(); vec3 camera_position = camera.position(); // Put each entity into the list for each render pass it is // planning on participating in. for (int pass = 0; pass < RenderPass_Count; pass++) { if (rendermesh_data->pass_mask & (1 << pass)) { if (!rendermesh_data->visible) continue; // Check to make sure objects are inside the frustrum of our // view-cone before we draw: vec3 entity_position = transform_data->world_transform.TranslationVector3D(); vec3 pos_relative_to_camera = (entity_position - camera_position) + camera_facing * kFrustrumOffset; // Cache off the distance from the camera because we'll use it // later as a depth aproxamation. rendermesh_data->z_depth = (entity_position - camera.position()).LengthSquared(); // Are we culling this object based on the view angle? // If so, does this lie outside of our view frustrum? if ((rendermesh_data->culling_mask & (1 << CullingTest_ViewAngle)) && (vec3::DotProduct(pos_relative_to_camera.Normalized(), camera_facing.Normalized()) < max_cos)) { // The origin point for this mesh is not in our field of view. Cut // out early, and don't bother rendering it. continue; } // Are we culling this object based on view distance? If so, // is it far enough away that we should skip it? 
if ((rendermesh_data->culling_mask & (1 << CullingTest_Distance)) && rendermesh_data->z_depth > culling_distance_squared()) { continue; } pass_render_list_[pass].push_back( RenderlistEntry(iter->entity, &iter->data)); } } } std::sort(pass_render_list_[RenderPass_Opaque].begin(), pass_render_list_[RenderPass_Opaque].end()); std::sort(pass_render_list_[RenderPass_Alpha].begin(), pass_render_list_[RenderPass_Alpha].end(), std::greater<RenderlistEntry>()); }
// NOTE(review): this definition of CameraTrigger::status() appears to be a
// stale duplicate — a second, more complete definition of the same member
// function (reporting pause state and camera power as well) exists later in
// this file, and two out-of-class definitions of the same member function
// cannot coexist in one translation unit.  This older copy also reads
// `_mode` where the newer one reads `_trigger_mode`.  Presumably this copy
// should be deleted; confirm against version history before removing.
void CameraTrigger::status()
{
	// Report whether triggering is currently enabled.
	PX4_INFO("state : %s", _trigger_enabled ? "enabled" : "disabled");
	PX4_INFO("mode : %i", _mode);
	PX4_INFO("interval : %.2f [ms]", (double)_interval);
	PX4_INFO("distance : %.2f [m]", (double)_distance);
	PX4_INFO("activation time : %.2f [ms]", (double)_activation_time);
	// Let the camera interface print its own backend-specific status.
	_camera_interface->info();
}
void CameraTrigger::status() { PX4_INFO("main state : %s", _trigger_enabled ? "enabled" : "disabled"); PX4_INFO("pause state : %s", _trigger_paused ? "paused" : "active"); PX4_INFO("mode : %i", _trigger_mode); if (_trigger_mode == TRIGGER_MODE_INTERVAL_ALWAYS_ON || _trigger_mode == TRIGGER_MODE_INTERVAL_ON_CMD) { PX4_INFO("interval : %.2f [ms]", (double)_interval); } else if (_trigger_mode == TRIGGER_MODE_DISTANCE_ALWAYS_ON || _trigger_mode == TRIGGER_MODE_DISTANCE_ON_CMD) { PX4_INFO("distance : %.2f [m]", (double)_distance); } if (_camera_interface->has_power_control()) { PX4_INFO("camera power : %s", _camera_interface->is_powered_on() ? "ON" : "OFF"); } PX4_INFO("activation time : %.2f [ms]", (double)_activation_time); _camera_interface->info(); }
// Render a single render-pass, by ID. void RenderMeshComponent::RenderPass(int pass_id, const CameraInterface& camera, fplbase::Renderer& renderer, const fplbase::Shader* shader_override) { mat4 camera_vp = camera.GetTransformMatrix(); for (size_t i = 0; i < pass_render_list_[pass_id].size(); i++) { EntityRef& entity = pass_render_list_[pass_id][i].entity; RenderMeshData* rendermesh_data = Data<RenderMeshData>(entity); TransformData* transform_data = Data<TransformData>(entity); AnimationData* anim_data = Data<AnimationData>(entity); // TODO: anim_data will set uniforms for an array of matricies. Each // matrix represents one bone position. const bool has_anim = anim_data != nullptr && anim_data->motivator.Valid(); const int num_mesh_bones = rendermesh_data->mesh->num_bones(); const int num_anim_bones = has_anim ? anim_data->motivator.DefiningAnim()->NumBones() : 0; const bool has_one_bone_anim = has_anim && (num_mesh_bones <= 1 || num_anim_bones == 1); const mat4 world_transform = has_one_bone_anim ? transform_data->world_transform * mat4::FromAffineTransform( anim_data->motivator.GlobalTransforms()[0]) : transform_data->world_transform; const mat4 mvp = camera_vp * world_transform; const mat4 world_matrix_inverse = world_transform.Inverse(); renderer.set_light_pos(world_matrix_inverse * light_position_); renderer.set_color(rendermesh_data->tint); renderer.set_model(world_transform); // If the mesh has a skeleton, we need to update the bone positions. // The positions are normally supplied by the animation, but if they are // not, use the default pose in the RenderMesh. if (num_mesh_bones > 1) { const bool use_default_pose = num_anim_bones != num_mesh_bones || rendermesh_data->default_pose; const mathfu::AffineTransform* bone_transforms = use_default_pose ? 
rendermesh_data->mesh->bone_global_transforms() : anim_data->motivator.GlobalTransforms(); rendermesh_data->mesh->GatherShaderTransforms( bone_transforms, rendermesh_data->shader_transforms); renderer.SetBoneTransforms(rendermesh_data->shader_transforms, rendermesh_data->num_shader_transforms); } if (!camera.IsStereo()) { renderer.set_camera_pos(world_matrix_inverse * camera.position()); renderer.set_model_view_projection(mvp); if (!shader_override && rendermesh_data->shader) { rendermesh_data->shader->Set(renderer); } else { shader_override->Set(renderer); } rendermesh_data->mesh->Render(renderer); } else { const fplbase::Shader* shader = nullptr; if (!shader_override && rendermesh_data->shader) { shader = rendermesh_data->shader; } else { shader = shader_override; } mathfu::vec4i viewport[2] = {camera.viewport(0), camera.viewport(1)}; mat4 camera_vp_stereo = camera.GetTransformMatrix(1); mat4 mvp_matrices[2] = {mvp, camera_vp_stereo * world_transform}; vec3 camera_positions[2] = {world_matrix_inverse * camera.position(0), world_matrix_inverse * camera.position(1)}; rendermesh_data->mesh->RenderStereo(renderer, shader, viewport, mvp_matrices, camera_positions); } } }