/*Traversal callback for the MPEG-4 AudioSource node.
 node: the AudioSource scene node
 rs: current traversal state (GF_TraverseState*)
 is_destroy: GF_TRUE when the node is being destroyed
Handles teardown, end-of-stream loop/deactivate logic, registration of the
audio input with the renderer, and propagation of the mute state.*/
static void audiosource_traverse(GF_Node *node, void *rs, Bool is_destroy)
{
	GF_TraverseState *tr_state = (GF_TraverseState *)rs;
	M_AudioSource *as = (M_AudioSource *)node;
	AudioSourceStack *st = (AudioSourceStack *)gf_node_get_private(node);

	if (is_destroy) {
		/*release the audio input and any time-node registration before freeing*/
		gf_sc_audio_predestroy(&st->input);
		if (st->time_handle.is_registered)
			gf_sc_unregister_time_node(st->input.compositor, &st->time_handle);
		gf_free(st);
		return;
	}

	/*end of stream reached: loop back if requested, otherwise deactivate*/
	if (st->input.stream && st->input.stream_finished) {
		if (gf_mo_get_loop(st->input.stream, 0)) {
			gf_sc_audio_restart(&st->input);
		} else if (st->is_active && gf_mo_should_deactivate(st->input.stream)) {
			audiosource_deactivate(st, as);
		}
	}

	/*active nodes register their audio input with the renderer each traversal*/
	if (st->is_active)
		gf_sc_audio_register(&st->input, tr_state);

	/*store mute flag*/
	st->input.is_muted = tr_state->switched_off;
}
/*Traversal callback for the MPEG-4 AudioClip node.
 node: the AudioClip scene node
 rs: current traversal state (GF_TraverseState*)
 is_destroy: GF_TRUE when the node is being destroyed
Same flow as AudioSource, plus a failure guard, the node's own loop field,
and emission of the duration_changed eventOut once the stream duration is
known.*/
static void audioclip_traverse(GF_Node *node, void *rs, Bool is_destroy)
{
	GF_TraverseState *tr_state = (GF_TraverseState *)rs;
	M_AudioClip *ac = (M_AudioClip *)node;
	AudioClipStack *st = (AudioClipStack *)gf_node_get_private(node);

	if (is_destroy) {
		/*release the audio input and any time-node registration before freeing*/
		gf_sc_audio_predestroy(&st->input);
		if (st->time_handle.is_registered)
			gf_sc_unregister_time_node(st->input.compositor, &st->time_handle);
		gf_free(st);
		return;
	}

	/*nothing to do if the clip failed to open*/
	if (st->failure)
		return;

	/*end of stream reached: loop back if requested, otherwise deactivate*/
	if (st->input.stream && st->input.stream_finished) {
		if (gf_mo_get_loop(st->input.stream, ac->loop)) {
			gf_sc_audio_restart(&st->input);
		} else if (ac->isActive && gf_mo_should_deactivate(st->input.stream)) {
			audioclip_deactivate(st, ac);
		}
	}

	/*active nodes register their audio input with the renderer each traversal*/
	if (ac->isActive)
		gf_sc_audio_register(&st->input, tr_state);

	/*fire duration_changed once the media duration becomes available*/
	if (st->set_duration && st->input.stream) {
		ac->duration_changed = gf_mo_get_duration(st->input.stream);
		gf_node_event_out_str(node, "duration_changed");
		st->set_duration = 0;
	}

	/*store mute flag*/
	st->input.is_muted = tr_state->switched_off;
}
/*SMIL timing evaluation for SVG audio playback.
 rti: the SMIL timing runtime of the element being evaluated
 normalized_scene_time: current normalized scene time (unused here)
 status: SMIL_TIMING_EVAL_* event being signalled
 slave_audio: optional audio node driven by another element (e.g. video),
              NULL when the timed element is the audio element itself
 video: optional video element whose attributes override the audio's
Opens/starts the audio stream on activation, restarts it on repeat, and
stops/unregisters it on freeze, removal or deactivation.*/
static void svg_audio_smil_evaluate_ex(SMIL_Timing_RTI *rti, Fixed normalized_scene_time, u32 status, GF_Node *slave_audio, GF_Node *video)
{
	SVG_audio_stack *stack;
	GF_Node *audio = slave_audio;

	if (!audio)
		audio = gf_smil_get_element(rti);
	stack = (SVG_audio_stack *)gf_node_get_private(audio);

	switch (status) {
	case SMIL_TIMING_EVAL_UPDATE:
		if (!stack->is_active && !stack->is_error) {
			/*not playing yet: try to open the stream if a URL is set*/
			if (stack->aurl.count) {
				SVGAllAttributes atts;
				Bool lock_timeline = GF_FALSE;
				GF_Err e;

				/*attributes come from the video element when the audio is slaved to it*/
				gf_svg_flatten_attributes((SVG_Element *)(video ? video : audio), &atts);
				if (atts.syncBehavior)
					lock_timeline = (*atts.syncBehavior == SMIL_SYNCBEHAVIOR_LOCKED) ? GF_TRUE : GF_FALSE;

				e = gf_sc_audio_open(&stack->input, &stack->aurl,
					atts.clipBegin ? (*atts.clipBegin) : 0.0,
					atts.clipEnd ? (*atts.clipEnd) : -1.0,
					lock_timeline);
				if (e == GF_OK) {
					gf_mo_set_speed(stack->input.stream, FIX_ONE);
					stack->is_active = GF_TRUE;
				} else {
					/*remember the failure so we don't retry every update*/
					stack->is_error = GF_TRUE;
				}
			}
		} else if (!slave_audio && stack->input.stream_finished && (gf_smil_get_media_duration(rti) < 0)) {
			/*stream done and SMIL duration unknown: resolve it from the media*/
			Double dur = gf_mo_get_duration(stack->input.stream);
			if (dur <= 0) {
				/*fall back to the last decoded frame time, in seconds*/
				dur = gf_mo_get_last_frame_time(stack->input.stream);
				dur /= 1000;
			}
			gf_smil_set_media_duration(rti, dur);
		}
		break;

	case SMIL_TIMING_EVAL_REPEAT:
		if (stack->is_active)
			gf_sc_audio_restart(&stack->input);
		break;

	case SMIL_TIMING_EVAL_FREEZE:
		/*fallthrough - freeze and remove both stop playback*/
	case SMIL_TIMING_EVAL_REMOVE:
		gf_sc_audio_stop(&stack->input);
		stack->is_active = GF_FALSE;
		break;

	case SMIL_TIMING_EVAL_DEACTIVATE:
		if (stack->is_active) {
			gf_sc_audio_stop(&stack->input);
			gf_sc_audio_unregister(&stack->input);
			stack->is_active = GF_FALSE;
		}
		break;

	default:
		break;
	}
}