Example #1
GF_Err gf_sr_set_scene(GF_Renderer *sr, GF_SceneGraph *scene_graph)
{
	u32 width, height;
	Bool do_notif;

	if (!sr) return GF_BAD_PARAM;

	gf_sr_lock(sr, 1);
	GF_LOG(GF_LOG_DEBUG, GF_LOG_RENDER, (scene_graph ? "[Render] Attaching new scene\n" : "[Render] Detaching scene\n"));

	if (sr->audio_renderer && (sr->scene != scene_graph)) {
		GF_LOG(GF_LOG_DEBUG, GF_LOG_RENDER, ("[Render] Resetting audio renderer\n"));
		gf_sr_ar_reset(sr->audio_renderer);
	}

#ifdef GF_SR_EVENT_QUEUE
	GF_LOG(GF_LOG_DEBUG, GF_LOG_RENDER, ("[Render] Resetting event queue\n"));
	gf_mx_p(sr->ev_mx);
	while (gf_list_count(sr->events)) {
		GF_Event *ev = (GF_Event*)gf_list_get(sr->events, 0);
		gf_list_rem(sr->events, 0);
		free(ev);
	}
#endif
	
	GF_LOG(GF_LOG_DEBUG, GF_LOG_RENDER, ("[Render] Resetting render module\n"));
	/*reset main surface*/
	sr->visual_renderer->SceneReset(sr->visual_renderer);

	/*set current graph*/
	sr->scene = scene_graph;
	do_notif = 0;
	if (scene_graph) {
#ifndef GPAC_DISABLE_SVG
		SVG_Length *w, *h;
#endif
		const char *opt;
		Bool is_svg = 0;
		u32 tag;
		GF_Node *top_node;
		Bool had_size_info = sr->has_size_info;
		/*get pixel size if any*/
		gf_sg_get_scene_size_info(sr->scene, &width, &height);
		sr->has_size_info = (width && height) ? 1 : 0;
		if (sr->has_size_info != had_size_info) sr->scene_width = sr->scene_height = 0;

		/*default back color is black*/
		if (! (sr->user->init_flags & GF_TERM_WINDOWLESS)) sr->back_color = 0xFF000000;

		top_node = gf_sg_get_root_node(sr->scene);
		tag = 0;
		if (top_node) tag = gf_node_get_tag(top_node);

#ifndef GPAC_DISABLE_SVG
		w = h = NULL;
		if ((tag>=GF_NODE_RANGE_FIRST_SVG) && (tag<=GF_NODE_RANGE_LAST_SVG)) {
			GF_FieldInfo info;
			is_svg = 1;
			if (gf_svg_get_attribute_by_tag(top_node, TAG_SVG_ATT_width, 0, 0, &info)==GF_OK) 
				w = info.far_ptr;
			if (gf_svg_get_attribute_by_tag(top_node, TAG_SVG_ATT_height, 0, 0, &info)==GF_OK) 
				h = info.far_ptr;
		}
#ifdef GPAC_ENABLE_SVG_SA
		else if ((tag>=GF_NODE_RANGE_FIRST_SVG_SA) && (tag<=GF_NODE_RANGE_LAST_SVG_SA)) {
			SVG_SA_svgElement *root = (SVG_SA_svgElement *) top_node;
			is_svg = 1;
			w = &root->width;
			h = &root->height;
		}
#endif
#ifdef GPAC_ENABLE_SVG_SANI
		else if ((tag>=GF_NODE_RANGE_FIRST_SVG_SANI) && (tag<=GF_NODE_RANGE_LAST_SVG_SANI)) {
			SVG_SANI_svgElement *root = (SVG_SANI_svgElement*) top_node;
			is_svg = 1;
			w = &root->width;
			h = &root->height;
		}
#endif
		/*default back color is white*/
		if (is_svg && ! (sr->user->init_flags & GF_TERM_WINDOWLESS)) sr->back_color = 0xFFFFFFFF;

		/*hack for SVG where size is set in %*/
		if (!sr->has_size_info && w && h) {
			sr->has_size_info = 1;
			sr->aspect_ratio = GF_ASPECT_RATIO_FILL_SCREEN;
			if (w->type!=SVG_NUMBER_PERCENTAGE) {
				width = FIX2INT(convert_svg_length_to_user(sr, w) );
			} else {
				width = 320; //FIX2INT(root->viewBox.width);
			}
			if (h->type!=SVG_NUMBER_PERCENTAGE) {
				height = FIX2INT(convert_svg_length_to_user(sr, h) );
			} else {
				height = 240; //FIX2INT(root->viewBox.height);
			}
		}
#endif
		/*default back color is key color*/
		if (sr->user->init_flags & GF_TERM_WINDOWLESS) {
			opt = gf_cfg_get_key(sr->user->config, "Rendering", "ColorKey");
			if (opt) {
				u32 r, g, b, a;
				sscanf(opt, "%02X%02X%02X%02X", &a, &r, &g, &b);
				sr->back_color = GF_COL_ARGB(0xFF, r, g, b);
			}
		}

		/*set scene size only if different, otherwise keep scaling/FS*/
		if ( !width || (sr->scene_width!=width) || !height || (sr->scene_height!=height)) {
			do_notif = sr->has_size_info || (!sr->scene_width && !sr->scene_height);
			SR_SetSceneSize(sr, width, height);

			/*get actual size in pixels*/
			width = sr->scene_width;
			height = sr->scene_height;

			if (!sr->user->os_window_handler) {
				/*only notify user if we are attached to a window*/
				do_notif = 0;
				if (sr->video_out->max_screen_width && (width > sr->video_out->max_screen_width))
					width = sr->video_out->max_screen_width;
				if (sr->video_out->max_screen_height && (height > sr->video_out->max_screen_height))
					height = sr->video_out->max_screen_height;

				gf_sr_set_size(sr,width, height);
			}
		}
	}

	SR_ResetFrameRate(sr);	
#ifdef GF_SR_EVENT_QUEUE
	gf_mx_v(sr->ev_mx);
#endif
	
	gf_sr_lock(sr, 0);
	/*here's a nasty trick: the app may respond to this by calling gf_sr_set_size from a different
	thread, but in an atomic way (this typically happens on Win32 when changing the window size). WE MUST
	NOTIFY THE SIZE CHANGE AFTER RELEASING THE RENDERER MUTEX*/
	if (do_notif && sr->user->EventProc) {
		GF_Event evt;
		evt.type = GF_EVENT_SCENE_SIZE;
		evt.size.width = width;
		evt.size.height = height;
		sr->user->EventProc(sr->user->opaque, &evt);
	}
	if (scene_graph)
		sr->draw_next_frame = 1;
	return GF_OK;
}
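
A minimal usage sketch for the function above (attach_or_detach_scene is a hypothetical helper; it assumes a GF_Renderer and a parsed GF_SceneGraph were created elsewhere, and relies only on the behavior visible in the code):

/* Hypothetical caller: attach a freshly loaded scene graph to the
 * renderer, or pass NULL to detach the current one. */
static GF_Err attach_or_detach_scene(GF_Renderer *sr, GF_SceneGraph *sg)
{
	GF_Err e = gf_sr_set_scene(sr, sg);
	/* GF_BAD_PARAM is returned when the renderer pointer is NULL */
	if (e != GF_OK) return e;
	/* if the scene carried size information, a GF_EVENT_SCENE_SIZE event
	 * may already have been delivered to user->EventProc at this point */
	return GF_OK;
}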
Example #2
/*handles reception of an SL-PDU, logical or physical*/
void gf_es_receive_sl_packet(GF_ClientService *serv, GF_Channel *ch, char *payload, u32 payload_size, GF_SLHeader *header, GF_Err reception_status)
{
	GF_SLHeader hdr;
	u32 nbAU, OldLength, size, AUSeqNum;
	Bool EndAU, NewAU;

	if (ch->bypass_sl_and_db) {
		GF_SceneDecoder *sdec;
		ch->IsClockInit = 1;
		if (ch->odm->subscene) {
			sdec = (GF_SceneDecoder *)ch->odm->subscene->scene_codec->decio;
		} else {
			sdec = (GF_SceneDecoder *)ch->odm->codec->decio;
		}
		gf_mx_p(ch->mx);
		sdec->ProcessData(sdec, payload, payload_size, ch->esd->ESID, 0, 0);
		gf_mx_v(ch->mx);
		return;
	}

	if (ch->es_state != GF_ESM_ES_RUNNING) return;

	if (ch->skip_sl) {
		Channel_ReceiveSkipSL(serv, ch, payload, payload_size);
		return;
	}
	if (ch->is_raw_channel) {
		ch->CTS = ch->DTS = (u32) (ch->ts_offset + (header->compositionTimeStamp - ch->seed_ts) * 1000 / ch->ts_res);
		if (!ch->IsClockInit) {
			gf_es_check_timing(ch);
		}
		if (payload)
			gf_es_dispatch_raw_media_au(ch, payload, payload_size, ch->CTS);
		return;
	}

	/*physical SL-PDU - depacketize*/
	if (!header) {
		u32 SLHdrLen;
		if (!payload_size) return;
		gf_sl_depacketize(ch->esd->slConfig, &hdr, payload, payload_size, &SLHdrLen);
		payload_size -= SLHdrLen;
		payload += SLHdrLen;
	} else {
		hdr = *header;
	}

	/*we ignore OCRs for the moment*/
	if (hdr.OCRflag) {
		if (!ch->IsClockInit) {
			/*channel is the OCR, re-initialize the clock with the proper OCR*/
			if (gf_es_owns_clock(ch)) {
				u32 OCR_TS;
				/*if the SL is mapped from a network module (e.g. not coded), OCR=PCR shall be given in 27 MHz units*/
				if (hdr.m2ts_pcr) {
					OCR_TS = (u32) ( hdr.objectClockReference / 27000);
				} else {
					OCR_TS = (u32) ( (s64) (hdr.objectClockReference) * ch->ocr_scale);
				}
				ch->clock->clock_init = 0;
				gf_clock_set_time(ch->clock, OCR_TS);
				/*many TS streams deployed with HLS have broken PCRs - we will check their consistency
				when receiving the first AU with DTS/CTS on this channel*/
				ch->clock->probe_ocr = 1;
				GF_LOG(GF_LOG_INFO, GF_LOG_SYNC, ("[SyncLayer] ES%d: initializing clock at STB %d from OCR TS %d (original TS "LLD") - %d buffering - OTB %d\n", ch->esd->ESID, gf_term_get_time(ch->odm->term), OCR_TS, hdr.objectClockReference, ch->clock->Buffering, gf_clock_time(ch->clock) ));
				if (ch->clock->clock_init) ch->IsClockInit = 1;

			}
		}
#if 0
		/*adjust clock if M2TS PCR discontinuity*/
		else if (hdr.m2ts_pcr==2) {
			u32 ck;
			u32 OCR_TS = (u32) ( hdr.objectClockReference / 27000);
			ck = gf_clock_time(ch->clock);
			GF_LOG(GF_LOG_DEBUG, GF_LOG_SYNC, ("[SyncLayer] ES%d: At OTB %d - OCR discontinuity: adjusting to %d (original TS "LLD") - original clock %d\n", ch->esd->ESID, gf_clock_real_time(ch->clock), OCR_TS, hdr.objectClockReference, ck));
//			gf_clock_set_time(ch->clock, (u32) OCR_TS);
		}
		/*compute clock drift*/
		else {
			u32 ck;
			u32 OCR_TS;
			if (hdr.m2ts_pcr) {
				OCR_TS = (u32) ( hdr.objectClockReference / 27000);
			} else {
				OCR_TS = (u32) ( (s64) (hdr.objectClockReference) * ch->ocr_scale);
			}
			ck = gf_clock_time(ch->clock);
			GF_LOG(GF_LOG_DEBUG, GF_LOG_SYNC, ("[SyncLayer] ES%d: At OTB %d adjusting OCR to %d (original TS "LLD") - diff %d\n", ch->esd->ESID, gf_clock_real_time(ch->clock), OCR_TS, hdr.objectClockReference, (s32) OCR_TS - (s32) ck));
//			gf_clock_set_time(ch->clock, (u32) OCR_TS);
		}
#else
		{
			u32 ck;
			u32 OCR_TS;
			if (hdr.m2ts_pcr) {
				OCR_TS = (u32) ( hdr.objectClockReference / 27000);
			} else {
				OCR_TS = (u32) ( (s64) (hdr.objectClockReference) * ch->ocr_scale);
			}
			ck = gf_clock_time(ch->clock);
			GF_LOG(GF_LOG_DEBUG, GF_LOG_SYNC, ("[SyncLayer] ES%d: At OTB %d got OCR %d (original TS "LLD") - diff %d%s\n", ch->esd->ESID, gf_clock_real_time(ch->clock), OCR_TS, hdr.objectClockReference, (s32) OCR_TS - (s32) ck, (hdr.m2ts_pcr==2) ? " - PCR Discontinuity flag" : "" ));
		}
#endif
		if (!payload_size) return;
	}


	/*check state*/
	if (!ch->codec_resilient && (reception_status==GF_CORRUPTED_DATA)) {
		Channel_WaitRAP(ch);
		return;
	}

	if (!ch->esd->slConfig->useAccessUnitStartFlag) {
		/*no AU signaling - each packet is an AU*/
		if (!ch->esd->slConfig->useAccessUnitEndFlag) 
			hdr.accessUnitEndFlag = hdr.accessUnitStartFlag = 1;
		/*otherwise AU are signaled by end of previous packet*/
		else
			hdr.accessUnitStartFlag = ch->NextIsAUStart;
	}

	/*get RAP*/
	if (ch->esd->slConfig->hasRandomAccessUnitsOnlyFlag) {
		hdr.randomAccessPointFlag = 1;
	} else if ((ch->carousel_type!=GF_ESM_CAROUSEL_MPEG2) && (!ch->esd->slConfig->useRandomAccessPointFlag || ch->codec_resilient) ) {
		ch->stream_state = 0;
	}

	if (ch->esd->slConfig->packetSeqNumLength) {
		if (ch->pck_sn && hdr.packetSequenceNumber) {
			/*repeated -> drop*/
			if (ch->pck_sn == hdr.packetSequenceNumber) {
				GF_LOG(GF_LOG_INFO, GF_LOG_SYNC, ("[SyncLayer] ES%d: repeated packet, dropping\n", ch->esd->ESID));
				return;
			}
			/*if the codec has no resiliency, check for packet drops*/
			if (!ch->codec_resilient && !hdr.accessUnitStartFlag) {
				if (ch->pck_sn == (u32) (1<<ch->esd->slConfig->packetSeqNumLength) ) {
					if (hdr.packetSequenceNumber) {
						GF_LOG(GF_LOG_WARNING, GF_LOG_SYNC, ("[SyncLayer] ES%d: packet loss, dropping & waiting for RAP\n", ch->esd->ESID));
						Channel_WaitRAP(ch);
						return;
					}
				} else if (ch->pck_sn + 1 != hdr.packetSequenceNumber) {
					GF_LOG(GF_LOG_WARNING, GF_LOG_SYNC, ("[SyncLayer] ES%d: packet loss, dropping & waiting for RAP\n", ch->esd->ESID));
					Channel_WaitRAP(ch);
					return;
				}
			}
		}
		ch->pck_sn = hdr.packetSequenceNumber;
	}

	/*if empty, skip the packet*/
	if (hdr.paddingFlag && !hdr.paddingBits) {
		GF_LOG(GF_LOG_DEBUG, GF_LOG_SYNC, ("[SyncLayer] ES%d: Empty packet - skipping\n", ch->esd->ESID));
		return;
	}
	/*IDLE stream shall be processed*/

	NewAU = 0;
	if (hdr.accessUnitStartFlag) {
		NewAU = 1;
		ch->NextIsAUStart = 0;
		ch->skip_carousel_au = 0;

		/*if we have a pending AU, add it*/
		if (ch->buffer) {
			if (ch->esd->slConfig->useAccessUnitEndFlag) {
				GF_LOG(GF_LOG_WARNING, GF_LOG_SYNC, ("[SyncLayer] ES%d: missed end of AU (DTS %d)\n", ch->esd->ESID, ch->DTS));
			}
			if (ch->codec_resilient) {
				if (!ch->IsClockInit) gf_es_check_timing(ch);
				Channel_DispatchAU(ch, 0);
			} else {
				gf_free(ch->buffer);
				ch->buffer = NULL;
				ch->AULength = 0;
				ch->len = ch->allocSize = 0;
			}
		}
		AUSeqNum = hdr.AU_sequenceNumber;
		/*Get CTS */
		if (ch->esd->slConfig->useTimestampsFlag) {
			if (hdr.compositionTimeStampFlag) {
				ch->net_dts = ch->net_cts = hdr.compositionTimeStamp;
				/*get DTS */
				if (hdr.decodingTimeStampFlag) ch->net_dts = hdr.decodingTimeStamp;

#if 0
				/*while the clock is not initialized, check the seed TS*/
				if (!ch->IsClockInit && (ch->net_dts < ch->seed_ts)) 
					ch->seed_ts = ch->net_dts;
#endif
				

				if (ch->net_cts<ch->seed_ts) {
					u64 diff = ch->seed_ts - ch->net_cts;
					ch->CTS_past_offset = (u32) (diff * 1000 / ch->ts_res) + ch->ts_offset;

					ch->net_dts = ch->net_cts = 0;
					ch->CTS = ch->DTS = gf_clock_time(ch->clock);
				} else {
					if (ch->net_dts>ch->seed_ts) ch->net_dts -= ch->seed_ts;
					else ch->net_dts=0;
					ch->net_cts -= ch->seed_ts;
					ch->CTS_past_offset = 0;

					/*TS wrapping not tested*/
					ch->CTS = (u32) (ch->ts_offset + (s64) (ch->net_cts) * 1000 / ch->ts_res);
					ch->DTS = (u32) (ch->ts_offset + (s64) (ch->net_dts) * 1000 / ch->ts_res);
				}

				if (ch->clock->probe_ocr && gf_es_owns_clock(ch)) {
					s32 diff_ts = ch->DTS;
					diff_ts -= ch->clock->init_time;
					if (ABS(diff_ts) > 10000) {
						GF_LOG(GF_LOG_ERROR, GF_LOG_SYNC, ("[SyncLayer] ES%d: invalid clock reference detected - DTS %d OCR %d - using DTS as OCR\n", ch->esd->ESID, ch->DTS, ch->clock->init_time));
						ch->clock->clock_init = 0;
						gf_clock_set_time(ch->clock, ch->DTS-1000);
					}
					ch->clock->probe_ocr = 0;
				}

				ch->no_timestamps = 0;
			} else {
				ch->no_timestamps = 1;
			}
		} else {
			/*use CU duration*/
			if (!ch->IsClockInit) ch->DTS = ch->CTS = ch->ts_offset;

			if (!ch->esd->slConfig->AUSeqNumLength) {
				if (!ch->au_sn) {
					ch->CTS = ch->ts_offset;
					ch->au_sn = 1;
				} else {
					ch->CTS += ch->esd->slConfig->CUDuration;
				}
			} else {
				//use the sequence number to get the TS
				if (AUSeqNum < ch->au_sn) {
					nbAU = ( (1<<ch->esd->slConfig->AUSeqNumLength) - ch->au_sn) + AUSeqNum;
				} else {
					nbAU = AUSeqNum - ch->au_sn;
				}
				ch->CTS += nbAU * ch->esd->slConfig->CUDuration;
			}
		}

		/*if the AU Length is carried in SL, get its size*/
		if (ch->esd->slConfig->AULength > 0) {
			ch->AULength = hdr.accessUnitLength;
		} else {
			ch->AULength = 0;
		}
		/*carousel for repeated AUs.*/
		if (ch->carousel_type) {
/* not used :			Bool use_rap = hdr.randomAccessPointFlag; */

			if (ch->carousel_type==GF_ESM_CAROUSEL_MPEG2) {
				AUSeqNum = hdr.m2ts_version_number_plus_one-1;
				/*MPEG-2 section carouseling does not take into account the RAP nature of the tables*/
				if (AUSeqNum==ch->au_sn) {
					if (ch->stream_state) {
						ch->stream_state=0;
						GF_LOG(GF_LOG_INFO, GF_LOG_SYNC, ("[SyncLayer] ES%d: MPEG-2 Carousel: tuning in\n", ch->esd->ESID));
					} else {
						ch->skip_carousel_au = 1;
						GF_LOG(GF_LOG_DEBUG, GF_LOG_SYNC, ("[SyncLayer] ES%d: MPEG-2 Carousel: repeated AU (TS %d) - skipping\n", ch->esd->ESID, ch->CTS));
						return;
					}
				} else {
					GF_LOG(GF_LOG_DEBUG, GF_LOG_SYNC, ("[SyncLayer] ES%d: MPEG-2 Carousel: updated AU (TS %d)\n", ch->esd->ESID, ch->CTS));
					ch->stream_state=0;
					ch->au_sn = AUSeqNum;
				}
			} else {
				if (hdr.randomAccessPointFlag) {
					/*initial tune-in*/
					if (ch->stream_state==1) {
						GF_LOG(GF_LOG_INFO, GF_LOG_SYNC, ("[SyncLayer] ES%d: RAP Carousel found (TS %d) - tuning in\n", ch->esd->ESID, ch->CTS));
						ch->au_sn = AUSeqNum;
						ch->stream_state = 0;
					}
					/*carousel RAP*/
					else if (AUSeqNum == ch->au_sn) {
						/*error recovery*/
						if (ch->stream_state==2) {
							GF_LOG(GF_LOG_INFO, GF_LOG_SYNC, ("[SyncLayer] ES%d: RAP Carousel found (TS %d) - recovering\n", ch->esd->ESID, ch->CTS));
							ch->stream_state = 0;
						} 
						else {
							ch->skip_carousel_au = 1;
							GF_LOG(GF_LOG_INFO, GF_LOG_SYNC, ("[SyncLayer] ES%d: RAP Carousel found (TS %d) - skipping\n", ch->esd->ESID, ch->CTS));
							return;
						}
					}
					/*regular RAP*/
					else {
						if (ch->stream_state==2) {
							GF_LOG(GF_LOG_INFO, GF_LOG_SYNC, ("[SyncLayer] ES%d: RAP Carousel found (TS %d) - recovering from previous errors\n", ch->esd->ESID, ch->CTS));
						}
						ch->au_sn = AUSeqNum;
						ch->stream_state = 0;
					}
				} 
				/*regular AU but waiting for RAP*/
				else if (ch->stream_state) {
#if 0
					ch->skip_carousel_au = 1;
					GF_LOG(GF_LOG_INFO, GF_LOG_SYNC, ("[SyncLayer] ES%d: Waiting for RAP Carousel - skipping\n", ch->esd->ESID));
					return;
#else
					GF_LOG(GF_LOG_INFO, GF_LOG_SYNC, ("[SyncLayer] ES%d: Tuning in before RAP\n", ch->esd->ESID));
#endif
				}
				/*previous packet(s) loss: check for critical or non-critical AUs*/
				else if (reception_status == GF_REMOTE_SERVICE_ERROR) { 
					if (ch->au_sn == AUSeqNum) {
						GF_LOG(GF_LOG_INFO, GF_LOG_SYNC, ("[SyncLayer] ES%d: Lost a non critical packet\n", ch->esd->ESID));
					} 
					/*Packet loss is critical*/
					else {
						ch->stream_state = 2;
						GF_LOG(GF_LOG_INFO, GF_LOG_SYNC, ("[SyncLayer] ES%d: Lost a critical packet - skipping\n", ch->esd->ESID));
						return;
					}
				} else {
					ch->au_sn = AUSeqNum;
					GF_LOG(GF_LOG_DEBUG, GF_LOG_SYNC, ("[SyncLayer] ES%d: NON-RAP AU received (TS %d)\n", ch->esd->ESID, ch->DTS));
				}
			}
		}

		/*no carousel signaling, tune-in at first RAP*/
		else if (hdr.randomAccessPointFlag) {
			ch->stream_state = 0;
			GF_LOG(GF_LOG_DEBUG, GF_LOG_SYNC, ("[SyncLayer] ES%d: RAP AU received\n", ch->esd->ESID));
		}
		/*waiting for RAP, return*/
		else if (ch->stream_state) {
			GF_LOG(GF_LOG_INFO, GF_LOG_SYNC, ("[SyncLayer] ES%d: Waiting for RAP - skipping AU (DTS %d)\n", ch->esd->ESID, ch->DTS));
			return;
		}
	}

	/*update the RAP marker on a packet base (to cope with AVC/H264 NALU->AU reconstruction)*/
	if (hdr.randomAccessPointFlag) ch->IsRap = 1;

	/*get AU end state*/	
	OldLength = ch->buffer ? ch->len : 0;
	EndAU = hdr.accessUnitEndFlag;
	if (ch->AULength == OldLength + payload_size) EndAU = 1;
	if (EndAU) ch->NextIsAUStart = 1;

	if (EndAU && !ch->IsClockInit) gf_es_check_timing(ch);

	/* we need to skip all the packets of the current AU in the carousel scenario */
	if (ch->skip_carousel_au == 1) return;

	if (!payload_size && EndAU && ch->buffer) {
		GF_LOG(GF_LOG_DEBUG, GF_LOG_SYNC, ("[SyncLayer] ES%d: Empty packet, flushing buffer\n", ch->esd->ESID));
		Channel_DispatchAU(ch, 0);
		return;
	}
	if (!payload_size) return;

	/*missed beginning, unusable*/
	if (!ch->buffer && !NewAU) {
		if (ch->esd->slConfig->useAccessUnitStartFlag) {
			GF_LOG(GF_LOG_ERROR, GF_LOG_SYNC, ("[SyncLayer] ES%d: missed begin of AU\n", ch->esd->ESID));
		}
		if (ch->codec_resilient) NewAU = 1;
		else return;
	}

	/*Write the Packet payload to the buffer*/
	if (NewAU) {
		/*we should NEVER have a bitstream at this stage*/
		assert(!ch->buffer);
		/*ignore length fields*/
		size = payload_size + ch->media_padding_bytes;
		ch->buffer = (char*)gf_malloc(sizeof(char) * size);
		if (!ch->buffer) {
			assert(0);
			return;
		}

		ch->allocSize = size;
		memset(ch->buffer, 0, sizeof(char) * size);
		ch->len = 0;
	}
	if (!ch->esd->slConfig->usePaddingFlag) hdr.paddingFlag = 0;
	
	if (ch->ipmp_tool) {
		GF_Err e;
		GF_IPMPEvent evt;
		memset(&evt, 0, sizeof(evt));
		evt.event_type=GF_IPMP_TOOL_PROCESS_DATA;
		evt.channel = ch;
		evt.data = payload;
		evt.data_size = payload_size;
		evt.is_encrypted = hdr.isma_encrypted;
		evt.isma_BSO = hdr.isma_BSO;
		e = ch->ipmp_tool->process(ch->ipmp_tool, &evt);

		/*we discard undecrypted AU*/
		if (e) {
			if (e==GF_EOS) {
				gf_es_on_eos(ch);
				/*restart*/
				if (evt.restart_requested) {
					if (ch->odm->parentscene->is_dynamic_scene) {
						gf_scene_restart_dynamic(ch->odm->parentscene, 0);
					} else {
						mediacontrol_restart(ch->odm);
					}
				}
			}
			return;
		}
	}

	if (hdr.paddingFlag && !EndAU) {	
		/*to do - this shouldn't happen anyway */

	} else {
		/*check if enough space*/
		size = ch->allocSize;
		if (size && (payload_size + ch->len <= size)) {
			memcpy(ch->buffer+ch->len, payload, payload_size);
			ch->len += payload_size;
		} else {
			size = payload_size + ch->len + ch->media_padding_bytes;
			ch->buffer = (char*)gf_realloc(ch->buffer, sizeof(char) * size);
			memcpy(ch->buffer+ch->len, payload, payload_size);
			ch->allocSize = size;
			ch->len += payload_size;
		}
		if (hdr.paddingFlag) ch->padingBits = hdr.paddingBits;
	}

	if (EndAU) Channel_DispatchAU(ch, hdr.au_duration);
}
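
A minimal delivery sketch for the entry point above (deliver_physical_pdu is a hypothetical helper, assuming the service and channel objects come from the surrounding terminal code): passing a NULL header makes the function depacketize the SL header from the payload itself via gf_sl_depacketize (physical SL-PDU), while passing a filled GF_SLHeader skips that step (logical SL-PDU).

/* Hypothetical push from a network module: a physical SL-PDU, so the
 * SL header is parsed out of the payload inside the receive call */
static void deliver_physical_pdu(GF_ClientService *serv, GF_Channel *ch,
                                 char *data, u32 size)
{
	gf_es_receive_sl_packet(serv, ch, data, size, NULL, GF_OK);
}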
Example #3
static u32 MM_SimulationStep_Decoder(GF_Terminal *term, u32 *nb_active_decs)
{
	CodecEntry *ce;
	GF_Err e;
	u32 count, remain;
	u32 time_taken, time_slice, time_left;

#ifndef GPAC_DISABLE_LOG
	term->compositor->networks_time = gf_sys_clock();
#endif

//	GF_LOG(GF_LOG_DEBUG, GF_LOG_MEDIA, ("[Media Manager] Entering simulation step\n"));
	gf_term_handle_services(term);

#ifndef GPAC_DISABLE_LOG
	term->compositor->networks_time = gf_sys_clock() - term->compositor->networks_time;
#endif

#ifndef GPAC_DISABLE_LOG
	term->compositor->decoders_time = gf_sys_clock();
#endif
	gf_mx_p(term->mm_mx);

	count = gf_list_count(term->codecs);
	time_left = term->frame_duration;
	*nb_active_decs = 0;

	if (term->last_codec >= count) term->last_codec = 0;
	remain = count;
	/*this is ultra basic - a nicer scheduling system would be much better*/
	while (remain) {
		ce = (CodecEntry*)gf_list_get(term->codecs, term->last_codec);
		if (!ce) break;

		if (!(ce->flags & GF_MM_CE_RUNNING) || (ce->flags & GF_MM_CE_THREADED) || ce->dec->force_cb_resize) {
			remain--;
			if (!remain) break;
			term->last_codec = (term->last_codec + 1) % count;
			continue;
		}
		time_slice = ce->dec->Priority * time_left / term->cumulated_priority;
		if (ce->dec->PriorityBoost) time_slice *= 2;
		time_taken = gf_sys_clock();
		(*nb_active_decs) ++;
		e = gf_codec_process(ce->dec, time_slice);
		time_taken = gf_sys_clock() - time_taken;
		/*avoid signaling errors too often...*/
#ifndef GPAC_DISABLE_LOG
		if (e) {
			GF_LOG(GF_LOG_WARNING, GF_LOG_CODEC, ("[ODM%d] Decoding Error %s\n", ce->dec->odm->OD->objectDescriptorID, gf_error_to_string(e) ));
		} else {
			//GF_LOG(GF_LOG_DEBUG, GF_LOG_CODEC, ("[%s] Decode time slice %d ms out of %d ms\n", ce->dec->decio ? ce->dec->decio->module_name : "RAW", time_taken, time_left ));
		}
#endif
		if (ce->flags & GF_MM_CE_DISCARDED) {
			gf_free(ce);
			gf_list_rem(term->codecs, term->last_codec);
			count--;
			if (!count)
				break;
		} else {
			if (ce->dec->CB && (ce->dec->CB->UnitCount >= ce->dec->CB->Min)) ce->dec->PriorityBoost = 0;
		}
		term->last_codec = (term->last_codec + 1) % count;

		remain -= 1;
		if (time_left > time_taken) {
			time_left -= time_taken;
			if (!remain) break;
		} else {
			time_left = 0;
			break;
		}
	}
	gf_mx_v(term->mm_mx);
#ifndef GPAC_DISABLE_LOG
	term->compositor->decoders_time = gf_sys_clock() - term->compositor->decoders_time;
#endif

	return time_left;
}
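
The per-codec budget in the loop above is a priority-weighted share of the remaining frame time, optionally doubled by PriorityBoost. A standalone sketch of that arithmetic (illustrative values only, not part of the module):

/* With a 33 ms frame budget and cumulated priority 6, a codec of
 * priority 2 gets 33*2/6 = 11 ms, doubled to 22 ms when boosted. */
static u32 compute_time_slice(u32 priority, u32 time_left, u32 cumulated_priority, Bool boost)
{
	u32 slice = priority * time_left / cumulated_priority;
	if (boost) slice *= 2;
	return slice;
}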
Example #4
static void gf_rtp_switch_quality(RTPClient *rtp, Bool switch_up)
{
    u32 i,count;
    RTPStream *ch, *cur_ch;
    GF_NetworkCommand com;

    count = gf_list_count(rtp->channels);
    /*find the current stream*/
    ch = cur_ch = NULL;
    for (i = 0; i < count; i++) {
        cur_ch = (RTPStream *) gf_list_get(rtp->channels, i);
        if (cur_ch->mid != rtp->cur_mid) {
            cur_ch=NULL;
            continue;
        }
        break;
    }
    if (!cur_ch) return;

    if (switch_up)
    {
        /*this is the highest stream*/
        if (!cur_ch->next_stream)
        {
            cur_ch->status = RTP_Running;
            return;
        }
        else
        {
            for (i = 0; i < count; i++) {
                ch = (RTPStream *) gf_list_get(rtp->channels, i);
                if (ch->mid == cur_ch->next_stream)
                {
                    /*resume streaming next channel*/
                    gf_mx_p(rtp->mx);
                    RP_InitStream(ch, 0);
                    gf_mx_v(rtp->mx);
                    ch->status = RTP_Running;
                    rtp->cur_mid = ch->mid;
                    break;
                }

            }
        }
    }
    else
    {
        /*this is the lowest stream i.e base layer*/
        if (!cur_ch->prev_stream)
        {
            cur_ch->status = RTP_Running;
            return;
        }
        else
        {
            for (i = 0; i < count; i++) {
                ch = (RTPStream *) gf_list_get(rtp->channels, i);
                if (ch->mid == cur_ch->prev_stream)
                {
                    /*stop streaming current channel*/
                    gf_rtp_stop(cur_ch->rtp_ch);
                    cur_ch->status = RTP_Connected;
                    com.command_type = GF_NET_CHAN_RESET;
                    com.base.on_channel = cur_ch;
                    gf_service_command(rtp->service, &com, GF_OK);
                    rtp->cur_mid = ch->mid;
                    break;
                }
            }
        }
    }
    GF_LOG(GF_LOG_DEBUG, GF_LOG_CODEC, ("Switch from ES%d to ES%d\n", cur_ch->mid, ch->mid));
    return;
}
Example #5
File: decoder.c Project: erelh/gpac
static GF_Err MediaCodec_Process(GF_Codec *codec, u32 TimeAvailable)
{
	GF_CMUnit *CU;
	GF_DBUnit *AU;
	GF_Channel *ch, *prev_ch;
	Bool drop_late_frames = 0;
	u32 mmlevel, cts;
	u32 first, entryTime, now, obj_time, unit_size;
	GF_MediaDecoder *mdec = (GF_MediaDecoder*)codec->decio;
	GF_Err e = GF_OK;
	CU = NULL;

	/*if the video codec is muted, don't decode (try to save resources);
	if the audio codec is muted, we still dispatch to keep sync in place*/
	if (codec->Muted && (codec->type==GF_STREAM_VISUAL) ) return GF_OK;

	if ( (codec->CB->UnitCount > 1) && (codec->CB->Capacity == codec->CB->UnitCount) )
		return GF_OK;

	entryTime = gf_term_get_time(codec->odm->term);
	if (codec->odm->term->flags & GF_TERM_DROP_LATE_FRAMES)
		drop_late_frames = 1;

	/*fetch next AU in DTS order for this codec*/
	MediaDecoder_GetNextAU(codec, &ch, &AU);
	/*no active channel return*/
	if (!AU || !ch) {
		/*if the codec is in EOS state, assume we're done*/
		if (codec->Status == GF_ESM_CODEC_EOS) {
			/*if codec is reordering, try to flush it*/
			if (codec->is_reordering) {
				if ( LockCompositionUnit(codec, codec->last_unit_cts+1, &CU, &unit_size) == GF_OUT_OF_MEM)
					return GF_OK;
				assert( CU );
				unit_size = 0;
				e = mdec->ProcessData(mdec, NULL, 0, 0, CU->data, &unit_size, 0, 0);
				if (e==GF_OK) {
					e = UnlockCompositionUnit(codec, CU, unit_size);
					if (unit_size) return GF_OK;
				}
			}
			gf_term_stop_codec(codec, 0);
			if (codec->CB) gf_cm_set_eos(codec->CB);
		}
		/*if no data, and channel not buffering, ABORT CB buffer (data timeout or EOS not detectable)*/
		else if (ch && !ch->BufferOn)
			gf_cm_abort_buffering(codec->CB);

		GF_LOG(GF_LOG_DEBUG, GF_LOG_CODEC, ("[%s] ODM%d: No data in decoding buffer\n", codec->decio->module_name, codec->odm->OD->objectDescriptorID));
		return GF_OK;
	}

	/*get the object time*/
	obj_time = gf_clock_time(codec->ck);
	/*Media Time for media codecs is updated in the CB*/

	if (!codec->CB) {
		gf_es_drop_au(ch);
		return GF_BAD_PARAM;
	}
	/*we are still flushing our CB - keep the current pending AU and wait for CB resize*/
	if (codec->force_cb_resize) {
		if (codec->CB->UnitCount>1) {
			return GF_OK;
		}
		GF_LOG(GF_LOG_INFO, GF_LOG_CODEC, ("[%s] ODM%d: Resizing output buffer %d -> %d\n", codec->decio->module_name, codec->odm->OD->objectDescriptorID, codec->CB->UnitSize, codec->force_cb_resize));
		ResizeCompositionBuffer(codec, codec->force_cb_resize);
		codec->force_cb_resize=0;
	}

	/*image codecs*/
	if (codec->CB->Capacity == 1) {
		/*a SHA signature is computed for each AU. This avoids decoding/recompositing identical AUs (for instance when streaming a carousel)*/
		u8 new_unit_signature[20];
		gf_sha1_csum((u8*)AU->data, AU->dataLength, new_unit_signature);
		if (!memcmp(codec->last_unit_signature, new_unit_signature, sizeof(new_unit_signature))) {
			codec->nb_repeted_frames++;
			gf_es_drop_au(ch);
			return GF_OK;
		}

		/*usually only one image is tolerated in the stream, but just in case force reset of CB*/
		if (codec->CB->UnitCount && (obj_time>=AU->CTS)) {
			gf_mx_p(codec->odm->mx);
			codec->CB->output->dataLength = 0;
			codec->CB->UnitCount = 0;
			gf_mx_v(codec->odm->mx);
		}

		/*CB is already full*/
		if (codec->CB->UnitCount)
			return GF_OK;

		codec->nb_repeted_frames = 0;
		memcpy(codec->last_unit_signature, new_unit_signature, sizeof(new_unit_signature));

	}

	/*try to refill the full buffer*/
	first = 1;
	while (codec->CB->Capacity > codec->CB->UnitCount) {
		/*set media processing level*/
		mmlevel = GF_CODEC_LEVEL_NORMAL;
		/*SEEK: if the last frame had the same TS, we are seeking. Ask the codec to drop*/
		if (!ch->skip_sl && codec->last_unit_cts && (codec->last_unit_cts == AU->CTS) && !ch->esd->dependsOnESID) {
			mmlevel = GF_CODEC_LEVEL_SEEK;
			/*object clock is paused by media control or terminal is paused: exact frame seek*/
			if (
#ifndef GPAC_DISABLE_VRML
				(codec->ck->mc && codec->ck->mc->paused) ||
#endif
				(codec->odm->term->play_state)
			) {
				gf_cm_rewind_input(codec->CB);
				mmlevel = GF_CODEC_LEVEL_NORMAL;
				/*force staying in step-mode*/
				codec->odm->term->compositor->step_mode=1;
			}
		}
		/*only perform drop in normal playback*/
		else if (codec->CB->Status == CB_PLAY) {
			/*extremely late, set the level to drop
			 NOTE: the 100 ms safety guard is to avoid discarding audio*/
			if (!ch->skip_sl && (AU->CTS + (codec->is_reordering ? 1000 : 100) < obj_time) ) {
				mmlevel = GF_CODEC_LEVEL_DROP;
				GF_LOG(GF_LOG_DEBUG, GF_LOG_CODEC, ("[%s] ODM%d: frame too late (%d vs %d) - using drop level\n", codec->decio->module_name, codec->odm->OD->objectDescriptorID, AU->CTS, obj_time));

				if (ch->resync_drift && (AU->CTS + ch->resync_drift < obj_time)) {
					ch->clock->StartTime += (obj_time - AU->CTS);
					GF_LOG(GF_LOG_WARNING, GF_LOG_CODEC, ("[%s] ODM%d: decoder too slow on OCR stream - rewinding clock of %d ms\n", codec->decio->module_name, codec->odm->OD->objectDescriptorID, obj_time - AU->CTS));
					obj_time = gf_clock_time(codec->ck);
				}
			}
			/*we are late according to the media manager*/
			else if (codec->PriorityBoost) {
				mmlevel = GF_CODEC_LEVEL_VERY_LATE;
			}
			/*otherwise we must have an idea of the load in order to set the right level;
			use the composition buffer for that, only on the first frame*/
			else if (first) {
				//if the CB is almost empty set to very late
				if (codec->CB->UnitCount <= codec->CB->Min+1) {
					mmlevel = GF_CODEC_LEVEL_VERY_LATE;
				} else if (codec->CB->UnitCount * 2 <= codec->CB->Capacity) {
					mmlevel = GF_CODEC_LEVEL_LATE;
				}
				first = 0;
			}
		}

		if (ch->skip_sl) {
			if (codec->bytes_per_sec) {
				AU->CTS = codec->last_unit_cts + ch->ts_offset + codec->cur_audio_bytes * 1000 / codec->bytes_per_sec;
			} else if (codec->fps) {
				AU->CTS = codec->last_unit_cts + ch->ts_offset + (u32) (codec->cur_video_frames * 1000 / codec->fps);
			}
		}
		if ( LockCompositionUnit(codec, AU->CTS, &CU, &unit_size) == GF_OUT_OF_MEM) {
			GF_LOG(GF_LOG_DEBUG, GF_LOG_CODEC, ("[%s] Exit decode loop because no more space in composition buffer\n", codec->decio->module_name ));
			return GF_OK;
		}

scalable_retry:

		now = gf_term_get_time(codec->odm->term);

		assert( CU );
		if (!CU->data && unit_size)
			e = GF_OUT_OF_MEM;
		else
			e = mdec->ProcessData(mdec, AU->data, AU->dataLength, ch->esd->ESID, CU->data, &unit_size, AU->PaddingBits, mmlevel);
		now = gf_term_get_time(codec->odm->term) - now;
		if (codec->Status == GF_ESM_CODEC_STOP) {
			GF_LOG(GF_LOG_DEBUG, GF_LOG_CODEC, ("[%s] Exit decode loop because codec has been stopped\n", codec->decio->module_name));
			return GF_OK;
		}
		/*input is too small, resize composition memory*/
		switch (e) {
		case GF_BUFFER_TOO_SMALL:
			/*release but no dispatch*/
			UnlockCompositionUnit(codec, CU, 0);

			/*if we have pending media, do wait! - this should be fixed by avoiding destroying the CB ... */
			if (codec->CB->UnitCount>1) {
				GF_LOG(GF_LOG_INFO, GF_LOG_CODEC, ("[%s] ODM%d ES%d: Resize output buffer requested\n", codec->decio->module_name, codec->odm->OD->objectDescriptorID, ch->esd->ESID));
				codec->force_cb_resize = unit_size;
				return GF_OK;
			}
			GF_LOG(GF_LOG_INFO, GF_LOG_CODEC, ("[%s] ODM%d ES%d: Resizing output buffer %d -> %d\n", codec->decio->module_name, codec->odm->OD->objectDescriptorID, ch->esd->ESID, codec->CB->UnitSize, unit_size));
			ResizeCompositionBuffer(codec, unit_size);
			continue;

		/*this happens a lot when using non-MPEG-4 streams (ex: ffmpeg demuxer)*/
		case GF_PACKED_FRAMES:
			/*in seek don't dispatch any output*/
			if (mmlevel >= GF_CODEC_LEVEL_DROP) {
				if (drop_late_frames)
					unit_size = 0;
				else
					ch->clock->last_TS_rendered = codec->CB->LastRenderedTS;
			}
			e = UnlockCompositionUnit(codec, CU, unit_size);

			GF_LOG(GF_LOG_DEBUG, GF_LOG_CODEC, ("[%s] ODM%d ES%d at %d decoded packed frame TS %d in %d ms\n", codec->decio->module_name, codec->odm->OD->objectDescriptorID, ch->esd->ESID, gf_clock_real_time(ch->clock), AU->CTS, now));
			if (ch->skip_sl) {
				if (codec->bytes_per_sec) {
					codec->cur_audio_bytes += unit_size;
				} else if (codec->fps && unit_size) {
					codec->cur_video_frames += 1;
				}
			} else {
				u32 deltaTS = 0;
				if (codec->bytes_per_sec) {
					deltaTS = unit_size * 1000 / codec->bytes_per_sec;
				} /*else if (0 && codec->fps && unit_size) {
					deltaTS = (u32) (1000.0f / codec->fps);
				} */else {
					deltaTS = (AU->DTS - codec->last_unit_dts);
				}
				AU->CTS += deltaTS;
			}
			codec_update_stats(codec, 0, now);
			continue;

		/*for all cases below, don't release the composition buffer until we are sure we are not
		processing a scalable stream*/
		case GF_OK:
			if (unit_size) {
				GF_LOG(GF_LOG_DEBUG, GF_LOG_CODEC, ("[%s] ODM%d ES%d at %d decoded frame TS %d in %d ms (DTS %d - size %d) - %d in CB\n", codec->decio->module_name, codec->odm->OD->objectDescriptorID, ch->esd->ESID, gf_clock_real_time(ch->clock), AU->CTS, now, AU->DTS, AU->dataLength, codec->CB->UnitCount + 1));
			}
			/*if no size, the decoder is not using the composition memory - if the object is in initial buffering, resume it!!*/
			else if (codec->CB->Status == CB_BUFFER) {
				codec->nb_dispatch_skipped++;
				if (codec->nb_dispatch_skipped==codec->CB->UnitCount)
					gf_cm_abort_buffering(codec->CB);
			}

			codec_update_stats(codec, AU->dataLength, now);
			if (ch->skip_sl) {
				if (codec->bytes_per_sec) {
					codec->cur_audio_bytes += unit_size;
					while (codec->cur_audio_bytes>codec->bytes_per_sec) {
						codec->cur_audio_bytes -= codec->bytes_per_sec;
						codec->last_unit_cts += 1000;
					}
				} else if (codec->fps && unit_size) {
					codec->cur_video_frames += 1;
				}
			}
#ifndef GPAC_DISABLE_LOG
			if (codec->odm->flags & GF_ODM_PREFETCH) {
				GF_LOG(GF_LOG_INFO, GF_LOG_CODEC, ("[%s] ODM%d ES%d At %d decoding frame TS %d in prefetch mode\n", codec->decio->module_name, codec->odm->OD->objectDescriptorID, ch->esd->ESID, gf_clock_real_time(ch->clock), AU->CTS));
			}
#endif
			break;
		default:
			unit_size = 0;
			/*error - if the object is in initial buffering, resume it!!*/
			gf_cm_abort_buffering(codec->CB);
			GF_LOG(GF_LOG_INFO, GF_LOG_CODEC, ("[%s] ODM%d ES%d At %d (frame TS %d - %d ms ): decoding error %s\n", codec->decio->module_name, codec->odm->OD->objectDescriptorID, ch->esd->ESID, gf_clock_real_time(ch->clock), AU->CTS, now, gf_error_to_string(e) ));
			e = GF_OK;
			break;
		}

		codec->last_unit_dts = AU->DTS;
		/*remember base layer timing*/
		if (!ch->esd->dependsOnESID && !ch->skip_sl) {
			codec->last_unit_cts = AU->CTS;
			codec->first_frame_processed = 1;
		}

		/*store current CTS*/
		cts = AU->CTS;
		prev_ch = ch;

		gf_es_drop_au(ch);
		AU = NULL;

		if (e) {
			UnlockCompositionUnit(codec, CU, unit_size);
			GF_LOG(GF_LOG_DEBUG, GF_LOG_CODEC, ("[%s] Exit decode loop because error %s\n", codec->decio->module_name, gf_error_to_string(e) ));
			return e;
		}

		MediaDecoder_GetNextAU(codec, &ch, &AU);
		/*same CTS: same output, likely scalable stream so don't release the CB*/
		if (AU && (AU->CTS == cts) && (ch != prev_ch) ) {
			unit_size = codec->CB->UnitSize;
			goto scalable_retry;
		}

		/*in seek don't dispatch any output*/
		if (mmlevel >= GF_CODEC_LEVEL_DROP) {
			if (drop_late_frames || (mmlevel == GF_CODEC_LEVEL_SEEK) )
				unit_size = 0;
			else
				ch->clock->last_TS_rendered = codec->CB->LastRenderedTS;
		}

		UnlockCompositionUnit(codec, CU, unit_size);
		if (!ch || !AU) {
			GF_LOG(GF_LOG_DEBUG, GF_LOG_CODEC, ("[%s] Exit decode loop because no more input data\n", codec->decio->module_name));
			return GF_OK;
		}
		now = gf_term_get_time(codec->odm->term) - entryTime;
		/*escape from decoding loop only if above critical limit - this is to avoid starvation on audio*/
		if (!ch->esd->dependsOnESID && (codec->CB->UnitCount > codec->CB->Min)) {
			if (now >= TimeAvailable) {
				GF_LOG(GF_LOG_DEBUG, GF_LOG_CODEC, ("[%s] Exit decode loop because time is up: %d vs %d available\n", codec->decio->module_name, now, TimeAvailable));
				return GF_OK;
			}
		} else if (now >= 10*TimeAvailable) {
			GF_LOG(GF_LOG_DEBUG, GF_LOG_CODEC, ("[%s] Exit decode loop because running for too long: %d vs %d available\n", codec->decio->module_name, now, TimeAvailable));
			return GF_OK;
		}
		MediaDecoder_GetNextAU(codec, &ch, &AU);
		if (!ch || !AU) return GF_OK;
	}
	return GF_OK;
}
Example #6
static void RP_RemoveCommand(RTSPSession *sess)
{
	gf_mx_p(sess->owner->mx);
	gf_list_rem(sess->rtsp_commands, 0);
	gf_mx_v(sess->owner->mx);
}
Example #7
GF_Err SDLVid_SetFullScreen(GF_VideoOutput *dr, u32 bFullScreenOn, u32 *screen_width, u32 *screen_height)
{
	u32 bpp, pref_bpp;
	SDLVID();

	if (ctx->fullscreen==bFullScreenOn) return GF_OK;

	/*lock to make sure the event queue is not processed under X*/
	gf_mx_p(ctx->evt_mx);
	ctx->fullscreen = bFullScreenOn;

	pref_bpp = bpp = ctx->screen->format->BitsPerPixel;

	if (ctx->fullscreen) {
		u32 flags;
		Bool switch_res = 0;
		const char *sOpt = gf_modules_get_option((GF_BaseInterface *)dr, "Video", "SwitchResolution");
		if (sOpt && !stricmp(sOpt, "yes")) switch_res = 1;
		if (!dr->max_screen_width || !dr->max_screen_height) switch_res = 1;

		flags = (ctx->output_3d_type==1) ? SDL_GL_FULLSCREEN_FLAGS : SDL_FULLSCREEN_FLAGS;
		ctx->store_width = *screen_width;
		ctx->store_height = *screen_height;
		if (switch_res) {
			u32 i;
			ctx->fs_width = *screen_width;
			ctx->fs_height = *screen_height;
			for(i=0; i<nb_video_modes; i++) {
				if (ctx->fs_width<=video_modes[2*i] && ctx->fs_height<=video_modes[2*i + 1]) {
					if ((pref_bpp = SDL_VideoModeOK(video_modes[2*i], video_modes[2*i+1], bpp, flags))) {
						ctx->fs_width = video_modes[2*i];
						ctx->fs_height = video_modes[2*i + 1];
						break;
					}
				}
			}
		} else {
			ctx->fs_width = dr->max_screen_width;
			ctx->fs_height = dr->max_screen_height;
		}
		ctx->screen = SDL_SetVideoMode(ctx->fs_width, ctx->fs_height, pref_bpp, flags);
		/*we switched bpp, clean all objects*/
		if (bpp != pref_bpp) SDLVid_DestroyObjects(ctx);
		*screen_width = ctx->fs_width;
		*screen_height = ctx->fs_height;
		/*GL has changed*/
		if (ctx->output_3d_type==1) {
			GF_Event evt;
			memset(&evt, 0, sizeof(GF_Event));
			evt.type = GF_EVENT_VIDEO_SETUP;
			dr->on_event(dr->evt_cbk_hdl, &evt);
		}
	} else {
		SDLVid_ResizeWindow(dr, ctx->store_width, ctx->store_height);
		*screen_width = ctx->store_width;
		*screen_height = ctx->store_height;
	}
	gf_mx_v(ctx->evt_mx);
	if (!ctx->screen) return GF_IO_ERR;
	return GF_OK;
}
Example #8
static u32 ts_interleave_thread_run(void *param) {
    GF_AbstractTSMuxer * mux = (GF_AbstractTSMuxer *) param;
    AVStream * video_st = mux->video_st;
    AVStream * audio_st = mux->audio_st;
    u64 audio_pts, video_pts;
    u64 audioSize, videoSize, videoKbps, audioKbps;
    u32 pass;
    u32 now, start;
    /* open the output file, if needed */
    if (!(mux->oc->oformat->flags & AVFMT_NOFILE)) {
        if (url_fopen(&mux->oc->pb, mux->destination, URL_WRONLY) < 0) {
            fprintf(stderr, "Could not open '%s'\n", mux->destination);
            return 0;
        }
    }
    /* write the stream header, if any */
    av_write_header(mux->oc);
    audio_pts = video_pts = 0;
    // Buffering...
    gf_sleep(1000);
    now = start = gf_sys_clock();
    audioSize = videoSize = 0;
    audioKbps = videoKbps = 0;
    pass = 0;
    while ( mux->encode) {
        pass++;
        if (0== (pass%16)) {
            now = gf_sys_clock();
            if (now - start > 1000) {
                videoKbps = videoSize * 8000 / (now-start) / 1024;
                audioKbps = audioSize * 8000 / (now-start) / 1024;
                audioSize = videoSize = 0;
                start = now;
                GF_LOG(GF_LOG_DEBUG, GF_LOG_MODULE, ("\rPTS audio="LLU" ("LLU"kbps), video="LLU" ("LLU"kbps)", audio_pts, audioKbps, video_pts, videoKbps));
            }
        }
        /* write interleaved audio and video frames */
        if (!video_st ||
                (audio_pts == AV_NOPTS_VALUE && has_packet_ready(mux, mux->audioMx, &mux->audioPackets)) ||
                ((audio_st && audio_pts < video_pts && audio_pts!= AV_NOPTS_VALUE))) {
            AVPacketList * pl = wait_for_packet(mux, mux->audioMx, &mux->audioPackets);
            if (!pl)
                goto exit;
            audio_pts = pl->pkt.pts ;
            audioSize+=pl->pkt.size;
            if (pl->pkt.pts == AV_NOPTS_VALUE) {
                pl->pkt.pts = 0;
            }
            if (av_interleaved_write_frame(mux->oc, &(pl->pkt)) < 0) {
                GF_LOG(GF_LOG_ERROR, GF_LOG_MODULE, ("[AVRedirect] : failed to write audio interleaved frame audio_pts="LLU", video_pts="LLU"\n", audio_pts, video_pts));
            }
            gf_free(pl);
        } else {
            AVPacketList * pl = wait_for_packet(mux, mux->videoMx, &mux->videoPackets);
            if (!pl)
                goto exit;
            video_pts = pl->pkt.pts;
            /* write the compressed frame in the media file */
            if (0 && audio_pts != AV_NOPTS_VALUE && audio_pts > video_pts && pl->next) {
                u32 skipped = 0;
                u64 first = video_pts;
                /* We may be too slow... */
                gf_mx_p(mux->videoMx);
                while (video_pts < audio_pts && pl->next) {
                    AVPacketList * old = pl;
                    // We skip frames...
                    pl = pl->next;
                    video_pts = pl->pkt.pts;
                    skipped++;
                    gf_free(old);
                }
                mux->videoPackets = pl->next;
                gf_mx_v(mux->videoMx);
                if (skipped > 0)
                    GF_LOG(GF_LOG_INFO, GF_LOG_MODULE, ("Skipped %u video frames, frame was "LLU", but is now "LLU"\n", skipped, first, video_pts));
            }
            videoSize+=pl->pkt.size;
            video_pts = pl->pkt.pts; // * video_st->time_base.num / video_st->time_base.den;
            assert( video_pts);
            if (av_interleaved_write_frame(mux->oc, &(pl->pkt)) < 0) {
                GF_LOG(GF_LOG_ERROR, GF_LOG_MODULE, ("[AVRedirect] : failed to write video interleaved frame audio_pts="LLU", video_pts="LLU"\n", audio_pts, video_pts));
            }
            gf_free(pl);
        }
		gf_sleep(1);
    }
exit:
    GF_LOG(GF_LOG_INFO, GF_LOG_MODULE, ("[AVRedirect] Ending TS thread...\n"));
    av_write_trailer(mux->oc);
    if (!(mux->oc->oformat->flags & AVFMT_NOFILE)) {
        /* close the output file */
        url_fclose(mux->oc->pb);
    }
    return 0;
}
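
The kbps figures logged in the loop above convert a byte count over an elapsed wall-clock interval in milliseconds: bytes * 8000 / elapsed_ms yields bits per second, and the final division by 1024 scales to kilobits. A standalone restatement (illustrative helper, not part of the module):

/* 256000 bytes over 2000 ms -> 256000*8000/2000/1024 = 1000 kbps */
static u64 rate_kbps(u64 bytes, u32 elapsed_ms)
{
	return bytes * 8000 / elapsed_ms / 1024;
}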
Example #9
File: read.c Project: Bevara/GPAC
void isor_net_io(void *cbk, GF_NETIO_Parameter *param)
{
	GF_Err e;
	u32 size = 0;
	char *local_name;
	ISOMReader *read = (ISOMReader *) cbk;

	/*handle service message*/
	gf_service_download_update_stats(read->dnload);

	if (param->msg_type==GF_NETIO_DATA_TRANSFERED) {
		e = GF_EOS;
	} else if (param->msg_type==GF_NETIO_DATA_EXCHANGE) {
		e = GF_OK;
		size = param->size;
	} else {
		e = param->error;
	}

	if (e<GF_OK) {
		/*error opening service*/
		if (!read->mov) {
			/* if there is an intermediate between this module and the terminal, report to it */
			if (read->input->query_proxy && read->input->proxy_udta && read->input->proxy_type) {
				send_proxy_command(read, GF_FALSE, GF_FALSE, e, NULL, NULL);
			} else {
				gf_service_connect_ack(read->service, NULL, e);
			}
		}
		return;
	}

	/*open file if not done yet (bad interleaving)*/
	if (e==GF_EOS) {
		const char *local_name;
		if (read->mov) return;
		local_name = gf_dm_sess_get_cache_name(read->dnload);
		if (!local_name) {
			if (read->input->query_proxy && read->input->proxy_udta && read->input->proxy_type) {
				send_proxy_command(read, GF_FALSE, GF_FALSE, GF_SERVICE_ERROR, NULL, NULL);
			} else {
				gf_service_connect_ack(read->service, NULL, GF_SERVICE_ERROR);
			}
			return;
		}
		e = GF_OK;
		read->mov = gf_isom_open(local_name, GF_ISOM_OPEN_READ, NULL);
		if (!read->mov) e = gf_isom_last_error(NULL);
		else read->time_scale = gf_isom_get_timescale(read->mov);
		if (read->input->query_proxy && read->input->proxy_udta && read->input->proxy_type) {
			send_proxy_command(read, GF_FALSE, GF_FALSE, GF_OK, NULL, NULL);
		} else {
			gf_service_connect_ack(read->service, NULL, GF_OK);
		}
		if (read->no_service_desc) isor_declare_objects(read);
	}

	if (!size) return;

	/*service is opened, nothing to do*/
	if (read->mov) {
		isor_check_buffer_level(read);

		/*end of chunk*/
		if (read->frag_type && (param->reply==1) ) {
			u64 bytesMissing = 0;
			gf_mx_p(read->segment_mutex);
			e = gf_isom_refresh_fragmented(read->mov, &bytesMissing, NULL);
			gf_mx_v(read->segment_mutex);
		}
		return;
	}

	/*try to open the service*/
	local_name = (char *)gf_dm_sess_get_cache_name(read->dnload);
	if (!local_name) {
		if (read->input->query_proxy && read->input->proxy_udta && read->input->proxy_type) {
			send_proxy_command(read, GF_FALSE, GF_FALSE, GF_SERVICE_ERROR, NULL, NULL);
		} else {
			gf_service_connect_ack(read->service, NULL, GF_SERVICE_ERROR);
		}
		return;
	}

	/*not enough data yet*/
	if (read->missing_bytes && (read->missing_bytes > size) ) {
		read->missing_bytes -= size;
		return;
	}

	e = gf_isom_open_progressive(local_name, 0, 0, &read->mov, &read->missing_bytes);
	switch (e) {
	case GF_ISOM_INCOMPLETE_FILE:
		return;
	case GF_OK:
		break;
	default:
		if (read->input->query_proxy && read->input->proxy_udta && read->input->proxy_type) {
			send_proxy_command(read, GF_FALSE, GF_FALSE, e, NULL, NULL);
		} else {
			gf_service_connect_ack(read->service, NULL, e);
		}
		return;
	}
	read->frag_type = gf_isom_is_fragmented(read->mov) ? 1 : 0;

	/*ok let's go, we can setup the decoders */
	read->time_scale = gf_isom_get_timescale(read->mov);
	if (read->input->query_proxy && read->input->proxy_udta && read->input->proxy_type) {
		send_proxy_command(read, GF_FALSE, GF_FALSE, GF_OK, NULL, NULL);
	} else {
		gf_service_connect_ack(read->service, NULL, GF_OK);
	}

	if (read->no_service_desc) isor_declare_objects(read);
}
Example #10
File: read.c Project: Bevara/GPAC
GF_Err ISOR_ServiceCommand(GF_InputService *plug, GF_NetworkCommand *com)
{
	Double track_dur, media_dur;
	ISOMChannel *ch;
	ISOMReader *read;
	u32 count, i;

	if (!plug || !plug->priv || !com) return GF_SERVICE_ERROR;
	read = (ISOMReader *) plug->priv;
	if (read->disconnected) return GF_OK;

	if (com->command_type==GF_NET_SERVICE_INFO) {
		u32 tag_len;
		const char *tag;
		if (gf_isom_apple_get_tag(read->mov, GF_ISOM_ITUNE_NAME, &tag, &tag_len)==GF_OK) com->info.name = tag;
		if (gf_isom_apple_get_tag(read->mov, GF_ISOM_ITUNE_ARTIST, &tag, &tag_len)==GF_OK) com->info.artist = tag;
		if (gf_isom_apple_get_tag(read->mov, GF_ISOM_ITUNE_ALBUM, &tag, &tag_len)==GF_OK) com->info.album = tag;
		if (gf_isom_apple_get_tag(read->mov, GF_ISOM_ITUNE_COMMENT, &tag, &tag_len)==GF_OK) com->info.comment = tag;
		if (gf_isom_apple_get_tag(read->mov, GF_ISOM_ITUNE_TRACK, &tag, &tag_len)==GF_OK) {
			com->info.track_info = (((tag[2]<<8)|tag[3]) << 16) | ((tag[4]<<8)|tag[5]);
		}
		if (gf_isom_apple_get_tag(read->mov, GF_ISOM_ITUNE_COMPOSER, &tag, &tag_len)==GF_OK) com->info.composer = tag;
		if (gf_isom_apple_get_tag(read->mov, GF_ISOM_ITUNE_WRITER, &tag, &tag_len)==GF_OK) com->info.writer = tag;
		if (gf_isom_apple_get_tag(read->mov, GF_ISOM_ITUNE_GENRE, &tag, &tag_len)==GF_OK) {
			if (tag[0]) {
				com->info.genre = 0;
			} else {
				com->info.genre = (tag[0]<<8) | tag[1];
			}
		}
		return GF_OK;
	}
	if (com->command_type==GF_NET_SERVICE_HAS_AUDIO) {
		u32 i, count;
		count = gf_isom_get_track_count(read->mov);
		for (i=0; i<count; i++) {
			if (gf_isom_get_media_type(read->mov, i+1) == GF_ISOM_MEDIA_AUDIO) return GF_OK;
		}
		return GF_NOT_SUPPORTED;
	}

	if (com->command_type == GF_NET_SERVICE_QUALITY_SWITCH)
	{
		count = gf_list_count(read->channels);
		for (i = 0; i < count; i++)
		{
			ch = (ISOMChannel *)gf_list_get(read->channels, i);
			if (gf_isom_has_scalable_layer(read->mov)) {
				ch->next_track = gf_channel_switch_quality(ch, read->mov, com->switch_quality.up);
			}
		}
		return GF_OK;
	}
	if (com->command_type == GF_NET_SERVICE_PROXY_DATA_RECEIVE) {
		isor_flush_data(read, 1, com->proxy_data.is_chunk);
		return GF_OK;
	}
	if (com->command_type == GF_NET_SERVICE_FLUSH_DATA) {
		if (plug->query_proxy)
			isor_flush_data(read, 0, 0);
		return GF_OK;
	}

	if (!com->base.on_channel) return GF_NOT_SUPPORTED;

	ch = isor_get_channel(read, com->base.on_channel);
	if (!ch) return GF_STREAM_NOT_FOUND;

	switch (com->command_type) {
	case GF_NET_CHAN_SET_PADDING:
		if (!ch->track) return GF_OK;
		gf_isom_set_sample_padding(read->mov, ch->track, com->pad.padding_bytes);
		return GF_OK;
	case GF_NET_CHAN_SET_PULL:
		//we don't pull in DASH-based services, we flush as soon as we have a complete segment
#ifndef DASH_USE_PULL
		if (read->input->proxy_udta && !read->input->proxy_type)
			return GF_NOT_SUPPORTED;
#endif

		ch->is_pulling = 1;
		return GF_OK;
	case GF_NET_CHAN_INTERACTIVE:
		return GF_OK;
	case GF_NET_CHAN_BUFFER:
		//dash or HTTP, do rebuffer if not disabled
		if (plug->query_proxy) {
		} else if (read->dnload) {
			ch->buffer_min = com->buffer.min;
			ch->buffer_max = com->buffer.max;
		} else {
			com->buffer.max = com->buffer.min = 0;
		}
		return GF_OK;
	case GF_NET_CHAN_DURATION:
		if (!ch->track) {
			com->duration.duration = 0;
			return GF_OK;
		}
		ch->duration = gf_isom_get_track_duration(read->mov, ch->track);
		track_dur = (Double) (s64) ch->duration;
		track_dur /= read->time_scale;
		if (gf_isom_get_edit_segment_count(read->mov, ch->track)) {
			com->duration.duration = (Double) track_dur;
			ch->duration = (u32) (track_dur * ch->time_scale);
		} else {
			/*some files indicate a wrong TrackDuration, get the longest*/
			ch->duration = gf_isom_get_media_duration(read->mov, ch->track);
			media_dur = (Double) (s64) ch->duration;
			media_dur /= ch->time_scale;
			com->duration.duration = MAX(track_dur, media_dur);
		}
		return GF_OK;
	case GF_NET_CHAN_PLAY:

		isor_reset_reader(ch);
		gf_mx_p(read->segment_mutex);
		ch->speed = com->play.speed;
		gf_mx_v(read->segment_mutex);
		ch->start = ch->end = 0;
		if (com->play.speed>0) {
			if (com->play.start_range>=0) {
				ch->start = (u64) (s64) (com->play.start_range * ch->time_scale);
				ch->start = check_round(ch, ch->start, com->play.start_range, 1);
			}
			if (com->play.end_range >= com->play.start_range) {
				ch->end = (u64) (s64) (com->play.end_range*ch->time_scale);
				ch->end = check_round(ch, ch->end, com->play.end_range, 0);
			}
		} else if (com->play.speed<0) {
			Double end = com->play.end_range;
			if (end==-1) end = 0;
			ch->start = (u64) (s64) (com->play.start_range * ch->time_scale);
			if (end <= com->play.start_range)
				ch->end = (u64) (s64) (end  * ch->time_scale);
		}
		ch->is_playing = 1;
		if (com->play.dash_segment_switch) ch->wait_for_segment_switch = 1;
		GF_LOG(GF_LOG_DEBUG, GF_LOG_NETWORK, ("[IsoMedia] Starting channel playback "LLD" to "LLD" (%g to %g)\n", ch->start, ch->end, com->play.start_range, com->play.end_range));

		//and check buffer level on play request
		isor_check_buffer_level(read);
		return GF_OK;
	case GF_NET_CHAN_STOP:
		isor_reset_reader(ch);
		return GF_OK;

	case GF_NET_CHAN_SET_SPEED:
		gf_mx_p(read->segment_mutex);
		ch->speed = com->play.speed;
		gf_mx_v(read->segment_mutex);
		return GF_OK;
	/*nothing to do on MP4 for channel config*/
	case GF_NET_CHAN_CONFIG:
		return GF_OK;
	case GF_NET_CHAN_GET_PIXEL_AR:
		return gf_isom_get_pixel_aspect_ratio(read->mov, ch->track, 1, &com->par.hSpacing, &com->par.vSpacing);
	case GF_NET_CHAN_GET_DSI:
	{
		/*it may happen that there are conflicting config when using ESD URLs...*/
		GF_DecoderConfig *dcd = gf_isom_get_decoder_config(read->mov, ch->track, 1);
		com->get_dsi.dsi = NULL;
		com->get_dsi.dsi_len = 0;
		if (dcd) {
			if (dcd->decoderSpecificInfo) {
				com->get_dsi.dsi = dcd->decoderSpecificInfo->data;
				com->get_dsi.dsi_len = dcd->decoderSpecificInfo->dataLength;
				dcd->decoderSpecificInfo->data = NULL;
			}
			gf_odf_desc_del((GF_Descriptor *) dcd);
		}
		return GF_OK;
	}
	case GF_NET_CHAN_NALU_MODE:
		ch->nalu_extract_mode = GF_ISOM_NALU_EXTRACT_INBAND_PS_FLAG;
		ch->disable_seek = 1;
		//when this is set, we work in real scalable mode (e.g. N streams reassembled by the player) so only extract the layer. This will need refinements if we plan to support
		//several scalable layers ...
		if (com->nalu_mode.extract_mode==1) {
			ch->nalu_extract_mode |= GF_ISOM_NALU_EXTRACT_ANNEXB_FLAG | GF_ISOM_NALU_EXTRACT_VDRD_FLAG | GF_ISOM_NALU_EXTRACT_LAYER_ONLY;
		}
		gf_isom_set_nalu_extract_mode(ch->owner->mov, ch->track, ch->nalu_extract_mode);
		break;
	default:
		break;
	}
	return GF_NOT_SUPPORTED;
}
Example #11
Bool gf_sc_fit_world_to_screen(GF_Compositor *compositor)
{
	GF_TraverseState tr_state;
	SFVec3f pos, diff;
	Fixed dist, d;
	GF_Camera *cam;
	GF_Node *top;

#ifndef GPAC_DISABLE_VRML
//	if (gf_list_count(compositor->visual->back_stack)) return;
	if (gf_list_count(compositor->visual->view_stack)) return 0;
#endif

	gf_mx_p(compositor->mx);
	top = gf_sg_get_root_node(compositor->scene);
	if (!top) {
		gf_mx_v(compositor->mx);
		return 0;
	}
	memset(&tr_state, 0, sizeof(GF_TraverseState));
	tr_state.traversing_mode = TRAVERSE_GET_BOUNDS;
	tr_state.visual = compositor->visual;
	gf_node_traverse(top, &tr_state);
	if (gf_node_dirty_get(top)) {
		tr_state.bbox.is_set = 0;
	}

	if (!tr_state.bbox.is_set) {
		gf_mx_v(compositor->mx);
		/*empty world ...*/
		if (tr_state.bbox.radius==-1) return 1;
		/*2D world with 3D camera forced*/
		if (tr_state.bounds.width&&tr_state.bounds.height) return 1;
		return 0;
	}

	cam = &compositor->visual->camera;

	cam->world_bbox = tr_state.bbox;
	/*fit is based on bounding sphere*/
	dist = gf_divfix(tr_state.bbox.radius, gf_sin(cam->fieldOfView/2) );
	gf_vec_diff(diff, cam->center, tr_state.bbox.center);
	/*do not update if camera is outside the scene bounding sphere and dist is too close*/
	if (gf_vec_len(diff) > tr_state.bbox.radius + cam->radius) {
		gf_vec_diff(diff, cam->vp_position, tr_state.bbox.center);
		d = gf_vec_len(diff);
		if (d<dist) {
			gf_mx_v(compositor->mx);
			return 1;
		}
	}
		
	diff = gf_vec_scale(camera_get_pos_dir(cam), dist);
	gf_vec_add(pos, tr_state.bbox.center, diff);
	diff = cam->position;
	camera_set_vectors(cam, pos, cam->vp_orientation, cam->fieldOfView);
	cam->position = diff;
	camera_move_to(cam, pos, cam->target, cam->up);
	cam->examine_center = tr_state.bbox.center;
	cam->flags |= CF_STORE_VP;
	if (cam->z_far < dist) cam->z_far = 10*dist;
	camera_changed(compositor, cam);
	gf_mx_v(compositor->mx);
	return 1;
}
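
The fit above is based on the bounding sphere: the camera is pulled back to dist = radius / sin(fieldOfView/2), the smallest distance at which the whole sphere fits inside the view cone (gf_divfix and gf_sin are GPAC's fixed-point equivalents). A plain floating-point restatement of that relation (illustrative, not part of the compositor):

#include <math.h>

/* Smallest eye distance at which a sphere of the given radius fills
 * a view cone of 'fov' radians: d = r / sin(fov/2). */
static double fit_distance(double radius, double fov)
{
	return radius / sin(fov / 2.0);
}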
Example #12
static u32 FFDemux_Run(void *par)
{
	AVPacket pkt;
	s64 seek_to;
	GF_NetworkCommand com;
	GF_NetworkCommand map;
	GF_SLHeader slh;
	FFDemux *ffd = (FFDemux *) par;

	memset(&map, 0, sizeof(GF_NetworkCommand));
	map.command_type = GF_NET_CHAN_MAP_TIME;

	memset(&com, 0, sizeof(GF_NetworkCommand));
	com.command_type = GF_NET_BUFFER_QUERY;

	memset(&slh, 0, sizeof(GF_SLHeader));

	slh.compositionTimeStampFlag = slh.decodingTimeStampFlag = 1;

	while (ffd->is_running) {
		//nothing connected, wait
		if (!ffd->video_ch && !ffd->audio_ch) {
			gf_sleep(100);
			continue;
		}

		if ((ffd->seek_time>=0) && ffd->seekable) {
			seek_to = (s64) (AV_TIME_BASE*ffd->seek_time);
			av_seek_frame(ffd->ctx, -1, seek_to, AVSEEK_FLAG_BACKWARD);
			ffd->seek_time = -1;
		}
		pkt.stream_index = -1;
		/*EOF*/
		if (av_read_frame(ffd->ctx, &pkt) <0) break;
		if (pkt.pts == AV_NOPTS_VALUE) pkt.pts = pkt.dts;
		if (!pkt.dts) pkt.dts = pkt.pts;

		slh.compositionTimeStamp = pkt.pts;
		slh.decodingTimeStamp = pkt.dts;

		gf_mx_p(ffd->mx);
		/*blindly send audio as soon as video is init*/
		if (ffd->audio_ch && (pkt.stream_index == ffd->audio_st) ) {
			slh.compositionTimeStamp *= ffd->audio_tscale.num;
			slh.decodingTimeStamp *= ffd->audio_tscale.num;

			gf_service_send_packet(ffd->service, ffd->audio_ch, (char *) pkt.data, pkt.size, &slh, GF_OK);
		}
		else if (ffd->video_ch && (pkt.stream_index == ffd->video_st)) {
			slh.compositionTimeStamp *= ffd->video_tscale.num;
			slh.decodingTimeStamp *= ffd->video_tscale.num;
			slh.randomAccessPointFlag = pkt.flags&AV_PKT_FLAG_KEY ? 1 : 0;
			gf_service_send_packet(ffd->service, ffd->video_ch, (char *) pkt.data, pkt.size, &slh, GF_OK);
		}
		gf_mx_v(ffd->mx);
		av_free_packet(&pkt);

		/*sleep until the buffer occupancy drops low enough - note that this works because all streams in this
		demuxer are synchronized*/
		while (ffd->audio_run || ffd->video_run) {
			gf_service_command(ffd->service, &com, GF_OK);
			if (com.buffer.occupancy < com.buffer.max)
				break;

			gf_sleep(1);
		}

		if (!ffd->audio_run && !ffd->video_run) break;
	}
	/*signal EOS*/
	if (ffd->audio_ch) gf_service_send_packet(ffd->service, ffd->audio_ch, NULL, 0, NULL, GF_EOS);
	if (ffd->video_ch) gf_service_send_packet(ffd->service, ffd->video_ch, NULL, 0, NULL, GF_EOS);
	ffd->is_running = 2;

	return 0;
}
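
A hedged sketch of how such a run loop is typically launched; gf_th_new()/gf_th_run() are the standard GPAC thread APIs, while the "thread" field and the ffdemux_start name are assumptions for illustration:

static GF_Err ffdemux_start(FFDemux *ffd)
{
	ffd->thread = gf_th_new("FFDemux");	/* assumed GF_Thread* field */
	if (!ffd->thread) return GF_OUT_OF_MEM;
	ffd->is_running = 1;
	/* FFDemux_Run sets is_running to 2 once EOS has been signaled */
	return gf_th_run(ffd->thread, FFDemux_Run, ffd);
}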
Example No. 13
0
static GF_Err RP_ServiceCommand(GF_InputService *plug, GF_NetworkCommand *com)
{
    RTPStream *ch;
    RTPClient *priv = (RTPClient *)plug->priv;


    if (com->command_type==GF_NET_SERVICE_HAS_AUDIO) {
        u32 i;
        for (i=0; i<gf_list_count(priv->channels); i++) {
            ch = gf_list_get(priv->channels, i);
            if (ch->depacketizer->sl_map.StreamType==GF_STREAM_AUDIO)
                return GF_OK;
        }
        return GF_NOT_SUPPORTED;
    }
    if (com->command_type==GF_NET_SERVICE_MIGRATION_INFO) {
        RP_SaveSessionState(priv);
        priv->session_migration=1;
        if (priv->session_state_data) {
            com->migrate.data = priv->session_state_data;
            com->migrate.data_len = (u32) strlen(priv->session_state_data);
            return GF_OK;
        }
        return GF_NOT_SUPPORTED;
    }

    if (com->command_type == GF_NET_SERVICE_QUALITY_SWITCH)
    {
        gf_rtp_switch_quality(priv, com->switch_quality.up);
        return GF_OK;
    }

    /*ignore commands other than channels one*/
    if (!com->base.on_channel) {
        if (com->command_type==GF_NET_IS_CACHABLE) return GF_OK;
        return GF_NOT_SUPPORTED;
    }

    ch = RP_FindChannel(priv, com->base.on_channel, 0, NULL, 0);
    if (!ch) return GF_STREAM_NOT_FOUND;

    switch (com->command_type) {
    case GF_NET_CHAN_SET_PULL:
        if (ch->rtp_ch || ch->rtsp || !ch->control) return GF_NOT_SUPPORTED;
        /*embedded channels work in pull mode*/
        if (strstr(ch->control, "data:application/")) return GF_OK;
        return GF_NOT_SUPPORTED;
    case GF_NET_CHAN_INTERACTIVE:
        /*looks like pure RTP / multicast etc, not interactive*/
        if (!ch->control) return GF_NOT_SUPPORTED;
        /*emulated broadcast mode*/
        else if (ch->flags & RTP_FORCE_BROADCAST) return GF_NOT_SUPPORTED;
        /*regular rtsp mode*/
        else if (ch->flags & RTP_HAS_RANGE) return GF_OK;
        /*embedded data*/
        else if (strstr(ch->control, "application")) return GF_OK;
        return GF_NOT_SUPPORTED;
    case GF_NET_CHAN_BUFFER:
        if (!(ch->rtp_ch || ch->rtsp || !ch->control)) {
            com->buffer.max = com->buffer.min = 0;
        } else {
            const char *opt;
            /*amount of buffering in ms*/
            opt = gf_modules_get_option((GF_BaseInterface *)plug, "Network", "BufferLength");
            com->buffer.max = opt ? atoi(opt) : 1000;
            /*rebuffer low limit in ms - if the amount of buffering is less than this, rebuffering will never occur*/
            opt = gf_modules_get_option((GF_BaseInterface *)plug, "Network", "RebufferLength");
            if (opt) com->buffer.min = atoi(opt);
            else com->buffer.min = 500;
            if (com->buffer.min >= com->buffer.max ) com->buffer.min = 0;
        }
        return GF_OK;
    case GF_NET_CHAN_DURATION:
        com->duration.duration = (ch->flags & RTP_HAS_RANGE) ? (ch->range_end - ch->range_start) : 0;
        return GF_OK;
    /*RTP channel config is done upon connection, once the complete SL mapping is known;
    however we must store some info not carried in SDP*/
    case GF_NET_CHAN_CONFIG:
        if (com->cfg.frame_duration) ch->depacketizer->sl_hdr.au_duration = com->cfg.frame_duration;
        ch->ts_res = com->cfg.sl_config.timestampResolution;
        return GF_OK;

    case GF_NET_CHAN_PLAY:
        GF_LOG(GF_LOG_DEBUG, GF_LOG_RTP, ("[RTP] Processing play on channel @%08x - %s\n", ch, ch->rtsp ? "RTSP control" : "No control (RTP)" ));
        /*is this RTSP or direct RTP?*/
        ch->flags &= ~RTP_EOS;
        if (ch->rtsp) {
            if (ch->status==RTP_SessionResume) {
                const char *opt = gf_modules_get_option((GF_BaseInterface *) plug, "Streaming", "SessionMigrationPause");
                if (opt && !strcmp(opt, "yes")) {
                    ch->status = RTP_Connected;
                    com->play.start_range = ch->current_start;
                } else {
                    ch->status = RTP_Running;
                    return GF_OK;
                }
            }
            RP_UserCommand(ch->rtsp, ch, com);
        } else {
            ch->status = RTP_Running;
            if (!ch->next_stream)
                priv->cur_mid = ch->mid;

            if (ch->rtp_ch) {
                /*technically we shouldn't attempt to synchronize streams based on RTP, we should use RTCP. However it
                may happen that the RTCP traffic is absent ...*/
                //ch->check_rtp_time = RTP_SET_TIME_RTP;
                ch->rtcp_init = 0;
                gf_mx_p(priv->mx);
                RP_InitStream(ch, (ch->flags & RTP_CONNECTED) ? 1 : 0);
                gf_mx_v(priv->mx);
                gf_rtp_set_info_rtp(ch->rtp_ch, 0, 0, 0);
            } else {
                /*direct channel, store current start*/
                ch->current_start = com->play.start_range;
                ch->flags |= GF_RTP_NEW_AU;
                gf_rtp_depacketizer_reset(ch->depacketizer, 0);
            }
        }
        return GF_OK;
    case GF_NET_CHAN_STOP:
        /*is this RTSP or direct RTP?*/
        if (ch->rtsp) {
            if (! ch->owner->session_migration) {
                RP_UserCommand(ch->rtsp, ch, com);
            }
        } else {
            ch->status = RTP_Connected;
            ch->owner->last_ntp = 0;
        }
        ch->rtcp_init = 0;
        return GF_OK;
    case GF_NET_CHAN_SET_SPEED:
    case GF_NET_CHAN_PAUSE:
    case GF_NET_CHAN_RESUME:
        assert(ch->rtsp);
        RP_UserCommand(ch->rtsp, ch, com);
        return GF_OK;

    case GF_NET_CHAN_GET_DSI:
        if (ch->depacketizer && ch->depacketizer->sl_map.configSize) {
            com->get_dsi.dsi_len = ch->depacketizer->sl_map.configSize;
            com->get_dsi.dsi = (char*)gf_malloc(sizeof(char)*com->get_dsi.dsi_len);
            memcpy(com->get_dsi.dsi, ch->depacketizer->sl_map.config, sizeof(char)*com->get_dsi.dsi_len);
        } else {
            com->get_dsi.dsi = NULL;
            com->get_dsi.dsi_len = 0;
        }
        return GF_OK;


    case GF_NET_GET_STATS:
        memset(&com->net_stats, 0, sizeof(GF_NetComStats));
        if (ch->rtp_ch) {
            u32 time;
            Float bps;
            com->net_stats.pck_loss_percentage = gf_rtp_get_loss(ch->rtp_ch);
            if (ch->flags & RTP_INTERLEAVED) {
                com->net_stats.multiplex_port = gf_rtsp_get_session_port(ch->rtsp->session);
                com->net_stats.port = gf_rtp_get_low_interleave_id(ch->rtp_ch);
                com->net_stats.ctrl_port = gf_rtp_get_hight_interleave_id(ch->rtp_ch);
            } else {
                com->net_stats.multiplex_port = 0;
                gf_rtp_get_ports(ch->rtp_ch, &com->net_stats.port, &com->net_stats.ctrl_port);
            }
            if (ch->stat_stop_time) {
                time = ch->stat_stop_time - ch->stat_start_time;
            } else {
                time = gf_sys_clock() - ch->stat_start_time;
            }
            bps = 8.0f * ch->rtp_bytes;
            bps *= 1000;
            bps /= time;
            com->net_stats.bw_down = (u32) bps;
            bps = 8.0f * ch->rtcp_bytes;
            bps *= 1000;
            bps /= time;
            com->net_stats.ctrl_bw_down = (u32) bps;
            bps = 8.0f * gf_rtp_get_tcp_bytes_sent(ch->rtp_ch);
            bps *= 1000;
            bps /= time;
            com->net_stats.ctrl_bw_up = (u32) bps;
        }
        return GF_OK;
    default:
        break;
    }
    return GF_NOT_SUPPORTED;
}
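
For context, a sketch of the caller side of such a command, assuming "plug" is a connected GF_InputService and "channel" an opaque channel handle obtained at connect time; this is how a terminal would query the buffer limits handled above:

GF_NetworkCommand com;
memset(&com, 0, sizeof(GF_NetworkCommand));
com.command_type = GF_NET_CHAN_BUFFER;
com.base.on_channel = channel;
if (plug->ServiceCommand(plug, &com) == GF_OK) {
    /*com.buffer.max and com.buffer.min now hold the buffering limits in ms*/
}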
Example No. 14
0
int dc_video_decoder_read(VideoInputFile *video_input_file, VideoInputData *video_input_data, int source_number, int use_source_timing, int is_live_capture, const int *exit_signal_addr)
{
#ifdef DASHCAST_DEBUG_TIME_
	struct timeval start, end;
	long elapsed_time;
#endif
	AVPacket packet;
	int ret, got_frame, already_locked = 0;
	AVCodecContext *codec_ctx;
	VideoDataNode *video_data_node;

	/* Get a pointer to the codec context for the video stream */
	codec_ctx = video_input_file->av_fmt_ctx->streams[video_input_file->vstream_idx]->codec;

	/* Read frames */
	while (1) {
#ifdef DASHCAST_DEBUG_TIME_
		gf_gettimeofday(&start, NULL);
#endif
		memset(&packet, 0, sizeof(AVPacket));
		ret = av_read_frame(video_input_file->av_fmt_ctx, &packet);
#ifdef DASHCAST_DEBUG_TIME_
		gf_gettimeofday(&end, NULL);
		elapsed_time = (end.tv_sec * 1000000 + end.tv_usec) - (start.tv_sec * 1000000 + start.tv_usec);
		fprintf(stdout, "fps: %f\n", 1000000.0/elapsed_time);
#endif

		/* If we demux for the audio thread, send the packet to the audio */
		if (video_input_file->av_fmt_ctx_ref_cnt && ((packet.stream_index != video_input_file->vstream_idx) || (ret == AVERROR_EOF))) {
			AVPacket *packet_copy = NULL;
			if (ret != AVERROR_EOF) {
				GF_SAFEALLOC(packet_copy, AVPacket);
				memcpy(packet_copy, &packet, sizeof(AVPacket));
			}

			assert(video_input_file->av_pkt_list);
			gf_mx_p(video_input_file->av_pkt_list_mutex);
			gf_list_add(video_input_file->av_pkt_list, packet_copy);
			gf_mx_v(video_input_file->av_pkt_list_mutex);

			if (ret != AVERROR_EOF) {
				continue;
			}
		}

		if (ret == AVERROR_EOF) {
			if (video_input_file->mode == LIVE_MEDIA && video_input_file->no_loop == 0) {
				av_seek_frame(video_input_file->av_fmt_ctx, video_input_file->vstream_idx, 0, 0);
				av_free_packet(&packet);
				continue;
			}

			dc_producer_lock(&video_input_data->producer, &video_input_data->circular_buf);
			dc_producer_unlock_previous(&video_input_data->producer, &video_input_data->circular_buf);
			video_data_node = (VideoDataNode *) dc_producer_produce(&video_input_data->producer, &video_input_data->circular_buf);
			video_data_node->source_number = source_number;
			/* Flush decoder */
			memset(&packet, 0, sizeof(AVPacket));
#ifndef FF_API_AVFRAME_LAVC
			avcodec_get_frame_defaults(video_data_node->vframe);
#else
			av_frame_unref(video_data_node->vframe);
#endif

			avcodec_decode_video2(codec_ctx, video_data_node->vframe, &got_frame, &packet);
			if (got_frame) {
				dc_producer_advance(&video_input_data->producer, &video_input_data->circular_buf);
				return 0;
			}

			dc_producer_end_signal(&video_input_data->producer, &video_input_data->circular_buf);
			dc_producer_unlock(&video_input_data->producer, &video_input_data->circular_buf);
			return -2;
		}
		else if (ret < 0)
		{
			GF_LOG(GF_LOG_ERROR, GF_LOG_DASH, ("Cannot read video frame.\n"));
			continue;
		}

		/* Is this a packet from the video stream? */
		if (packet.stream_index == video_input_file->vstream_idx) {
			if (!already_locked) {
				if (dc_producer_lock(&video_input_data->producer, &video_input_data->circular_buf) < 0) {
					GF_LOG(GF_LOG_ERROR, GF_LOG_DASH, ("[dashcast] Live system dropped a video frame\n"));
					continue;
				}

				dc_producer_unlock_previous(&video_input_data->producer, &video_input_data->circular_buf);

				already_locked = 1;
			}

			video_data_node = (VideoDataNode *) dc_producer_produce(&video_input_data->producer, &video_input_data->circular_buf);
			video_data_node->source_number = source_number;

			/* Set video frame to default */
#ifndef FF_API_AVFRAME_LAVC
			avcodec_get_frame_defaults(video_data_node->vframe);
#else
			av_frame_unref(video_data_node->vframe);
#endif

			/* Decode video frame */
			if (avcodec_decode_video2(codec_ctx, video_data_node->vframe, &got_frame, &packet) < 0) {
				av_free_packet(&packet);
				GF_LOG(GF_LOG_ERROR, GF_LOG_DASH, ("Error while decoding video.\n"));
				dc_producer_end_signal(&video_input_data->producer, &video_input_data->circular_buf);
				dc_producer_unlock(&video_input_data->producer, &video_input_data->circular_buf);
				return -1;
			}

			/* Did we get a video frame? */
			if (got_frame) {
				if (use_source_timing && is_live_capture) {
					u64 pts;
					if (video_input_file->pts_init == 0) {
						video_input_file->pts_init = 1;
						video_input_file->utc_at_init = gf_net_get_utc();
						video_input_file->first_pts = packet.pts;
						video_input_file->computed_pts = 0;
						video_input_data->frame_duration = codec_ctx->time_base.num;
						video_input_file->sync_tolerance = 9*video_input_data->frame_duration/5;
						//TODO - check with audio if sync is OK
					}
					//perform FPS re-linearisation
					pts = packet.pts - video_input_file->first_pts;
					if (pts - video_input_file->prev_pts > video_input_file->sync_tolerance) {
						u32 nb_lost=0;
						while (pts > video_input_file->computed_pts) {
							video_input_file->computed_pts += video_input_data->frame_duration;
							nb_lost++;
						}

						if (nb_lost) {
							GF_LOG(GF_LOG_ERROR, GF_LOG_DASH, ("[DashCast] Capture lost %d video frames \n", nb_lost));
						}
					}

//					fprintf(stdout, "Capture PTS %g - UTC diff %g - Computed PTS %g\n", (Double) pts / codec_ctx->time_base.den, (Double) (gf_net_get_utc() - video_input_file->utc_at_init) / 1000, (Double) video_input_file->computed_pts / codec_ctx->time_base.den);

					video_input_file->prev_pts = pts;
					video_data_node->vframe->pts = video_input_file->computed_pts;
					video_input_file->computed_pts += video_input_data->frame_duration;
				}

				if (video_data_node->vframe->pts==AV_NOPTS_VALUE) {
					if (!use_source_timing) {
						video_data_node->vframe->pts = video_input_file->frame_decoded;
					} else {
						video_data_node->vframe->pts = video_data_node->vframe->pkt_pts;
					}
				}
				video_input_file->frame_decoded++;

				GF_LOG(GF_LOG_INFO, GF_LOG_DASH, ("[DashCast] Video Frame TS "LLU" decoded at UTC "LLU" ms\n", video_data_node->vframe->pts, gf_net_get_utc() ));

				// For a decode/encode process we must free this memory.
				// But if the input is raw and there is no need to decode,
				// the packet is passed on directly as the decoded frame; we must wait until rescaling is done before freeing it.

				if (codec_ctx->codec->id == CODEC_ID_RAWVIDEO) {
					video_data_node->nb_raw_frames_ref = video_input_file->nb_consumers;

					video_data_node->raw_packet = packet;

					dc_producer_advance(&video_input_data->producer, &video_input_data->circular_buf);
					while (video_data_node->nb_raw_frames_ref && ! *exit_signal_addr) {
						gf_sleep(0);
					}
				} else {
					dc_producer_advance(&video_input_data->producer, &video_input_data->circular_buf);
					av_free_packet(&packet);
				}
				return 0;

			}
		}

		/* Free the packet that was allocated by av_read_frame */
		av_free_packet(&packet);
	}

	GF_LOG(GF_LOG_ERROR, GF_LOG_DASH, ("Unknown error while reading video frame.\n"));
	return -1;
}
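
A hypothetical driver loop for the reader above (all names except dc_video_decoder_read are assumptions): keep pulling decoded frames until EOS (-2) or a fatal error (-1), since 0 means one frame was pushed into the circular buffer:

int ret;
do {
	ret = dc_video_decoder_read(in_file, in_data, source_number,
	                            use_source_timing, is_live_capture, exit_signal_addr);
} while (ret == 0 && !*exit_signal_addr);
if (ret == -2) {
	/* end of stream: the decoder was flushed and consumers were signaled */
}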
Example No. 15
0
static void M2TS_FlushRequested(M2TSIn *m2ts)
{
	u32 i, j, req_prog_count, count, prog_id, found;

	gf_mx_p(m2ts->mx);

	found = 0;
	count = gf_list_count(m2ts->ts->requested_pids);
	for (i=0; i<count; i++) {
		M2TSIn_Prog *req_pid = gf_list_get(m2ts->ts->requested_pids, i);
		GF_M2TS_ES *es = m2ts->ts->ess[req_pid->pid];
		if (es==NULL) continue;

		/*move to skip mode for all PES until asked for playback*/
		if (!(es->flags & GF_M2TS_ES_IS_SECTION) && !es->user)
			gf_m2ts_set_pes_framing((GF_M2TS_PES *)es, GF_M2TS_PES_FRAMING_SKIP);
		MP2TS_DeclareStream(m2ts, (GF_M2TS_PES *)es, NULL, 0);
		gf_list_rem(m2ts->ts->requested_pids, i);
		gf_free(req_pid);
		i--;
		count--;
		found++;
	}
	req_prog_count = gf_list_count(m2ts->ts->requested_progs);
	for (i = 0; i < req_prog_count; i++) {
		M2TSIn_Prog *req_prog = gf_list_get(m2ts->ts->requested_progs, i);
		prog_id = atoi(req_prog->fragment);
		count = gf_list_count(m2ts->ts->SDTs);
		for (j=0; j<count; j++) {
			GF_M2TS_SDT *sdt = gf_list_get(m2ts->ts->SDTs, j);
			if (!stricmp((const char *) sdt->service, req_prog->fragment)) req_prog->id = sdt->service_id;
			else if (sdt->service_id==prog_id)  req_prog->id = sdt->service_id;
		}
		if (req_prog->id) {
			GF_M2TS_Program *ts_prog;
			count = gf_list_count(m2ts->ts->programs);
			for (j=0; j<count; j++) {
				ts_prog = gf_list_get(m2ts->ts->programs, j);
				if (ts_prog->number==req_prog->id) {
					MP2TS_SetupProgram(m2ts, ts_prog, 0, 0);
					found++;
					gf_free(req_prog->fragment);
					gf_free(req_prog);
					gf_list_rem(m2ts->ts->requested_progs, i);
					req_prog_count--;
					i--;
					break;
				}
			}
		}
	}

	if (m2ts->epg_requested) {
		if (!m2ts->has_eit) {
			GF_ObjectDescriptor *od = M2TS_GenerateEPG_OD(m2ts);
			/*declare but don't regenerate scene*/
			gf_term_add_media(m2ts->service, (GF_Descriptor*)od, 0);
			m2ts->has_eit = 1;
		}
	} else {
		/*force scene regeneration only when EPG is not requested*/
		if (found)
			gf_term_add_media(m2ts->service, NULL, 0);
	}

	gf_mx_v(m2ts->mx);
}
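
The i--/count-- adjustments above are the usual idiom for removing entries from a gf_list while iterating it. A generic sketch, where "lst" and "keep()" are purely hypothetical:

u32 i, count = gf_list_count(lst);
for (i=0; i<count; i++) {
	void *item = gf_list_get(lst, i);
	if (keep(item)) continue;
	gf_list_rem(lst, i);
	i--;		/*re-examine the slot that just shifted down*/
	count--;	/*the list shrank by one*/
}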
Example No. 16
0
GF_AbstractTSMuxer * ts_amux_new(GF_AVRedirect * avr, u32 videoBitrateInBitsPerSec, u32 width, u32 height, u32 audioBitRateInBitsPerSec) {
    GF_AbstractTSMuxer * ts = gf_malloc( sizeof(GF_AbstractTSMuxer));
    memset( ts, 0, sizeof( GF_AbstractTSMuxer));
    ts->oc = avformat_alloc_context();
    ts->destination = avr->destination;
    av_register_all();
    ts->oc->oformat = GUESS_FORMAT(NULL, avr->destination, NULL);
    if (!ts->oc->oformat)
        ts->oc->oformat = GUESS_FORMAT("mpegts", NULL, NULL);
    assert( ts->oc->oformat);
#if REDIRECT_AV_AUDIO_ENABLED
    ts->audio_st = av_new_stream(ts->oc, avr->audioCodec->id);
    {
        AVCodecContext * c = ts->audio_st->codec;
        c->codec_id = avr->audioCodec->id;
        c->codec_type = AVMEDIA_TYPE_AUDIO;
        /* put sample parameters */
        c->sample_fmt = SAMPLE_FMT_S16;
        c->bit_rate = audioBitRateInBitsPerSec;
        c->sample_rate = avr->audioSampleRate;
        c->channels = 2;
        c->time_base.num = 1;
        c->time_base.den = 1000;
        // some formats want stream headers to be separate
        if (ts->oc->oformat->flags & AVFMT_GLOBALHEADER)
            c->flags |= CODEC_FLAG_GLOBAL_HEADER;
    }
#endif

    ts->video_st = av_new_stream(ts->oc, avr->videoCodec->id);
    {
        AVCodecContext * c = ts->video_st->codec;
        c->codec_id = avr->videoCodec->id;
        c->codec_type = AVMEDIA_TYPE_VIDEO;

        /* put sample parameters */
        c->bit_rate = videoBitrateInBitsPerSec;
        /* resolution must be a multiple of two */
        c->width = width;
        c->height = height;
        /* time base: this is the fundamental unit of time (in seconds) in terms
           of which frame timestamps are represented. for fixed-fps content,
           timebase should be 1/framerate and timestamp increments should be
           identically 1. */
        c->time_base.den = STREAM_FRAME_RATE;
        c->time_base.num = 1;
        c->gop_size = 12; /* emit one intra frame every twelve frames at most */
        c->pix_fmt = STREAM_PIX_FMT;
        if (c->codec_id == CODEC_ID_MPEG2VIDEO) {
            /* just for testing, we also add B frames */
            c->max_b_frames = 2;
        }
        if (c->codec_id == CODEC_ID_MPEG1VIDEO) {
            /* Needed to avoid using macroblocks in which some coeffs overflow.
               This does not happen with normal video, it just happens here as
               the motion of the chroma plane does not match the luma plane. */
            c->mb_decision=2;
        }
        // some formats want stream headers to be separate
        if (ts->oc->oformat->flags & AVFMT_GLOBALHEADER)
            c->flags |= CODEC_FLAG_GLOBAL_HEADER;

    }
    //av_set_pts_info(ts->audio_st, 33, 1, audioBitRateInBitsPerSec);

#ifndef AVIO_FLAG_WRITE
	/* set the output parameters (must be done even if no
       parameters). */
    if (av_set_parameters(ts->oc, NULL) < 0) {
        fprintf(stderr, "Invalid output format parameters\n");
        return NULL;
    }
#endif
	
	dump_format(ts->oc, 0, avr->destination, 1);
    GF_LOG(GF_LOG_INFO, GF_LOG_MODULE, ("[AVRedirect] DUMPING to %s...\n", ts->destination));

#if (LIBAVCODEC_VERSION_MAJOR<55)
    if (avcodec_open(ts->video_st->codec, avr->videoCodec) < 0) {
#else
	if (avcodec_open2(ts->video_st->codec, avr->videoCodec, NULL) < 0) {
#endif
		GF_LOG(GF_LOG_ERROR, GF_LOG_MODULE, ("[AVRedirect] failed to open video codec\n"));
        return NULL;
    }
#if REDIRECT_AV_AUDIO_ENABLED
#if (LIBAVCODEC_VERSION_MAJOR<55)
    if (avcodec_open(ts->audio_st->codec, avr->audioCodec) < 0) {
#else
	if (avcodec_open2(ts->audio_st->codec, avr->audioCodec, NULL) < 0) {
#endif
		GF_LOG(GF_LOG_ERROR, GF_LOG_MODULE, ("[AVRedirect] failed to open audio codec\n"));
        return NULL;
    }
    ts->audioMx = gf_mx_new("TS_AudioMx");
#endif
    ts->videoMx = gf_mx_new("TS_VideoMx");
    ts->tsEncodingThread = gf_th_new("ts_interleave_thread_run");
    ts->encode = 1;
    ts->audioPackets = NULL;
    ts->videoPackets = NULL;
    gf_th_run(ts->tsEncodingThread, ts_interleave_thread_run, ts);
    return ts;
}

void ts_amux_del(GF_AbstractTSMuxer * muxerToDelete) {
    if (!muxerToDelete)
        return;
    muxerToDelete->encode = 0;
    gf_sleep(100);
    gf_th_stop(muxerToDelete->tsEncodingThread);
    muxerToDelete->tsEncodingThread = NULL;
#if REDIRECT_AV_AUDIO_ENABLED
    gf_mx_del(muxerToDelete->audioMx);
    muxerToDelete->audioMx = NULL;
#endif
    gf_mx_del(muxerToDelete->videoMx);
    muxerToDelete->videoMx = NULL;
    if (muxerToDelete->video_st) {
        avcodec_close(muxerToDelete->video_st->codec);
        muxerToDelete->video_st = NULL;
    }
#if REDIRECT_AV_AUDIO_ENABLED
    if (muxerToDelete->audio_st) {
        avcodec_close(muxerToDelete->audio_st->codec);
        muxerToDelete->audio_st = NULL;
    }
#endif
    /* write the trailer, if any. The trailer must be written
     * before you close the CodecContexts you opened when you wrote the
     * header; otherwise write_trailer may try to use memory that
     * was freed by avcodec_close() */
    if (muxerToDelete->oc) {
        u32 i;
        /* free the streams */
        for (i = 0; i < muxerToDelete->oc->nb_streams; i++) {
            av_freep(&muxerToDelete->oc->streams[i]->codec);
            av_freep(&muxerToDelete->oc->streams[i]);
        }

        /* free the stream */
        av_free(muxerToDelete->oc);
        muxerToDelete->oc = NULL;
    }
}
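
A minimal lifecycle sketch pairing the two functions, assuming "avr" is a fully initialized GF_AVRedirect with its codecs already selected (the numeric parameters are arbitrary examples):

GF_AbstractTSMuxer *mux = ts_amux_new(avr, 400000, 640, 480, 96000);
if (!mux) return GF_IO_ERR;	/* codec or format setup failed, reason was logged */
/* ... push frames with ts_encode_video_frame()/ts_encode_audio_frame() ... */
ts_amux_del(mux);	/* stops the interleave thread, closes codecs, frees streams */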

Bool ts_encode_audio_frame(GF_AbstractTSMuxer * ts, uint8_t * data, int encoded, u64 pts) {
    AVPacketList *pl;
    AVPacket * pkt;
    if (!ts->encode)
        return 1;
    pl = gf_malloc(sizeof(AVPacketList));
    pl->next = NULL;
    pkt = &(pl->pkt);
    av_init_packet(pkt);
    assert( ts->audio_st);
    assert( ts->audio_st->codec);
    pkt->flags = 0;
    if (ts->audio_st->codec->coded_frame) {
        if (ts->audio_st->codec->coded_frame->key_frame)
            pkt->flags = AV_PKT_FLAG_KEY;
        if (ts->audio_st->codec->coded_frame->pts != AV_NOPTS_VALUE) {
            pkt->pts = av_rescale_q(ts->audio_st->codec->coded_frame->pts, ts->audio_st->codec->time_base, ts->audio_st->time_base);
        } else {
            if (pts == AV_NOPTS_VALUE)
                pkt->pts = AV_NOPTS_VALUE;
            else {
                pkt->pts = av_rescale_q(pts, ts->audio_st->codec->time_base, ts->audio_st->time_base);
            }
        }
    } else {
        if (pts == AV_NOPTS_VALUE)
            pkt->pts = AV_NOPTS_VALUE;
        else
            pkt->pts = av_rescale_q(pts, ts->audio_st->codec->time_base, ts->audio_st->time_base);
    }
    pkt->stream_index= ts->audio_st->index;
    pkt->data = data;
    pkt->size = encoded;
    //fprintf(stderr, "AUDIO PTS="LLU" was: "LLU" (%p)\n", pkt->pts, pts, pl);
    gf_mx_p(ts->audioMx);
    if (!ts->audioPackets)
        ts->audioPackets = pl;
    else {
        AVPacketList * px = ts->audioPackets;
        while (px->next)
            px = px->next;
        px->next = pl;
    }
    gf_mx_v(ts->audioMx);
    return 0;
}

Bool ts_encode_video_frame(GF_AbstractTSMuxer* ts, uint8_t* data, int encoded) {
    AVPacketList *pl;
    AVPacket * pkt;
    if (!ts->encode)
        return 1;
    pl = gf_malloc(sizeof(AVPacketList));
    pl->next = NULL;
    pkt = &(pl->pkt);

    av_init_packet(pkt);

    if (ts->video_st->codec->coded_frame->pts != AV_NOPTS_VALUE) {
        //pkt->pts= av_rescale_q(ts->video_st->codec->coded_frame->pts, ts->video_st->codec->time_base, ts->video_st->time_base);
        pkt->pts = ts->video_st->codec->coded_frame->pts * ts->video_st->time_base.den / ts->video_st->time_base.num / 1000;
        //pkt->pts = ts->video_st->codec->coded_frame->pts;
    }
    if (ts->video_st->codec->coded_frame->key_frame)
        pkt->flags |= AV_PKT_FLAG_KEY;
    pkt->stream_index= ts->video_st->index;
    pkt->data= data;
    pkt->size= encoded;
    //fprintf(stderr, "VIDEO PTS="LLU" was: "LLU" (%p)\n", pkt->pts, ts->video_st->codec->coded_frame->pts, pl);
    gf_mx_p(ts->videoMx);
    if (!ts->videoPackets)
        ts->videoPackets = pl;
    else {
        AVPacketList * px = ts->videoPackets;
        while (px->next)
            px = px->next;
        px->next = pl;
    }
    gf_mx_v(ts->videoMx);
    return 0;
}
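
Both encoders append to singly linked AVPacketList queues under a mutex; the consumer side (ts_interleave_thread_run, not shown in this excerpt) pops packets and hands them to libavformat. A hedged sketch of one such drain step, assuming the container header has already been written:

static void drain_video_queue(GF_AbstractTSMuxer *ts) {
    AVPacketList *pl;
    gf_mx_p(ts->videoMx);
    pl = ts->videoPackets;
    if (pl) ts->videoPackets = pl->next;
    gf_mx_v(ts->videoMx);
    if (!pl) return;
    /* av_interleaved_write_frame muxes and reorders packets by dts */
    av_interleaved_write_frame(ts->oc, &pl->pkt);
    gf_free(pl);
}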
Example No. 17
0
static GF_Descriptor *M2TS_GetServiceDesc(GF_InputService *plug, u32 expect_type, const char *sub_url)
{
	M2TSIn *m2ts = plug->priv;
	GF_Descriptor *desc = NULL;
	char *frag;
	M2TSIn_Prog *prog;

	if (sub_url && !strnicmp(sub_url, "pid://", 6)) {
		GF_ObjectDescriptor *od;
		u32 pid = atoi(sub_url+6);
		if (pid>=GF_M2TS_MAX_STREAMS) return NULL;
		od = MP2TS_GetOD(m2ts, (GF_M2TS_PES*) m2ts->ts->ess[pid], NULL, 0, NULL);
		return (GF_Descriptor *) od;
	}

	frag = sub_url ? strrchr(sub_url, '#') : NULL;
	if (frag) frag++;

	/* consider the channel name in DVB URL as a fragment */
	if (!frag && sub_url && !strncmp(sub_url, "dvb://", 6)) {
		frag = (char*)sub_url + 6;
	}

	if (!frag) {
		m2ts->request_all_pids = 1;
	} else {
		/*we need exclusive access*/
		gf_mx_p(m2ts->mx);
		if (!strnicmp(frag, "pid=", 4)) {
			GF_SAFEALLOC(prog, M2TSIn_Prog);
			prog->pid = atoi(frag+4);
			gf_list_add(m2ts->ts->requested_pids, prog);
		} else if (!strnicmp(frag, "EPG", 3)) {
			m2ts->epg_requested = 1;
		} else {
			u32 i, count;
			count = gf_list_count(m2ts->ts->requested_progs);
			prog = NULL;
			for (i=0; i<count; i++) {
				prog = gf_list_get(m2ts->ts->requested_progs, i);
				if (!strcmp(prog->fragment, frag))
					break;
				prog = NULL;
			}
			if (!prog) {
				GF_SAFEALLOC(prog, M2TSIn_Prog);
				gf_list_add(m2ts->ts->requested_progs, prog);
				prog->fragment = gf_strdup(frag);
			}
		}
		gf_mx_v(m2ts->mx);
	}

	/*if type is undefined, check the PMT for an IOD*/
	if (expect_type<=GF_MEDIA_OBJECT_SCENE) {
		if (gf_list_count(m2ts->ts->programs) == 1) {
			GF_M2TS_Program *prog = gf_list_get(m2ts->ts->programs, 0);
			if (prog->pmt_iod) {
				m2ts->request_all_pids = 0;
				gf_odf_desc_copy((GF_Descriptor *)prog->pmt_iod, &desc);
				((GF_InitialObjectDescriptor *)desc)->service_ifce = m2ts->owner;
				return desc;
			}
		}
		/*if we expect scene, return NULL and repost a connection ack when we get the PMT*/
		if (expect_type==GF_MEDIA_OBJECT_SCENE)
			return NULL;
		if (m2ts->epg_requested) {
			GF_ObjectDescriptor *od = M2TS_GenerateEPG_OD(m2ts);
			m2ts->epg_requested = 0;
			return (GF_Descriptor *)od;
		} else {
			/*returning an empty IOD means "no scene description", let the terminal handle all media objects*/
			desc = gf_odf_desc_new(GF_ODF_IOD_TAG);
			((GF_ObjectDescriptor *) desc)->objectDescriptorID = 1;
			return desc;
		}
	}

	/* restart the thread if the same service is reused and if the previous thread terminated */
	if (m2ts->ts->run_state == 2) {
		m2ts->ts->file_regulate = 0;
		gf_m2ts_demuxer_play(m2ts->ts);
	}

	return NULL;
}
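
The fragment syntaxes recognized above, summarized (the scheme and service part of the URL depend on how the service was opened, so only the fragment forms are shown here):

    #pid=120           request a single PID from the mux
    #EPG               request the event information (EPG) object
    #ServiceName       request a program by SDT service name or number
    pid://120          direct object descriptor for PID 120
    dvb://ChannelName  channel name used as the fragment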
Example No. 18
0
static u32 FFDemux_Run(void *par)
{
	AVPacket pkt;
	s64 seek_to;
	u64 seek_audio, seek_video;
	Bool video_init, do_seek, map_audio_time, map_video_time;
	GF_NetworkCommand com;
	GF_NetworkCommand map;
	GF_SLHeader slh;
	FFDemux *ffd = (FFDemux *) par;

	memset(&map, 0, sizeof(GF_NetworkCommand));
	map.command_type = GF_NET_CHAN_MAP_TIME;

	memset(&com, 0, sizeof(GF_NetworkCommand));
	com.command_type = GF_NET_CHAN_BUFFER_QUERY;

	memset(&slh, 0, sizeof(GF_SLHeader));

	slh.compositionTimeStampFlag = slh.decodingTimeStampFlag = 1;
	seek_to = (s64) (AV_TIME_BASE*ffd->seek_time);
	map_video_time = !ffd->seekable;

	video_init = (seek_to && ffd->video_ch) ? 0 : 1;
	seek_audio = seek_video = 0;
	if (ffd->seekable && (ffd->audio_st>=0)) seek_audio = (u64) (s64) (ffd->seek_time*ffd->audio_tscale.den);
	if (ffd->seekable && (ffd->video_st>=0)) seek_video = (u64) (s64) (ffd->seek_time*ffd->video_tscale.den);

	/*it appears that ffmpeg has trouble resyncing on some mpeg files - we trick it by restarting at 0 to get the
	first video frame, and only then seeking*/
	if (ffd->seekable) av_seek_frame(ffd->ctx, -1, video_init ? seek_to : 0, AVSEEK_FLAG_BACKWARD);
	do_seek = !video_init;
	map_audio_time = video_init ? ffd->unreliable_audio_timing : 0;

	while (ffd->is_running) {

		pkt.stream_index = -1;
		/*EOF*/
		if (av_read_frame(ffd->ctx, &pkt) <0) break;
		if (pkt.pts == AV_NOPTS_VALUE) pkt.pts = pkt.dts;
		if (!pkt.dts) pkt.dts = pkt.pts;

		slh.compositionTimeStamp = pkt.pts;
		slh.decodingTimeStamp = pkt.dts;

		gf_mx_p(ffd->mx);
		/*blindly send audio as soon as video is init*/
		if (ffd->audio_ch && (pkt.stream_index == ffd->audio_st) && !do_seek) {
			slh.compositionTimeStamp *= ffd->audio_tscale.num;
			slh.decodingTimeStamp *= ffd->audio_tscale.num;

			if (map_audio_time) {
				map.base.on_channel = ffd->audio_ch;
				map.map_time.media_time = ffd->seek_time;
				/*map with TS=0 since we don't use SL*/
				map.map_time.timestamp = 0;
				map.map_time.reset_buffers = 1;
				map_audio_time = 0;
				gf_term_on_command(ffd->service, &map, GF_OK);
			}
			else if (slh.compositionTimeStamp < seek_audio) {
				slh.decodingTimeStamp = slh.compositionTimeStamp = seek_audio;
			}
			gf_term_on_sl_packet(ffd->service, ffd->audio_ch, pkt.data, pkt.size, &slh, GF_OK);
		}
		else if (ffd->video_ch && (pkt.stream_index == ffd->video_st)) {
			slh.compositionTimeStamp *= ffd->video_tscale.num;
			slh.decodingTimeStamp *= ffd->video_tscale.num;

			/*if we get pts = 0 after a seek, the demuxer is resetting PTSs, so force map time*/
			if ((!do_seek && seek_to && !slh.compositionTimeStamp) || (map_video_time) ) {
				seek_to = 0;
				map_video_time = 0;

				map.base.on_channel = ffd->video_ch;
				map.map_time.timestamp = (u64) pkt.pts;
//				map.map_time.media_time = ffd->seek_time;
				map.map_time.media_time = 0;
				map.map_time.reset_buffers = 0;
				gf_term_on_command(ffd->service, &map, GF_OK);
			}
			else if (slh.compositionTimeStamp < seek_video) {
				slh.decodingTimeStamp = slh.compositionTimeStamp = seek_video;
			}
			gf_term_on_sl_packet(ffd->service, ffd->video_ch, pkt.data, pkt.size, &slh, GF_OK);
			video_init = 1;
		}
		gf_mx_v(ffd->mx);
		av_free_packet(&pkt);

		/*here's the trick - only seek after sending the first packets of each stream - this allows ffmpeg video decoders
		to resync properly*/
		if (do_seek && video_init && ffd->seekable) {
			av_seek_frame(ffd->ctx, -1, seek_to, AVSEEK_FLAG_BACKWARD);
			do_seek = 0;
			map_audio_time = ffd->unreliable_audio_timing;
		}
		/*sleep until the buffer occupancy drops low enough - note that this works because all streams in this
		demuxer are synchronized*/
		while (1) {
			if (ffd->audio_ch) {
				com.base.on_channel = ffd->audio_ch;
				gf_term_on_command(ffd->service, &com, GF_OK);
				if (com.buffer.occupancy < ffd->data_buffer_ms) break;
			}
			if (ffd->video_ch) {
				com.base.on_channel = ffd->video_ch;
				gf_term_on_command(ffd->service, &com, GF_OK);
				if (com.buffer.occupancy < ffd->data_buffer_ms) break;
			}
			gf_sleep(10);

			/*escape if disconnect*/
			if (!ffd->audio_run && !ffd->video_run) break;
		}
		if (!ffd->audio_run && !ffd->video_run) break;
	}
	/*signal EOS*/
	if (ffd->audio_ch) gf_term_on_sl_packet(ffd->service, ffd->audio_ch, NULL, 0, NULL, GF_EOS);
	if (ffd->video_ch) gf_term_on_sl_packet(ffd->service, ffd->video_ch, NULL, 0, NULL, GF_EOS);
	ffd->is_running = 2;

	return 0;
}
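
One detail worth noting: with stream_index -1, av_seek_frame() expects the target in AV_TIME_BASE units (microseconds), and AVSEEK_FLAG_BACKWARD snaps to the closest preceding keyframe. Hence the conversion used above, sketched:

static s64 seconds_to_av_time(Double seconds)
{
	/* AV_TIME_BASE is 1000000: av_seek_frame(ctx, -1, ts, flags) takes microseconds */
	return (s64) (AV_TIME_BASE * seconds);
}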
Example No. 19
0
NPT_Result GPAC_GenericController::OnActionResponse(NPT_Result res, PLT_ActionReference& action, void* userdata)
{
#ifdef GPAC_HAS_SPIDERMONKEY
	u32 i, count;
	GPAC_DeviceItem *item = NULL;
	GPAC_ServiceItem *serv = NULL;
	GPAC_ActionArgListener *argl, *act_l;
	PLT_Service* service = action->GetActionDesc().GetService();
	NPT_String uuid;
	GPAC_ActionUDTA *act_udta = (GPAC_ActionUDTA *)userdata;

	/*this is NOT an actionResponse to an action triggered on a generic device*/
	if (act_udta && act_udta->m_Reserved) act_udta = NULL;

	GF_LOG(GF_LOG_INFO, GF_LOG_NETWORK, ("[UPnP] Receive %s Response - error code %d\n", (char *) action->GetActionDesc().GetName(), res));

	gf_mx_p(m_ControlPointLock);

	/*get our device*/
	uuid = service->GetDevice()->GetUUID();
	count = gf_list_count(m_Devices);
	for (i=0; i<count; i++) {
		item = (GPAC_DeviceItem *) gf_list_get(m_Devices, i);
		if (item->m_UUID == uuid ) {
			break;
		}
		item = NULL;
	}
	gf_mx_v(m_ControlPointLock);

	if (!item) {
		GF_LOG(GF_LOG_ERROR, GF_LOG_NETWORK, ("[UPnP] Receive %s Response on unknown device (uuid %s)\n", (char *) action->GetActionDesc().GetName(), (char *) uuid));
		goto exit;
	}
	/*get our service*/
	count = gf_list_count(item->m_Services);
	for (i=0; i<count; i++) {
		serv = (GPAC_ServiceItem *)gf_list_get(item->m_Services, i);
		if (serv->m_service == service) break;
	}
	if (!serv) {
		GF_LOG(GF_LOG_ERROR, GF_LOG_NETWORK, ("[UPnP] Receive %s Response on unknown service %s\n", (char *) action->GetActionDesc().GetName(), (char *) service->GetServiceType()));
		goto exit;
	}

	/*locate our listeners*/
	act_l = NULL;
	i=0;
	while ((argl = (GPAC_ActionArgListener *)gf_list_enum(serv->m_ArgListeners, &i))) {
		NPT_String value;
		GF_LOG(GF_LOG_INFO, GF_LOG_NETWORK, ("[UPnP] checking argument %s\n", (char *) argl->action->GetName() ));
		if (argl->action->GetName() != action->GetActionDesc().GetName() ) continue;

		/*global action listener*/
		if (argl->arg==NULL) {
			act_l = argl;
			continue;
		}
		/*if error don't trigger listeners*/
		if (res != NPT_SUCCESS) {
			GF_LOG(GF_LOG_WARNING, GF_LOG_NETWORK, ("[UPnP] Receive %s Response: error on remote device %d\n", (char *) action->GetActionDesc().GetName(), res));
			continue;
		}
		/*action arg listener*/
		if (action->GetArgumentValue(argl->arg->GetName(), value) == NPT_SUCCESS) {
			jsval argv[1], rval;

			GF_LOG(GF_LOG_INFO, GF_LOG_NETWORK, ("[UPnP] Calling handler for response %s argument %s\n", (char *) action->GetActionDesc().GetName(), (char *) argl->arg->GetName() ));
			m_pUPnP->LockJavascript(1);
			argv[0] = STRING_TO_JSVAL( JS_NewStringCopyZ(serv->js_ctx, value) );
			JS_CallFunctionValue(serv->js_ctx, serv->obj, argl->on_event, 1, argv, &rval);
			m_pUPnP->LockJavascript(0);
		} else {
			GF_LOG(GF_LOG_ERROR, GF_LOG_NETWORK, ("[UPnP] %s Response: couldn't get argument %s value\n", (char *) action->GetActionDesc().GetName(), (char *) argl->arg->GetName() ));
		}
	}

	if (act_l) {
		jsval rval;
		m_pUPnP->LockJavascript(1);
		if (act_l->is_script) {
			JSObject *act_obj;
			jsval argv[2];

			GF_LOG(GF_LOG_INFO, GF_LOG_NETWORK, ("[UPnP] Calling handler for response %s\n", (char *) action->GetActionDesc().GetName()));

			act_obj = JS_NewObject(serv->js_ctx, &item->m_pUPnP->upnpDeviceClass._class, 0, item->obj);
			SMJS_SET_PRIVATE(serv->js_ctx, act_obj, (void *)action.AsPointer() );
			JS_DefineFunction(serv->js_ctx, act_obj, "GetArgumentValue", upnp_action_get_argument_value, 1, 0);
			JS_DefineFunction(serv->js_ctx, act_obj, "GetErrorCode", upnp_action_get_error_code, 1, 0);
			JS_DefineFunction(serv->js_ctx, act_obj, "GetError", upnp_action_get_error, 1, 0);

			gf_js_add_root(serv->js_ctx, &act_obj, GF_JSGC_OBJECT);
			argv[0] = OBJECT_TO_JSVAL(act_obj);
			if (act_udta) {
				argv[1] = act_udta->udta;
				JS_CallFunctionValue(serv->js_ctx, serv->obj, act_l->on_event, 2, argv, &rval);
			} else {
				JS_CallFunctionValue(serv->js_ctx, serv->obj, act_l->on_event, 1, argv, &rval);
			}
			gf_js_remove_root(serv->js_ctx, &act_obj, GF_JSGC_OBJECT);
		}
		/*if error don't trigger listeners*/
		else if (res == NPT_SUCCESS) {
			GF_LOG(GF_LOG_INFO, GF_LOG_NETWORK, ("[UPnP] Calling handler for response %s\n", (char *) action->GetActionDesc().GetName()));
			JS_CallFunctionValue(serv->js_ctx, serv->obj, act_l->on_event, 0, 0, &rval);
		}
		else {
			GF_LOG(GF_LOG_ERROR, GF_LOG_NETWORK, ("[UPnP] response %s has error %d\n", (char *) action->GetActionDesc().GetName(), res ));
		}
		m_pUPnP->LockJavascript(0);
	}
	GF_LOG(GF_LOG_INFO, GF_LOG_NETWORK, ("[UPnP] Done processing response %s\n", (char *) action->GetActionDesc().GetName()));

exit:
	if (act_udta) {
		gf_js_remove_root(serv->js_ctx, &act_udta->udta, GF_JSGC_VAL);
		delete act_udta;
	}

	return NPT_SUCCESS;
#else
	return NPT_SUCCESS;
#endif
}
Example No. 20
0
File: decoder.c Project: erelh/gpac
GF_Err gf_codec_add_channel(GF_Codec *codec, GF_Channel *ch)
{
	GF_Err e;
	GF_NetworkCommand com;
	GF_Channel *a_ch;
	u32 CUsize, i;
	GF_CodecCapability cap;
	u32 min, max;


	/*only for valid codecs (eg not OCR)*/
	if (codec->decio) {
		com.get_dsi.dsi = NULL;
		if (ch->esd->decoderConfig->upstream) codec->flags |= GF_ESM_CODEC_HAS_UPSTREAM;
		/*For objects declared in OD stream, override with network DSI if any*/
		if (ch->service && !(ch->odm->flags & GF_ODM_NOT_IN_OD_STREAM) ) {
			com.command_type = GF_NET_CHAN_GET_DSI;
			com.base.on_channel = ch;
			e = gf_term_service_command(ch->service, &com);
			if (!e && com.get_dsi.dsi) {
				if (ch->esd->decoderConfig->decoderSpecificInfo->data) gf_free(ch->esd->decoderConfig->decoderSpecificInfo->data);
				ch->esd->decoderConfig->decoderSpecificInfo->data = com.get_dsi.dsi;
				ch->esd->decoderConfig->decoderSpecificInfo->dataLength = com.get_dsi.dsi_len;
			}
		}
		GF_LOG(GF_LOG_DEBUG, GF_LOG_CODEC, ("[Codec] Attaching stream %d to codec %s\n", ch->esd->ESID, codec->decio->module_name));

		/*lock the channel before setup in case we are using direct_decode */
		gf_mx_p(ch->mx);
		e = codec->decio->AttachStream(codec->decio, ch->esd);
		gf_mx_v(ch->mx);

		if (ch->esd->decoderConfig && ch->esd->decoderConfig->rvc_config) {
			gf_odf_desc_del((GF_Descriptor *)ch->esd->decoderConfig->rvc_config);
			ch->esd->decoderConfig->rvc_config = NULL;
		}

		if (e) {
			GF_LOG(GF_LOG_DEBUG, GF_LOG_CODEC, ("[Codec] Attach Stream failed %s\n", gf_error_to_string(e) ));
			return e;
		}

		/*ask codec for desired output capacity - note this may be 0 if stream is not yet configured*/
		cap.CapCode = GF_CODEC_OUTPUT_SIZE;
		gf_codec_get_capability(codec, &cap);
		if (codec->CB && (cap.cap.valueInt != codec->CB->UnitSize)) {
			gf_cm_del(codec->CB);
			codec->CB = NULL;
		}
		CUsize = cap.cap.valueInt;

		/*get desired amount of units and minimal fullness (used for scheduling)*/
		switch(codec->type) {
		case GF_STREAM_VISUAL:
		case GF_STREAM_AUDIO:
			cap.CapCode = GF_CODEC_BUFFER_MIN;
			cap.cap.valueInt = 1;
			gf_codec_get_capability(codec, &cap);
			min = cap.cap.valueInt;
			cap.CapCode = GF_CODEC_BUFFER_MAX;
			cap.cap.valueInt = 1;
			gf_codec_get_capability(codec, &cap);
			max = cap.cap.valueInt;
			break;
		case GF_STREAM_ND_SUBPIC:
			max = 1;
			min = 0;
			break;
		default:
			min = max = 0;
			break;
		}
		if ((codec->type==GF_STREAM_AUDIO) && (max<2)) max = 2;

		/*setup CB*/
		if (!codec->CB && max) {
			if (codec->flags & GF_ESM_CODEC_IS_RAW_MEDIA) {
				max = 1;
				/*create a semaphore in non-notified stage*/
				codec->odm->raw_frame_sema = gf_sema_new(1, 0);
			}

			GF_LOG(GF_LOG_DEBUG, GF_LOG_CODEC, ("[ODM] Creating composition buffer for codec %s - %d units %d bytes each\n", codec->decio->module_name, max, CUsize));

			codec->CB = gf_cm_new(CUsize, max, (codec->flags & GF_ESM_CODEC_IS_RAW_MEDIA) ? 1 : 0);
			codec->CB->Min = min;
			codec->CB->odm = codec->odm;
		}

		if (codec->CB) {
			/*check re-ordering - set by default on all codecs*/
			codec->is_reordering = 1;
			cap.CapCode = GF_CODEC_REORDER;
			if (gf_codec_get_capability(codec, &cap) == GF_OK)
				codec->is_reordering = cap.cap.valueInt;
		}

		if (codec->flags & GF_ESM_CODEC_IS_RAW_MEDIA) {
			ch->is_raw_channel = 1;
		}

		/*setup net channel config*/
		if (ch->service) {
			memset(&com, 0, sizeof(GF_NetworkCommand));
			com.command_type = GF_NET_CHAN_CONFIG;
			com.base.on_channel = ch;

			com.cfg.priority = ch->esd->streamPriority;
			assert( ch->clock );
			com.cfg.sync_id = ch->clock->clockID;
			memcpy(&com.cfg.sl_config, ch->esd->slConfig, sizeof(GF_SLConfig));
			/*get the frame duration if audio (used by some network stack)*/
			if (ch->odm->codec && (ch->odm->codec->type==GF_STREAM_AUDIO) ) {
				cap.CapCode = GF_CODEC_SAMPLERATE;
				gf_codec_get_capability(ch->odm->codec, &cap);
				com.cfg.sample_rate = cap.cap.valueInt;
				cap.CapCode = GF_CODEC_CU_DURATION;
				gf_codec_get_capability(ch->odm->codec, &cap);
				com.cfg.frame_duration = cap.cap.valueInt;
			}
			gf_term_service_command(ch->service, &com);

			ch->carousel_type = GF_ESM_CAROUSEL_NONE;
			if (com.cfg.use_m2ts_sections) {
				ch->carousel_type = GF_ESM_CAROUSEL_MPEG2;
			} else {
				switch (ch->esd->decoderConfig->streamType) {
				case GF_STREAM_OD:
				case GF_STREAM_SCENE:
					ch->carousel_type = ch->esd->slConfig->AUSeqNumLength ? GF_ESM_CAROUSEL_MPEG4 : GF_ESM_CAROUSEL_NONE;
					break;
				}
			}

		}
	} else if (codec->flags & GF_ESM_CODEC_IS_RAW_MEDIA) {
		cap.CapCode = GF_CODEC_OUTPUT_SIZE;
		gf_codec_get_capability(codec, &cap);
		if (codec->CB && (cap.cap.valueInt != codec->CB->UnitSize)) {
			gf_cm_del(codec->CB);
			codec->CB = NULL;
		}
		CUsize = cap.cap.valueInt;
		/*create a semaphore in non-notified stage*/
		codec->odm->raw_frame_sema = gf_sema_new(1, 0);

		codec->CB = gf_cm_new(CUsize, 1, 1);
		codec->CB->Min = 0;
		codec->CB->odm = codec->odm;
		ch->is_raw_channel = 1;
		if (gf_es_owns_clock(ch))
			ch->is_raw_channel = 2;

		if (ch->is_pulling) {
			codec->process = gf_codec_process_raw_media_pull;
		}
	}


	/*assign the first base layer as the codec clock by default, or the current channel clock if no clock is set.
	Also assign codec priority here*/
	if (!ch->esd->dependsOnESID || !codec->ck) {
		codec->ck = ch->clock;
		/*insert base layer first - note we are sure this is a stream of the same type
		as the codec (other streams - OCI, MPEG7, MPEGJ - are not added that way)*/
		return gf_list_insert(codec->inChannels, ch, 0);
	}
	else {
		/*make sure all channels are in order*/
		i=0;
		while ((a_ch = (GF_Channel*)gf_list_enum(codec->inChannels, &i))) {
			if (ch->esd->dependsOnESID == a_ch->esd->ESID) {
				return gf_list_insert(codec->inChannels, ch, i);
			}
			if (a_ch->esd->dependsOnESID == ch->esd->ESID) {
				return gf_list_insert(codec->inChannels, ch, i-1);
			}
		}
		/*by default append*/
		return gf_list_add(codec->inChannels, ch);
	}
}
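
The capability negotiation used throughout this function follows a single pattern: set CapCode, preload a default, and let the decoder module overwrite it. A short sketch:

GF_CodecCapability cap;
cap.CapCode = GF_CODEC_OUTPUT_SIZE;
cap.cap.valueInt = 0;	/*default kept if the decoder does not answer*/
gf_codec_get_capability(codec, &cap);
if (cap.cap.valueInt) {
	/*the decoder reported its composition unit size, in bytes*/
}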
Example No. 21
0
void gf_sr_simulation_tick(GF_Renderer *sr)
{	
	u32 in_time, end_time, i, count;

	/*lock renderer for the whole render cycle*/
	gf_sr_lock(sr, 1);

	/*first thing to do, let the video output handle user event if it is not threaded*/
	sr->video_out->ProcessEvent(sr->video_out, NULL);

	if (sr->freeze_display) {
		gf_sr_lock(sr, 0);
		gf_sleep(sr->frame_duration);
		return;
	}

	gf_sr_reconfig_task(sr);

	/* if there is no scene, we draw a black screen to flush the screen */
	if (!sr->scene) {
		sr->visual_renderer->DrawScene(sr->visual_renderer);
		gf_sr_lock(sr, 0);
		gf_sleep(sr->frame_duration);
		return;
	}

//	GF_LOG(GF_LOG_DEBUG, GF_LOG_COMPOSE, ("[General] Time %f - Composing new frame #%d\n", gf_node_get_scene_time(gf_sg_get_root_node(sr->scene)), sr->frame_number));

	in_time = gf_sys_clock();
	if (sr->reset_graphics) sr->draw_next_frame = 1;

#ifdef GF_SR_EVENT_QUEUE
	/*process pending user events*/
	gf_mx_p(sr->ev_mx);
	while (gf_list_count(sr->events)) {
		GF_Event *ev = (GF_Event*)gf_list_get(sr->events, 0);
		gf_list_rem(sr->events, 0);
		if (!sr->visual_renderer->ExecuteEvent(sr->visual_renderer, ev)) {
			SR_ForwardUserEvent(sr, ev);
		}
		free(ev);
	}
	gf_mx_v(sr->ev_mx);
#endif


#if 0
	if (sr->frame_number == 0 && sr->user->EventProc) {
		GF_Event evt;
		evt.type = GF_EVENT_UPDATE_RTI;
		evt.caption.caption = "UPDATE - Before first call to draw scene";
		sr->user->EventProc(sr->user->opaque, &evt);
	}
#endif

	/*execute all routes before updating textures, otherwise nodes inside composite texture may never see their
	dirty flag set*/
	gf_sg_activate_routes(sr->scene);

#ifndef GPAC_DISABLE_SVG
#if SVG_FIXME
	{ /* Experimental: non-SVG-compliant system events (i.e. battery, cpu ...) triggered at the root node */
		GF_Node *root = gf_sg_get_root_node(sr->scene);
		GF_DOM_Event evt;
		if (gf_dom_listener_count(root)) {
			u32 i, count;
			count = gf_dom_listener_count(root);
			for (i=0;i<count; i++) {
				SVG_SA_listenerElement *l = gf_dom_listener_get(root, i);
				if (l->event.type == GF_EVENT_CPU) {
					GF_SystemRTInfo sys_rti;
					if (gf_sys_get_rti(500, &sys_rti, GF_RTI_ALL_PROCESSES_TIMES)) {
						evt.type = GF_EVENT_CPU;
						evt.cpu_percentage = sys_rti.total_cpu_usage;
						//printf("%d\n",sys_rti.total_cpu_usage);
						gf_dom_event_fire(root, NULL, &evt);
					} 
				} else if (l->event.type == GF_EVENT_BATTERY) { //&& l->observer.target == (SVG_SA_Element *)node) {
					evt.type = GF_EVENT_BATTERY;
					gf_sys_get_battery_state(&evt.onBattery, &evt.batteryState, &evt.batteryLevel);
					gf_dom_event_fire(root, NULL, &evt);
				}
			}
		}
	}
#endif

	if (gf_smil_notify_timed_elements(sr->scene)) {
		sr->draw_next_frame = 1;
	}
#if 0
	for (i=0; i<gf_list_count(sr->secondary_scenes); i++) {
		if (gf_smil_notify_timed_elements(gf_list_get(sr->secondary_scenes, i))) {
			sr->draw_next_frame = 1;
		}
	}
#endif

#endif

	/*update all textures*/
	count = gf_list_count(sr->textures);
	for (i=0; i<count; i++) {
		GF_TextureHandler *st = (GF_TextureHandler *)gf_list_get(sr->textures, i);
		/*signal graphics reset before updating*/
		if (sr->reset_graphics && st->hwtx) sr->visual_renderer->TextureHWReset(st);
		st->update_texture_fcnt(st);
	}

	/*if invalidated, draw*/
	if (sr->draw_next_frame) {
		/*video flush only*/
		if (sr->draw_next_frame==2) {
			GF_Window rc;
			rc.x = rc.y = 0; 
			rc.w = sr->width;	
			rc.h = sr->height;		
			sr->draw_next_frame = 0;
			sr->video_out->Flush(sr->video_out, &rc);
		} else {
			sr->draw_next_frame = 0;
			GF_LOG(GF_LOG_DEBUG, GF_LOG_RENDER, ("[Render] Redrawing scene\n"));
			sr->visual_renderer->DrawScene(sr->visual_renderer);
#if 0
			if (sr->frame_number == 0 && sr->user->EventProc) {
				GF_Event evt;
				evt.type = GF_EVENT_UPDATE_RTI;
				evt.caption.caption = "Before first call to draw scene";
				sr->user->EventProc(sr->user->opaque, &evt);
			}
#endif
		}
		sr->reset_graphics = 0;

		GF_LOG(GF_LOG_INFO, GF_LOG_RENDER, ("[Render] Scene drawn in %d ms\n", gf_sys_clock() - in_time));

		if (sr->stress_mode) {
			sr->draw_next_frame = 1;
			sr->reset_graphics = 1;
		}
	}

	/*release all textures - we must release them to handle the same OD being used by several textures*/
	count = gf_list_count(sr->textures);
	for (i=0; i<count; i++) {
		GF_TextureHandler *st = (GF_TextureHandler *)gf_list_get(sr->textures, i);
		gf_sr_texture_release_stream(st);
	}

	/*update all timed nodes */
	for (i=0; i<gf_list_count(sr->time_nodes); i++) {
		GF_TimeNode *tn = (GF_TimeNode *)gf_list_get(sr->time_nodes, i);
		if (!tn->needs_unregister) tn->UpdateTimeNode(tn);
		if (tn->needs_unregister) {
			tn->is_registered = 0;
			tn->needs_unregister = 0;
			gf_list_rem(sr->time_nodes, i);
			i--;
			continue;
		}
	}

	end_time = gf_sys_clock() - in_time;

	gf_sr_lock(sr, 0);

	sr->current_frame = (sr->current_frame+1) % GF_SR_FPS_COMPUTE_SIZE;
	sr->frame_time[sr->current_frame] = end_time;

	sr->frame_number++;
#if 0
	if (sr->user->EventProc) {
		char legend[100];
		GF_Event evt;
		evt.type = GF_EVENT_UPDATE_RTI;
		sprintf(legend, "After rendering of frame %d", sr->frame_number);
		evt.caption.caption = legend;
		sr->user->EventProc(sr->user->opaque, &evt);
	}
#endif

	/*step mode on, pause and return*/
	if (sr->step_mode) {
		sr->step_mode = 0;
		if (sr->term) gf_term_set_option(sr->term, GF_OPT_PLAY_STATE, GF_STATE_PAUSED);
		return;
	}
	/*not threaded, let the owner decide*/
	if ((sr->user->init_flags & GF_TERM_NO_VISUAL_THREAD) || !sr->frame_duration) return;

	/*compute sleep time till next frame, otherwise we'll kill the CPU*/
	i=1;
	while (i * sr->frame_duration < end_time) i++;
	in_time = i * sr->frame_duration - end_time;
	gf_sleep(in_time);
}
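
The final loop searches for the smallest i such that i*frame_duration >= end_time; an equivalent closed form, for illustration only:

u32 fd = sr->frame_duration;
u32 i = (end_time <= fd) ? 1 : (end_time + fd - 1) / fd;	/*ceiling, at least 1*/
gf_sleep(i*fd - end_time);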
Example No. 22
0
void gf_term_add_codec(GF_Terminal *term, GF_Codec *codec)
{
	u32 i, count;
	Bool threaded;
	CodecEntry *cd;
	CodecEntry *ptr, *next;
	GF_CodecCapability cap;
	assert(codec);

	GF_LOG(GF_LOG_DEBUG, GF_LOG_MEDIA, ("[Terminal] Registering codec %s\n", codec->decio ? codec->decio->module_name : "RAW"));

	/*caution: the mutex can be grabbed by a decoder waiting for a mutex owned by the calling thread;
	this happens when several scene codecs run concurrently and trigger play/pause on media*/
	gf_mx_p(term->mm_mx);

	cd = mm_get_codec(term->codecs, codec);
	if (cd) goto exit;

	GF_SAFEALLOC(cd, CodecEntry);
	cd->dec = codec;
	if (!cd->dec->Priority)
		cd->dec->Priority = 1;

	/*we force audio codecs to be threaded in free mode, so that we avoid waiting in the audio renderer if another decoder is locking the main mutex;
	this can happen when the audio decoder is running late*/
	if (codec->type==GF_STREAM_AUDIO) {
		threaded = 1;
	} else {
		cap.CapCode = GF_CODEC_WANTS_THREAD;
		cap.cap.valueInt = 0;
		gf_codec_get_capability(codec, &cap);
		threaded = cap.cap.valueInt;
	}

	if (threaded) cd->flags |= GF_MM_CE_REQ_THREAD;


	if (term->flags & GF_TERM_MULTI_THREAD) {
		if ((codec->type==GF_STREAM_AUDIO) || (codec->type==GF_STREAM_VISUAL)) threaded = 1;
	} else if (term->flags & GF_TERM_SINGLE_THREAD) {
		threaded = 0;
	}
	if (codec->flags & GF_ESM_CODEC_IS_RAW_MEDIA)
		threaded = 0;

	if (threaded) {
		cd->thread = gf_th_new(cd->dec->decio->module_name);
		cd->mx = gf_mx_new(cd->dec->decio->module_name);
		cd->flags |= GF_MM_CE_THREADED;
		gf_list_add(term->codecs, cd);
		goto exit;
	}

	//add codec 1- per priority 2- per type, audio being first
	//priorities inherits from Systems (5bits) so range from 0 to 31
	//we sort from MAX to MIN
	count = gf_list_count(term->codecs);
	for (i=0; i<count; i++) {
		ptr = (CodecEntry*)gf_list_get(term->codecs, i);
		if (ptr->flags & GF_MM_CE_THREADED) continue;

		//higher priority, continue
		if (ptr->dec->Priority > codec->Priority) continue;

		//same priority, put audio first
		if (ptr->dec->Priority == codec->Priority) {
			//we insert audio (0x05) before video (0x04)
			if (ptr->dec->type < codec->type) {
				gf_list_insert(term->codecs, cd, i);
				goto exit;
			}
			//same priority, same type: insert after
			if (ptr->dec->type == codec->type) {
				if (i+1==count) {
					gf_list_add(term->codecs, cd);
				} else {
					gf_list_insert(term->codecs, cd, i+1);
				}
				goto exit;
			}
			//we insert video (0x04) after audio (0x05) if next is not audio
			//last one
			if (i+1 == count) {
				gf_list_add(term->codecs, cd);
				goto exit;
			}
			next = (CodecEntry*)gf_list_get(term->codecs, i+1);
			//different priority level, insert
			if ((next->flags & GF_MM_CE_THREADED) || (next->dec->Priority != codec->Priority)) {
				gf_list_insert(term->codecs, cd, i+1);
				goto exit;
			}
			//same priority level and at least one after : continue
			continue;
		}
		gf_list_insert(term->codecs, cd, i);
		goto exit;
	}
	//if we got here, first in list
	gf_list_add(term->codecs, cd);

exit:
	gf_mx_v(term->mm_mx);
	return;
}
Example No. 23
0
static Bool SR_UserInputIntern(GF_Renderer *sr, GF_Event *event, Bool from_user)
{
#ifdef GF_SR_EVENT_QUEUE
	GF_Event *ev;
#else
	Bool ret;
#endif

	if (sr->term && (sr->interaction_level & GF_INTERACT_INPUT_SENSOR) && (event->type<=GF_EVENT_MOUSEWHEEL))
		gf_term_mouse_input(sr->term, &event->mouse);

	if (!sr->interaction_level || (sr->interaction_level==GF_INTERACT_INPUT_SENSOR) ) {
		if (!from_user) {
			GF_USER_SENDEVENT(sr->user, event);
		}
		return 0;
	}

#ifdef GF_SR_EVENT_QUEUE
	switch (event->type) {
	case GF_EVENT_MOUSEMOVE:
	{
		u32 i, count;
		gf_mx_p(sr->ev_mx);
		count = gf_list_count(sr->events);
		for (i=0; i<count; i++) {
			ev = (GF_Event *)gf_list_get(sr->events, i);
			if (ev->type == GF_EVENT_MOUSEMOVE) {
				ev->mouse =  event->mouse;
				gf_mx_v(sr->ev_mx);
				return 1;
			}
		}
		gf_mx_v(sr->ev_mx);
	}
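	/*no queued MOUSEMOVE to coalesce with: fall through and enqueue the event*/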
	default:
		ev = (GF_Event *)malloc(sizeof(GF_Event));
		ev->type = event->type;
		if (event->type<=GF_EVENT_MOUSEWHEEL) {
			ev->mouse = event->mouse;
		} else if (event->type==GF_EVENT_TEXTINPUT) {
			ev->character = event->character;
		} else {
			ev->key = event->key;
		}
		gf_mx_p(sr->ev_mx);
		gf_list_add(sr->events, ev);
		gf_mx_v(sr->ev_mx);
		break;
	}
	return 0;
#else
	gf_sr_lock(sr, 1);
	ret = sr->visual_renderer->ExecuteEvent(sr->visual_renderer, event);
	gf_sr_lock(sr, 0);
	if (!ret && !from_user) {
		SR_ForwardUserEvent(sr, event);
	}
	return ret;
#endif
}
Example No. 24
0
void gf_term_stop_codec(GF_Codec *codec, Bool is_pause)
{
	GF_CodecCapability cap;
	Bool locked = 0;
	CodecEntry *ce;
	GF_Terminal *term = codec->odm->term;
	ce = mm_get_codec(term->codecs, codec);
	if (!ce) return;

	if (ce->mx) gf_mx_p(ce->mx);
	/*We must make sure that:
		1- media codecs are stopped synchronously, otherwise we could destroy the composition memory while
	the codec writes to it
		2- we prevent deadlocks for other codecs waiting for the scene graph
	*/
	else if (codec->CB) {
		locked = 1;
		gf_mx_p(term->mm_mx);
	} else {
		locked = gf_mx_try_lock(term->mm_mx);
	}

	if (!is_pause) {
		cap.CapCode = GF_CODEC_ABORT;
		cap.cap.valueInt = 0;
		gf_codec_set_capability(codec, cap);

		if (codec->decio && codec->odm->mo && (codec->odm->mo->flags & GF_MO_DISPLAY_REMOVE) ) {
			cap.CapCode = GF_CODEC_SHOW_SCENE;
			cap.cap.valueInt = 0;
			gf_codec_set_capability(codec, cap);
			codec->odm->mo->flags &= ~GF_MO_DISPLAY_REMOVE;
		}
	}

	/*for audio codecs, force the CB to the stop state to discard any pending AU. Not doing so would lead to a wrong estimation of the clock drift
	when resuming the object*/
	if (codec->type==GF_STREAM_AUDIO) {
		gf_codec_set_status(codec, GF_ESM_CODEC_STOP);
	}
	//if video is in a dynamic scene, reset the CB on user stop (i.e. the codec was not at EOS). Otherwise (bifs, svg) we may want to keep the last decoded image
	else if ((codec->Status<GF_ESM_CODEC_EOS) && codec->odm && codec->odm->parentscene && codec->odm->parentscene->is_dynamic_scene && codec->CB && (codec->CB->Capacity>1)) {
		gf_codec_set_status(codec, GF_ESM_CODEC_STOP);
	}
	/*otherwise set status directly and don't touch CB state*/
	else {
		codec->Status = GF_ESM_CODEC_STOP;
	}

	/*don't wait for end of thread since this can be triggered within the decoding thread*/
	if (ce->flags & GF_MM_CE_RUNNING) {
		ce->flags &= ~GF_MM_CE_RUNNING;
		if (!ce->thread)
			term->cumulated_priority -= codec->Priority+1;
	}
	if (codec->CB) gf_cm_abort_buffering(codec->CB);

	if (ce->mx) gf_mx_v(ce->mx);
	/*cf note above*/
	else if (locked) gf_mx_v(term->mm_mx);
}
Example No. 25
0
int dc_audio_decoder_read(AudioInputFile *audio_input_file, AudioInputData *audio_input_data)
{
	int ret;
	AVPacket packet;
	int got_frame = 0;
	AVCodecContext *codec_ctx;
	AudioDataNode *audio_data_node;

	/* Get a pointer to the codec context for the audio stream */
	codec_ctx = audio_input_file->av_fmt_ctx->streams[audio_input_file->astream_idx]->codec;

	/* Read frames */
	while (1) {
		if (audio_input_file->av_pkt_list) {
			if (gf_list_count(audio_input_file->av_pkt_list)) {
				AVPacket *packet_copy;
				assert(audio_input_file->av_pkt_list);
				gf_mx_p(audio_input_file->av_pkt_list_mutex);
				packet_copy = gf_list_pop_front(audio_input_file->av_pkt_list);
				gf_mx_v(audio_input_file->av_pkt_list_mutex);

				if (packet_copy == NULL) {
					ret = AVERROR_EOF;
				} else {
					memcpy(&packet, packet_copy, sizeof(AVPacket));
					gf_free(packet_copy);
					ret = 0;
				}
			} else {
				gf_sleep(1);
				continue;
			}
		} else {
			ret = av_read_frame(audio_input_file->av_fmt_ctx, &packet);
		}
		if (ret == AVERROR_EOF) {
			if (audio_input_file->mode == LIVE_MEDIA && audio_input_file->no_loop == 0) {
				av_seek_frame(audio_input_file->av_fmt_ctx, audio_input_file->astream_idx, 0, 0);
				continue;
			}

			/* Flush decoder */
			packet.data = NULL;
			packet.size = 0;

#ifndef FF_API_AVFRAME_LAVC
			avcodec_get_frame_defaults(audio_input_data->aframe);
#else
			av_frame_unref(audio_input_data->aframe);
#endif

			avcodec_decode_audio4(codec_ctx, audio_input_data->aframe, &got_frame, &packet);

			if (got_frame) {
				dc_producer_lock(&audio_input_data->producer, &audio_input_data->circular_buf);
				dc_producer_unlock_previous(&audio_input_data->producer, &audio_input_data->circular_buf);
				audio_data_node = (AudioDataNode*)dc_producer_produce(&audio_input_data->producer, &audio_input_data->circular_buf);

				audio_data_node->abuf_size = audio_input_data->aframe->linesize[0];
				memcpy(audio_data_node->abuf, audio_input_data->aframe->data[0], audio_data_node->abuf_size);

				dc_producer_advance(&audio_input_data->producer, &audio_input_data->circular_buf);
				return 0;
			}

			dc_producer_end_signal(&audio_input_data->producer, &audio_input_data->circular_buf);
			dc_producer_unlock_previous(&audio_input_data->producer, &audio_input_data->circular_buf);

			return -2;
		}
		else if (ret < 0) {
			GF_LOG(GF_LOG_ERROR, GF_LOG_DASH, ("Cannot read audio frame.\n"));
			continue;
		}

		/* Is this a packet from the audio stream? */
		if (packet.stream_index == audio_input_file->astream_idx) {
			/* Set audio frame to default */

#ifndef FF_API_AVFRAME_LAVC
			avcodec_get_frame_defaults(audio_input_data->aframe);
#else
			av_frame_unref(audio_input_data->aframe);
#endif

			/* Decode audio frame */
			if (avcodec_decode_audio4(codec_ctx, audio_input_data->aframe, &got_frame, &packet) < 0) {
				av_free_packet(&packet);
				GF_LOG(GF_LOG_ERROR, GF_LOG_DASH, ("Error while decoding audio.\n"));
				dc_producer_end_signal(&audio_input_data->producer, &audio_input_data->circular_buf);
				dc_producer_unlock_previous(&audio_input_data->producer, &audio_input_data->circular_buf);
				return -1;
			}

			if (audio_input_data->aframe->pts != AV_NOPTS_VALUE)
				audio_input_data->next_pts = audio_input_data->aframe->pts;

			audio_input_data->next_pts += ((int64_t)AV_TIME_BASE * audio_input_data->aframe->nb_samples) / codec_ctx->sample_rate;

			/* Did we get an audio frame? */
			if (got_frame) {
				uint8_t **data;
				int data_size;
#ifdef DC_AUDIO_RESAMPLER
				int num_planes_out;
#endif
#ifdef GPAC_USE_LIBAV
				int sample_rate = codec_ctx->sample_rate;
				int num_channels = codec_ctx->channels;
				u64 channel_layout = codec_ctx->channel_layout;
#else
				int sample_rate = audio_input_data->aframe->sample_rate;
				int num_channels = audio_input_data->aframe->channels;
				u64 channel_layout = audio_input_data->aframe->channel_layout;
#endif
				enum AVSampleFormat sample_format = (enum AVSampleFormat)audio_input_data->aframe->format;
				Bool resample = (sample_rate    != DC_AUDIO_SAMPLE_RATE
				                 || num_channels   != DC_AUDIO_NUM_CHANNELS
				                 || channel_layout != DC_AUDIO_CHANNEL_LAYOUT
				                 || sample_format  != DC_AUDIO_SAMPLE_FORMAT);

				/* Resample if needed */
				if (resample) {
#ifndef DC_AUDIO_RESAMPLER
					GF_LOG(GF_LOG_ERROR, GF_LOG_DASH, ("Audio resampling is needed at the decoding stage, but not supported by your version of DashCast. Aborting.\n"));
					exit(1);
#else
					uint8_t **output;
					if (ensure_resampler(audio_input_file, sample_rate, num_channels, channel_layout, sample_format)) {
						return -1;
					}

					if (resample_audio(audio_input_file, audio_input_data, codec_ctx, &output, &num_planes_out, num_channels, sample_format)) {
						return -1;
					} else {
						data = output;
						av_samples_get_buffer_size(&data_size, num_channels, audio_input_data->aframe->nb_samples, sample_format, 0);
					}
#endif
				} else {
					/*no resampling needed: read data from the AVFrame*/
					data = audio_input_data->aframe->extended_data;
					data_size = audio_input_data->aframe->linesize[0];
				}

				assert(!av_sample_fmt_is_planar(DC_AUDIO_SAMPLE_FORMAT));
				av_fifo_generic_write(audio_input_file->fifo, data[0], data_size, NULL);

				if (audio_input_file->mode == ON_DEMAND || audio_input_file->mode == LIVE_MEDIA) {
					dc_producer_lock(&audio_input_data->producer, &audio_input_data->circular_buf);

					/* Unlock the previous node in the circular buffer. */
					dc_producer_unlock_previous(&audio_input_data->producer, &audio_input_data->circular_buf);

					/* Get the pointer of the current node in circular buffer. */
					audio_data_node = (AudioDataNode *) dc_producer_produce(&audio_input_data->producer, &audio_input_data->circular_buf);
					audio_data_node->channels = DC_AUDIO_NUM_CHANNELS;
					audio_data_node->channel_layout = DC_AUDIO_CHANNEL_LAYOUT;
					audio_data_node->sample_rate = DC_AUDIO_SAMPLE_RATE;
					audio_data_node->format = DC_AUDIO_SAMPLE_FORMAT;
					audio_data_node->abuf_size = data_size;
					av_fifo_generic_read(audio_input_file->fifo, audio_data_node->abuf, audio_data_node->abuf_size, NULL);

					dc_producer_advance(&audio_input_data->producer, &audio_input_data->circular_buf);
				} else {
					while (av_fifo_size(audio_input_file->fifo) >= LIVE_FRAME_SIZE) {
						/* Lock the current node in the circular buffer. */
						if (dc_producer_lock(&audio_input_data->producer, &audio_input_data->circular_buf) < 0) {
							GF_LOG(GF_LOG_ERROR, GF_LOG_DASH, ("[dashcast] Live system dropped an audio frame\n"));
							continue;
						}

						/* Unlock the previous node in the circular buffer. */
						dc_producer_unlock_previous(&audio_input_data->producer, &audio_input_data->circular_buf);

						/* Get the pointer of the current node in circular buffer. */
						audio_data_node = (AudioDataNode *) dc_producer_produce(&audio_input_data->producer, &audio_input_data->circular_buf);

						audio_data_node->abuf_size = LIVE_FRAME_SIZE;
						av_fifo_generic_read(audio_input_file->fifo, audio_data_node->abuf, audio_data_node->abuf_size, NULL);

						dc_producer_advance(&audio_input_data->producer, &audio_input_data->circular_buf);
					}
				}

#ifdef DC_AUDIO_RESAMPLER
				if (resample) {
					int i;
					for (i=0; i<num_planes_out; ++i) {
						av_free(data[i]);
					}
					av_free(data);
				}
#endif

				return 0;
			}
		}

		/* Free the packet that was allocated by av_read_frame */
		av_free_packet(&packet);
	}

	GF_LOG(GF_LOG_ERROR, GF_LOG_DASH, ("Unknown error while reading audio frame.\n"));
	return -1;
}
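The return convention visible above is 0 for one produced audio frame, -2 for end of stream (after the producer end signal has been sent), and -1 for an error. A minimal driver loop under those assumptions, with the audio_input_file/audio_input_data setup omitted:

/*sketch of a decoder driver loop; input setup and teardown omitted*/
while (1) {
	int rv = dc_audio_decoder_read(audio_input_file, audio_input_data);
	if (rv == -2) break;	/*end of stream: consumers have already been signaled*/
	if (rv < 0) {
		GF_LOG(GF_LOG_ERROR, GF_LOG_DASH, ("Audio decoding stopped on error.\n"));
		break;
	}
	/*rv == 0: one audio frame was pushed into the circular buffer*/
}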