Example #1
static GF_Err GAPI_Flush(GF_VideoOutput *dr, GF_Window *dest)
{
	GF_Err e;
	GAPICTX(dr);

	if (!gctx) return GF_BAD_PARAM;

	gf_mx_p(gctx->mx);
	
#ifdef GPAC_USE_OGL_ES
	if (gctx->output_3d_type==1) {
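		/*3D output: flush by swapping the EGL buffers*/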
#ifndef GLES_NO_PIXMAP
		if (gctx->fullscreen && gctx->surface && gctx->egldpy) {
#endif
			if (gctx->erase_dest) {
				InvalidateRect(gctx->hWnd, NULL, TRUE);
				gctx->erase_dest = 0;
			}
			eglSwapBuffers(gctx->egldpy, gctx->surface);
#ifndef GLES_NO_PIXMAP
		} else {
		    InvalidateRect(gctx->hWnd, NULL, gctx->erase_dest);
			gctx->erase_dest = 0;
		}
#endif
		gf_mx_v(gctx->mx);
		return GF_OK;
	}
#endif
	e = GF_OK;
	if (gctx->backbuffer) {
		if (dest) {
			gctx->dst_blt = *dest;
		} else {
			assert(0);
			gctx->dst_blt.x = gctx->dst_blt.y = 0;
			gctx->dst_blt.w = gctx->bb_width;
			gctx->dst_blt.h = gctx->bb_height;
		}
		if (gctx->gx_mode) {
			if (!gctx->fullscreen && gctx->erase_dest) {
				InvalidateRect(gctx->hWnd, NULL, TRUE);
				gctx->erase_dest = 0;
			}
			e = GAPI_FlipBackBuffer(dr);
		} else {
#ifndef DIRECT_BITBLT
			InvalidateRect(gctx->hWnd, NULL, gctx->erase_dest);
#else
//			BitBlt(gctx->hdc, gctx->dst_blt.x, gctx->dst_blt.y, gctx->bb_width, gctx->bb_height, gctx->hdcBitmap, 0, 0, SRCCOPY);
			HDC dc = GetDC(NULL);
			BitBlt(dc, gctx->dst_blt.x, gctx->dst_blt.y, gctx->bb_width, gctx->bb_height, gctx->hdcBitmap, 0, 0, SRCCOPY);
			ReleaseDC(NULL, dc);
#endif
			gctx->erase_dest = 0;
		}
	}
	gf_mx_v(gctx->mx);
	return e;
}
Example #2
static u32 gf_dm_session_thread(void *par)
{
	GF_DownloadSession *sess = (GF_DownloadSession *)par;

	GF_LOG(GF_LOG_DEBUG, GF_LOG_CORE, ("[Downloader] Entering thread ID %d\n", gf_th_id() ));

	sess->flags &= ~GF_DOWNLOAD_SESSION_THREAD_DEAD;
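	/*run the session state machine under the session mutex until the session is destroyed or disconnected*/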
	while (!sess->destroy) {
		gf_mx_p(sess->mx);
		if (sess->status >= GF_NETIO_DISCONNECTED) {
			gf_mx_v(sess->mx);
			break;
		}

		if (sess->status < GF_NETIO_CONNECTED) {
			gf_dm_connect(sess);
		} else {
			if (sess->status == GF_NETIO_WAIT_FOR_REPLY) gf_sleep(GF_WAIT_REPLY_SLEEP);
			sess->do_requests(sess);
		}
		gf_mx_v(sess->mx);
		gf_sleep(2);
	}
	/*the session is over: disconnect it and flag the thread as dead*/
	gf_dm_disconnect(sess);
	sess->status = GF_NETIO_STATE_ERROR;
	sess->last_error = 0;
	sess->flags |= GF_DOWNLOAD_SESSION_THREAD_DEAD;
	return 1;
}
Example #3
NPT_Result
GPAC_GenericDevice::OnAction(PLT_ActionReference&          action,
                             const PLT_HttpRequestContext& context)
{
	NPT_COMPILER_UNUSED(context);

#ifdef GPAC_HAS_SPIDERMONKEY
	gf_mx_p(m_pMutex);
#endif
	PLT_ActionDesc &act_desc = action->GetActionDesc();
	NPT_String name = act_desc.GetName();
#ifdef GPAC_HAS_SPIDERMONKEY
	assert(!m_pSema);
#endif
	GF_LOG(GF_LOG_INFO, GF_LOG_NETWORK, ("[UPnP] Action %s called (thread %d)\n", (char *) name, gf_th_id() ));

#ifdef GPAC_HAS_SPIDERMONKEY
	if (JSVAL_IS_NULL(act_proc)) {
		gf_mx_v(m_pMutex);
		return NPT_SUCCESS;
	}

	jsval argv[2];

	m_pUPnP->LockJavascript(GF_TRUE);

	JSObject *js_action = JS_NewObject(m_pUPnP->m_pJSCtx, &m_pUPnP->upnpDeviceClass._class, 0, 0);
	argv[0] = OBJECT_TO_JSVAL(js_action);
	SMJS_SET_PRIVATE(m_pUPnP->m_pJSCtx, js_action, this);

	act_ref = action;

	JS_DefineProperty(m_pUPnP->m_pJSCtx, js_action, "Name", STRING_TO_JSVAL( JS_NewStringCopyZ(m_pUPnP->m_pJSCtx, name) ), 0, 0, JSPROP_READONLY | JSPROP_PERMANENT);
	GPAC_Service *service = (GPAC_Service *) act_desc.GetService();
	JS_DefineProperty(m_pUPnP->m_pJSCtx, js_action, "Service", service->m_pObj ? OBJECT_TO_JSVAL( service->m_pObj) : JSVAL_NULL, 0, 0, JSPROP_READONLY | JSPROP_PERMANENT);
	JS_DefineFunction(m_pUPnP->m_pJSCtx, js_action, "GetArgument", upnp_action_get_argument, 1, 0);
	JS_DefineFunction(m_pUPnP->m_pJSCtx, js_action, "SendReply", upnp_action_send_reply, 1, 0);

	/*create a semaphore*/
	m_pSema = gf_sema_new(1, 0);

	jsval rval;
	JS_CallFunctionValue(m_pUPnP->m_pJSCtx, obj, act_proc, 1, argv, &rval);
	SMJS_SET_PRIVATE(m_pUPnP->m_pJSCtx, js_action, NULL);
	m_pUPnP->LockJavascript(GF_FALSE);

	if (JSVAL_IS_INT(rval) && (JSVAL_TO_INT(rval) != 0)) {
		action->SetError(JSVAL_TO_INT(rval), "Action Failed");
	}
	/*wait on the semaphore*/
	if (!gf_sema_wait_for(m_pSema, 10000)) {
		GF_LOG(GF_LOG_WARNING, GF_LOG_NETWORK, ("[UPnP] Reply processing for action %s timed out - sending incomplete reply\n", (char *) name));
	}
	gf_sema_del(m_pSema);
	m_pSema = NULL;

	gf_mx_v(m_pMutex);
#endif
	return NPT_SUCCESS;
}
Example #4
Bool gf_term_lock_codec(GF_Codec *codec, Bool lock, Bool trylock)
{
	Bool res = 1;
	CodecEntry *ce;
	GF_Terminal *term = codec->odm->term;

	ce = mm_get_codec(term->codecs, codec);
	if (!ce) return 0;

	if (ce->mx) {
		if (lock) {
			if (trylock) {
				res = gf_mx_try_lock(ce->mx);
			} else {
				res = gf_mx_p(ce->mx);
			}
		}
		else gf_mx_v(ce->mx);
	}
	else {
		if (lock) {
			if (trylock) {
				res = gf_mx_try_lock(term->mm_mx);
			} else {
				res = gf_mx_p(term->mm_mx);
			}
		}
		else gf_mx_v(term->mm_mx);
	}
	return res;
}
Example #5
GF_Renderer *gf_sr_new(GF_User *user, Bool self_threaded, GF_Terminal *term)
{
	GF_Renderer *tmp = SR_New(user);
	if (!tmp) return NULL;
	tmp->term = term;

	/**/
	tmp->audio_renderer = gf_sr_ar_load(user);	
	if (!tmp->audio_renderer) GF_USER_MESSAGE(user, "", "NO AUDIO RENDERER", GF_OK);

	gf_mx_p(tmp->mx);

	/*run threaded*/
	if (self_threaded) {
		tmp->VisualThread = gf_th_new();
		gf_th_run(tmp->VisualThread, SR_RenderRun, tmp);
		while (tmp->video_th_state!=1) {
			gf_sleep(10);
			if (tmp->video_th_state==3) {
				gf_mx_v(tmp->mx);
				gf_sr_del(tmp);
				return NULL;
			}
		}
	}

	/*set default size if owning output*/
	if (!tmp->user->os_window_handler) {
		gf_sr_set_size(tmp, 320, 20);
	}

	gf_mx_v(tmp->mx);

	return tmp;
}
Example #6
int dc_consumer_lock(Consumer *consumer, CircularBuffer *circular_buf)
{
	Node *node = &circular_buf->list[consumer->idx];

	gf_mx_p(node->mutex);
	GF_LOG(GF_LOG_DEBUG, GF_LOG_DASH, ("consumer %s enters lock %d\n", consumer->name, consumer->idx));
	if (node->marked == 2) {
		gf_mx_v(node->mutex);
		return -1;
	}

	node->num_consumers_waiting++;
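	/*wait for the producer: release the mutex before blocking on the consumers semaphore, then re-acquire it and re-check the node state*/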
	while (node->num_producers || !node->marked) {
		gf_mx_v(node->mutex);
		gf_sema_wait(node->consumers_semaphore);
		gf_mx_p(node->mutex);

		if (node->marked == 2) {
			gf_mx_v(node->mutex);
			return -1;
		}
	}
	node->num_consumers_waiting--;

	if (node->marked == 2) {
		gf_mx_v(node->mutex);
		return -1;
	}
	node->num_consumers++;
	node->num_consumers_accessed++;
	GF_LOG(GF_LOG_DEBUG, GF_LOG_DASH, ("consumer %s exits lock %d \n", consumer->name, consumer->idx));
	gf_mx_v(node->mutex);

	return 0;
}
Example #7
int dc_producer_lock(Producer *producer, CircularBuffer *circular_buf)
{
	Node *node = &circular_buf->list[producer->idx];

	gf_mx_p(node->mutex);
	GF_LOG(GF_LOG_DEBUG, GF_LOG_DASH, ("producer %s enters lock %d \n", producer->name, producer->idx));

	if ( (circular_buf->mode == LIVE_CAMERA || circular_buf->mode == LIVE_MEDIA) && (node->num_consumers || node->marked)) {
		gf_mx_v(node->mutex);
		return -1;
	}
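	/*otherwise block until no consumer uses the node and it is unmarked; the mutex is released while waiting on the producers semaphore*/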

	while (node->num_consumers || node->marked) {
		gf_mx_v(node->mutex);
		gf_sema_wait(node->producers_semaphore);
		gf_mx_p(node->mutex);
	}

	node->num_producers++;
	if (circular_buf->size>1) {
		node->marked = 1;
	}

	GF_LOG(GF_LOG_DEBUG, GF_LOG_DASH, ("producer %s exits lock %d \n", producer->name, producer->idx));
	gf_mx_v(node->mutex);

	return 0;
}
Example #8
GF_Err SDLVid_ResizeWindow(GF_VideoOutput *dr, u32 width, u32 height) 
{
	SDLVID();
	GF_Event evt;

	/*lock X mutex to make sure the event queue is not being processed*/
	gf_mx_p(ctx->evt_mx);

	if (ctx->output_3d_type==1) {
		u32 flags, nb_bits;
		const char *opt;
		if ((ctx->width==width) && (ctx->height==height) ) {
			gf_mx_v(ctx->evt_mx);
			return GF_OK;
		}
		flags = SDL_GL_WINDOW_FLAGS;
		if (ctx->os_handle) flags &= ~SDL_RESIZABLE;
		if (ctx->fullscreen) flags |= SDL_FULLSCREEN_FLAGS;
		if (!ctx->screen) ctx->screen = SDL_SetVideoMode(width, height, 0, flags);
		SDL_GL_SetAttribute(SDL_GL_DOUBLEBUFFER, 1);

		opt = gf_modules_get_option((GF_BaseInterface *)dr, "Video", "GLNbBitsDepth");
		nb_bits = opt ? atoi(opt) : 16;
		SDL_GL_SetAttribute(SDL_GL_DEPTH_SIZE, nb_bits);
		SDL_GL_SetAttribute(SDL_GL_STENCIL_SIZE, 0);
		opt = gf_modules_get_option((GF_BaseInterface *)dr, "Video", "GLNbBitsPerComponent");
		nb_bits = opt ? atoi(opt) : 5;
		SDL_GL_SetAttribute(SDL_GL_RED_SIZE, nb_bits);
		SDL_GL_SetAttribute(SDL_GL_GREEN_SIZE, nb_bits);
		SDL_GL_SetAttribute(SDL_GL_BLUE_SIZE, nb_bits);

		assert(width);
		assert(height);
		ctx->screen = SDL_SetVideoMode(width, height, 0, flags);
		assert(ctx->screen);
		ctx->width = width;
		ctx->height = height;
		memset(&evt, 0, sizeof(GF_Event));
		evt.type = GF_EVENT_VIDEO_SETUP;
		dr->on_event(dr->evt_cbk_hdl, &evt);		
	} else {
		u32 flags;
#ifdef GPAC_IPHONE
		flags = SDL_FULLSCREEN_FLAGS;
		//SDL readme says it would make us faster
		SDL_GL_SetAttribute(SDL_GL_DEPTH_SIZE, 0);
		SDL_GL_SetAttribute(SDL_GL_RETAINED_BACKING, 1);
#else
		flags = SDL_WINDOW_FLAGS;
#endif
		if (ctx->os_handle) flags &= ~SDL_RESIZABLE;
		ctx->screen = SDL_SetVideoMode(width, height, 0, flags);
	}
	gf_mx_v(ctx->evt_mx);
	return ctx->screen ? GF_OK : GF_IO_ERR;
}
Example #9
void gf_term_stop_codec(GF_Codec *codec, Bool is_pause)
{
	GF_CodecCapability cap;
	Bool locked = 0;
	CodecEntry *ce;
	GF_Terminal *term = codec->odm->term;
	ce = mm_get_codec(term->codecs, codec);
	if (!ce) return;

	if (ce->mx) gf_mx_p(ce->mx);
	/*We must make sure that:
		1- media codecs are stopped synchronously, otherwise we could destroy the composition memory while
	the codec is still writing to it
		2- we don't deadlock other codecs waiting for the scene graph
	*/
	else if (codec->CB) {
		locked = 1;
		gf_mx_p(term->mm_mx);
	} else {
		locked = gf_mx_try_lock(term->mm_mx);
	}

	if (!is_pause) {
		cap.CapCode = GF_CODEC_ABORT;
		cap.cap.valueInt = 0;
		gf_codec_set_capability(codec, cap);

		if (codec->decio && codec->odm->mo && (codec->odm->mo->flags & GF_MO_DISPLAY_REMOVE) ) {
			cap.CapCode = GF_CODEC_SHOW_SCENE;
			cap.cap.valueInt = 0;
			gf_codec_set_capability(codec, cap);
			codec->odm->mo->flags &= ~GF_MO_DISPLAY_REMOVE;
		}
	}

	/*set status directly and don't touch CB state*/
	codec->Status = GF_ESM_CODEC_STOP;
	/*don't wait for end of thread since this can be triggered within the decoding thread*/
	if (ce->flags & GF_MM_CE_RUNNING) {
		ce->flags &= ~GF_MM_CE_RUNNING;
		if (!ce->thread)
			term->cumulated_priority -= codec->Priority+1;
	}
	if (codec->CB) gf_cm_abort_buffering(codec->CB);

	if (ce->mx) gf_mx_v(ce->mx);
	/*cf note above*/
	else if (locked) gf_mx_v(term->mm_mx);
}
Example #10
GF_Err SDLVid_SetFullScreen(GF_VideoOutput *dr, u32 bFullScreenOn, u32 *screen_width, u32 *screen_height)
{
	u32 bpp, pref_bpp;
	SDLVID();

	if (ctx->fullscreen==bFullScreenOn) return GF_OK;

	/*lock to get sure the event queue is not processed under X*/
	gf_mx_p(ctx->evt_mx);
	ctx->fullscreen = bFullScreenOn;

	pref_bpp = bpp = ctx->screen->format->BitsPerPixel;

	if (ctx->fullscreen) {
		u32 flags;
		Bool switch_res = 0;
		const char *sOpt = gf_modules_get_option((GF_BaseInterface *)dr, "Video", "SwitchResolution");
		if (sOpt && !stricmp(sOpt, "yes")) switch_res = 1;
		if (!dr->max_screen_width || !dr->max_screen_height) switch_res = 1;

		flags = (ctx->output_3d_type==1) ? SDL_GL_FULLSCREEN_FLAGS : SDL_FULLSCREEN_FLAGS;
		ctx->store_width = *screen_width;
		ctx->store_height = *screen_height;
		if (switch_res) {
			u32 i;
			ctx->fs_width = *screen_width;
			ctx->fs_height = *screen_height;
			for(i=0; i<nb_video_modes; i++) {
				if (ctx->fs_width<=video_modes[2*i] && ctx->fs_height<=video_modes[2*i + 1]) {
					if ((pref_bpp = SDL_VideoModeOK(video_modes[2*i], video_modes[2*i+1], bpp, flags))) {
						ctx->fs_width = video_modes[2*i];
						ctx->fs_height = video_modes[2*i + 1];
						break;
					}
				}
			}
		} else {
			ctx->fs_width = dr->max_screen_width;
			ctx->fs_height = dr->max_screen_height;
		}
		ctx->screen = SDL_SetVideoMode(ctx->fs_width, ctx->fs_height, pref_bpp, flags);
		/*we switched bpp, clean all objects*/
		if (bpp != pref_bpp) SDLVid_DestroyObjects(ctx);
		*screen_width = ctx->fs_width;
		*screen_height = ctx->fs_height;
		/*GL has changed*/
		if (ctx->output_3d_type==1) {
			GF_Event evt;
			evt.type = GF_EVENT_VIDEO_SETUP;
			dr->on_event(dr->evt_cbk_hdl, &evt);
		}
	} else {
		SDLVid_ResizeWindow(dr, ctx->store_width, ctx->store_height);
		*screen_width = ctx->store_width;
		*screen_height = ctx->store_height;
	}
	gf_mx_v(ctx->evt_mx);
	if (!ctx->screen) return GF_IO_ERR;
	return GF_OK;
}
Example #11
void dc_audio_decoder_close(AudioInputFile *audio_input_file)
{
	/*
	 * Close the audio format context
	 */
	avformat_close_input(&audio_input_file->av_fmt_ctx);

	if (audio_input_file->av_pkt_list_mutex) {
		gf_mx_p(audio_input_file->av_pkt_list_mutex);
		while (gf_list_count(audio_input_file->av_pkt_list)) {
			AVPacket *pkt = gf_list_last(audio_input_file->av_pkt_list);
			av_free_packet(pkt);
			gf_list_rem_last(audio_input_file->av_pkt_list);
		}
		gf_list_del(audio_input_file->av_pkt_list);
		gf_mx_v(audio_input_file->av_pkt_list_mutex);
		gf_mx_del(audio_input_file->av_pkt_list_mutex);
	}

	av_fifo_free(audio_input_file->fifo);

#ifdef DC_AUDIO_RESAMPLER
	avresample_free(&audio_input_file->aresampler);
#endif
}
Example #12
void gf_codec_del(GF_Codec *codec)
{
	if (!codec || !codec->inChannels)
	  return;
	if (gf_list_count(codec->inChannels)) return;

	if (!(codec->flags & GF_ESM_CODEC_IS_USE)) {
		switch (codec->type) {
		/*input sensor streams are handled internally for now*/
#ifndef GPAC_DISABLE_VRML
		case GF_STREAM_INTERACT:
			gf_mx_p(codec->odm->term->net_mx);
			gf_isdec_del(codec->decio);
			gf_list_del_item(codec->odm->term->input_streams, codec);
			gf_mx_v(codec->odm->term->net_mx);
			break;
#endif
		default:
			gf_modules_close_interface((GF_BaseInterface *) codec->decio);
			break;
		}
	}
	if (codec->CB) gf_cm_del(codec->CB);
	codec->CB = NULL;
	if (codec->inChannels) gf_list_del(codec->inChannels);
	codec->inChannels = NULL;
	gf_free(codec);
}
Example #13
static void avr_on_video_frame ( void *udta, u32 time )
{
    u32 i, j;
    GF_Err e;
    GF_VideoSurface fb;
    GF_AVRedirect *avr = ( GF_AVRedirect * ) udta;
    if (start_if_needed(avr))
        return;
    gf_mx_p(avr->frameMutex);
    e = gf_sc_get_screen_buffer ( avr->term->compositor, &fb, 0 );
    if ( e )
    {
        GF_LOG ( GF_LOG_ERROR, GF_LOG_MODULE, ( "[AVRedirect] Error grabbing frame buffer %s\n", gf_error_to_string ( e ) ) );
        /*release the frame mutex before returning to avoid leaking the lock*/
        gf_mx_v(avr->frameMutex);
        return;
    }
    /*convert frame*/
    for ( i=0; i<fb.height; i++ )
    {
        char *dst = avr->frame + i * fb.width * 3;
        char *src = fb.video_buffer + i * fb.pitch_y;
        for ( j=0; j<fb.width; j++ )
        {
            dst[0] = src[2];
            dst[1] = src[1];
            dst[2] = src[0];
            src+=4;
            dst += 3;
        }
    }
    avr->frameTime = time;
    gf_mx_v(avr->frameMutex);
    gf_sc_release_screen_buffer ( avr->term->compositor, &fb );
    GF_LOG ( GF_LOG_DEBUG, GF_LOG_MODULE, ( "[AVRedirect] Writing video frame\n" ) );
}
Example #14
int dc_consumer_unlock_previous(Consumer *consumer, CircularBuffer *circular_buf)
{
	int node_idx = (consumer->idx - 1 + consumer->max_idx) % consumer->max_idx;
	int last_consumer = 0;
	Node *node = &circular_buf->list[node_idx];

	gf_mx_p(node->mutex);

	node->num_consumers--;
	if (node->num_consumers < 0)
		node->num_consumers = 0;

	if (node->num_consumers_accessed == circular_buf->max_num_consumers) {
		if (node->marked != 2)
			node->marked = 0;
		node->num_consumers_accessed = 0;
		last_consumer = 1;
	}
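	/*wake up a producer that may be waiting for this node to be released*/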

	gf_sema_notify(node->producers_semaphore, 1);

	GF_LOG(GF_LOG_DEBUG, GF_LOG_DASH, ("consumer %s unlock %d \n", consumer->name, node_idx));
	gf_mx_v(node->mutex);

	return last_consumer;
}
Example #15
GF_Err ISOR_DisconnectChannel(GF_InputService *plug, LPNETCHANNEL channel)
{
	ISOMChannel *ch;
	GF_Err e;
	ISOMReader *read;
	if (!plug || !plug->priv) return GF_SERVICE_ERROR;
	read = (ISOMReader *) plug->priv;
	if (!read->mov) return GF_SERVICE_ERROR;

	gf_mx_p(read->segment_mutex);
	e = GF_OK;
	ch = isor_get_channel(read, channel);
	assert(ch);
	if (!ch) {
		e = GF_STREAM_NOT_FOUND;
		goto exit;
	}
	/*signal the service is broken but still process the delete*/
	isor_delete_channel(read, ch);
	assert(!isor_get_channel(read, channel));

exit:
	if (read->input->query_proxy && read->input->proxy_udta && read->input->proxy_type) {
		send_proxy_command(read, 1, 0, e, NULL, channel);
	} else {
		gf_service_disconnect_ack(read->service, channel, e);
	}
	gf_mx_v(read->segment_mutex);
	return e;
}
Example #16
Bool ts_encode_video_frame(GF_AbstractTSMuxer* ts, uint8_t* data, int encoded) {
    AVPacketList *pl;
    AVPacket * pkt;
    if (!ts->encode)
        return 1;
    pl = gf_malloc(sizeof(AVPacketList));
    pl->next = NULL;
    pkt = &(pl->pkt);

    av_init_packet(pkt);

    if (ts->video_st->codec->coded_frame->pts != AV_NOPTS_VALUE) {
        //pkt->pts= av_rescale_q(ts->video_st->codec->coded_frame->pts, ts->video_st->codec->time_base, ts->video_st->time_base);
        pkt->pts = ts->video_st->codec->coded_frame->pts * ts->video_st->time_base.den / ts->video_st->time_base.num / 1000;
        //pkt->pts = ts->video_st->codec->coded_frame->pts;
    }
    if (ts->video_st->codec->coded_frame->key_frame)
        pkt->flags |= AV_PKT_FLAG_KEY;
    pkt->stream_index= ts->video_st->index;
    pkt->data= data;
    pkt->size= encoded;
    //fprintf(stderr, "VIDEO PTS="LLU" was: "LLU" (%p)\n", pkt->pts, ts->video_st->codec->coded_frame->pts, pl);
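    /*queue the packet at the tail of the video packet list, protected by the video mutex*/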
    gf_mx_p(ts->videoMx);
    if (!ts->videoPackets)
        ts->videoPackets = pl;
    else {
        AVPacketList * px = ts->videoPackets;
        while (px->next)
            px = px->next;
        px->next = pl;
    }
    gf_mx_v(ts->videoMx);
    return 0;
}
Example #17
void gf_term_remove_codec(GF_Terminal *term, GF_Codec *codec)
{
	u32 i;
	Bool locked;
	CodecEntry *ce;

	GF_LOG(GF_LOG_DEBUG, GF_LOG_MEDIA, ("[Terminal] Unregistering codec %s\n", codec->decio ? codec->decio->module_name : "RAW"));

	/*cf note above*/
	locked = gf_mx_try_lock(term->mm_mx);

	i=0;
	while ((ce = (CodecEntry*)gf_list_enum(term->codecs, &i))) {
		if (ce->dec != codec) continue;

		if (ce->thread) {
			if (ce->flags & GF_MM_CE_RUNNING) {
				ce->flags &= ~GF_MM_CE_RUNNING;
				while (! (ce->flags & GF_MM_CE_DEAD)) gf_sleep(10);
				ce->flags &= ~GF_MM_CE_DEAD;
			}
			gf_th_del(ce->thread);
			gf_mx_del(ce->mx);
		}
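		/*if the media-manager mutex was grabbed, remove the entry now; otherwise flag it as discarded so the media manager frees it later*/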
		if (locked) {
			gf_free(ce);
			gf_list_rem(term->codecs, i-1);
		} else {
			ce->flags |= GF_MM_CE_DISCARDED;
		}
		break;
	}
	if (locked) gf_mx_v(term->mm_mx);
	return;
}
Example #18
u32 RunSingleDec(void *ptr)
{
	GF_Err e;
	u64 time_taken;
	CodecEntry *ce = (CodecEntry *) ptr;

	GF_LOG(GF_LOG_DEBUG, GF_LOG_CORE, ("[MediaDecoder %d] Entering thread ID %d\n", ce->dec->odm->OD->objectDescriptorID, gf_th_id() ));

	while (ce->flags & GF_MM_CE_RUNNING) {
		time_taken = gf_sys_clock_high_res();
		if (!ce->dec->force_cb_resize) {
			gf_mx_p(ce->mx);
			e = gf_codec_process(ce->dec, ce->dec->odm->term->frame_duration);
			if (e) gf_term_message(ce->dec->odm->term, ce->dec->odm->net_service->url, "Decoding Error", e);
			gf_mx_v(ce->mx);
		}
		time_taken = gf_sys_clock_high_res() - time_taken;


		/*no priority boost this way for systems codecs, priority is dynamically set by not releasing the
		graph when late and moving on*/
		if (!ce->dec->CB || (ce->dec->CB->UnitCount == ce->dec->CB->Capacity))
			ce->dec->PriorityBoost = 0;

		/*while on don't sleep*/
		if (ce->dec->PriorityBoost) continue;
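		/*decoding was nearly instantaneous, yield for 1 ms to avoid busy-looping*/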

		if (time_taken<20) {
			gf_sleep(1);
		}
	}
	ce->flags |= GF_MM_CE_DEAD;
	return 0;
}
Example #19
GF_Err GAPI_ClearFS(GAPIPriv *gctx, unsigned char *ptr)
{
	gf_mx_p(gctx->mx);
	memset(ptr, 0, sizeof(char) * gctx->screen_w*gctx->screen_h*gctx->BPP);
	gf_mx_v(gctx->mx);
	return GF_OK;
}
Example #20
s32 gf_cache_remove_session_from_cache_entry(DownloadedCacheEntry entry, GF_DownloadSession * sess) {
	u32 i;
	s32 count;
	if (!entry || !sess || !entry->sessions)
		return -1;
	count = gf_list_count(entry->sessions);
	for (i = 0 ; i < (u32)count; i++) {
		GF_DownloadSession * s = gf_list_get(entry->sessions, i);
		if (s == sess) {
			gf_list_rem(entry->sessions, i);
			count --;
			break;
		}
	}
	if (entry->write_session == sess) {
		/* OK, this is not optimal to close it since we are in a mutex,
		* but we don't want to risk to have another session opening
		* a not fully closed cache entry */
		if (entry->writeFilePtr) {
			if (fclose(entry->writeFilePtr)) {
				GF_LOG(GF_LOG_ERROR, GF_LOG_NETWORK, ("[CACHE] gf_cache_remove_session_from_cache_entry:%d, Failed to properly fclose cache file '%s' of url '%s', cache may be corrupted !\n", __LINE__, entry->cache_filename, entry->url));
			}
		}
		entry->writeFilePtr = NULL;
		entry->write_session = NULL;
#ifdef ENABLE_WRITE_MX
		gf_mx_v(entry->write_mutex);
#endif
	}
	return count;
}
Example #21
//-------------------------------
void CNativeWrapper::SetupLogs() {
	const char *opt;
	debug_log("SetupLogs()");

	gf_mx_p(m_mx);
	gf_log_set_tools_levels( gf_cfg_get_key(m_user.config, "General", "Logs") );
	gf_log_set_callback(this, on_gpac_log);
	opt = gf_cfg_get_key(m_user.config, "General", "LogFile");
	if (opt) {
		JavaEnvTh *env = getEnv();
		if (env && env->cbk_setLogFile) {
			env->env->PushLocalFrame(1);
			jstring js = env->env->NewStringUTF(opt);
			env->env->CallVoidMethod(env->cbk_obj, env->cbk_setLogFile, js);
			env->env->PopLocalFrame(NULL);
		}
	}
	gf_mx_v(m_mx);

	GF_LOG(GF_LOG_INFO, GF_LOG_CORE, ("Osmo4 logs initialized\n"));
	/* Test for JNI invocations, should work properly
	int k;
	for (k = 0 ; k < 512; k++){
		GF_LOG(GF_LOG_INFO, GF_LOG_CORE, ("Message %d\n", k));
	}*/
}
Example #22
File: saf.c Project: erelh/gpac
GF_Err gf_saf_mux_stream_add(GF_SAFMuxer *mux, u32 stream_id, u32 ts_res, u32 buffersize_db, u8 stream_type, u8 object_type, char *mime_type, char *dsi, u32 dsi_len, char *remote_url)
{
	GF_SAFStream *str = saf_get_stream(mux, stream_id);
	if (str) return GF_BAD_PARAM;

	if (mux->state == 2) return GF_BAD_PARAM;

	gf_mx_p(mux->mx);

	GF_SAFEALLOC(str, GF_SAFStream);
	if (!str) {
		gf_mx_v(mux->mx);
		return GF_OUT_OF_MEM;
	}
	str->stream_id = stream_id;
	str->ts_resolution = ts_res;
	str->buffersize_db = buffersize_db;
	str->stream_type = stream_type;
	str->object_type = object_type;
	if (mime_type) {
		str->mime_type = gf_strdup(mime_type);
		str->stream_type = str->object_type = 0xFF;
	}
	str->dsi_len = dsi_len;
	if (dsi_len) {
		str->dsi = (char *) gf_malloc(sizeof(char)*dsi_len);
		memcpy(str->dsi, dsi, sizeof(char)*dsi_len);
	}
	if (remote_url) str->remote_url = gf_strdup(remote_url);
	str->aus = gf_list_new();
	mux->state = 0;
	gf_list_add(mux->streams, str);
	gf_mx_v(mux->mx);
	return GF_OK;
}
Example #23
GF_Err gf_cache_close_write_cache( const DownloadedCacheEntry entry, const GF_DownloadSession * sess, Bool success ) {
	GF_Err e = GF_OK;
	CHECK_ENTRY;
	if (!sess || !entry->write_session || entry->write_session != sess)
		return GF_OK;
	assert( sess == entry->write_session );
	if (entry->writeFilePtr) {
		GF_LOG(GF_LOG_INFO, GF_LOG_NETWORK,
		       ("[CACHE] Closing file %s, %d bytes written.\n", entry->cache_filename, entry->written_in_cache));
		if (fflush( entry->writeFilePtr ) || fclose( entry->writeFilePtr ))
			e = GF_IO_ERR;
		e|= gf_cache_flush_disk_cache(entry);
		if (e == GF_OK && success) {
			e|= gf_cache_set_last_modified_on_disk( entry, gf_cache_get_last_modified_on_server(entry));
			e|= gf_cache_set_etag_on_disk( entry, gf_cache_get_etag_on_server(entry));
		}
		e|= gf_cache_flush_disk_cache(entry);
#if defined(_BSD_SOURCE) || _XOPEN_SOURCE >= 500
		/* On  UNIX, be sure to flush all the data */
		sync();
#endif
		entry->writeFilePtr = NULL;
		if (GF_OK != e) {
			GF_LOG(GF_LOG_ERROR, GF_LOG_NETWORK, ("[CACHE] Failed to fully write file on cache, e=%d\n", e));
		}
	}
	entry->write_session = NULL;
#ifdef ENABLE_WRITE_MX
	gf_mx_v(entry->write_mutex);
#endif
	return e;
}
Example #24
void gf_clock_pause(GF_Clock *ck)
{
	gf_mx_p(ck->mx);
	if (!ck->Paused) ck->PauseTime = gf_term_get_time(ck->term);
	ck->Paused += 1;
	gf_mx_v(ck->mx);
}
Example #25
/*buffering scene protected by a mutex because it may be triggered by composition memory (audio or visual threads)*/
void gf_clock_buffer_on(GF_Clock *ck)
{
	gf_mx_p(ck->mx);
	if (!ck->Buffering) gf_clock_pause(ck);
	ck->Buffering += 1;
	gf_mx_v(ck->mx);
}
Example #26
static Bool has_packet_ready(GF_AbstractTSMuxer* ts, GF_Mutex * mx, AVPacketList ** pkts) {
	Bool ret;
	gf_mx_p(mx);
	ret = (*pkts) != NULL;
	gf_mx_v(mx);
	return ret;
}
Example #27
/* Traverses the list of Access Units already demuxed & parsed to update the buffered status */
void gf_mse_source_buffer_update_buffered(GF_HTML_SourceBuffer *sb) {
	u32 i;
	u32 track_count;

	track_count = gf_list_count(sb->tracks);
	gf_html_timeranges_reset(sb->buffered);
	for (i = 0; i < track_count; i++) {
		GF_HTML_MediaTimeRanges *track_ranges;
		GF_HTML_Track *track = (GF_HTML_Track *)gf_list_get(sb->tracks, i);
		gf_mx_p(track->buffer_mutex);
		track_ranges = gf_mse_timeranges_from_track_packets(track);
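		/*the buffered ranges of the source buffer are the intersection of the buffered ranges of all its tracks*/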
		if (i != 0) {
			GF_HTML_MediaTimeRanges *tmp;
			tmp = gf_html_timeranges_intersection(sb->buffered, track_ranges);
			gf_html_timeranges_del(track_ranges);
			gf_list_del(sb->buffered->times);
			sb->buffered->times = tmp->times;
			sb->buffered->timescale = tmp->timescale;
			gf_free(tmp);
		} else {
			gf_list_del(sb->buffered->times);
			sb->buffered->times = track_ranges->times;
			sb->buffered->timescale = track_ranges->timescale;
			gf_free(track_ranges);
		}
		gf_mx_v(track->buffer_mutex);
	}
}
Example #28
/* Requests from the decoders to get the next Access Unit: we get it from the track buffer */
GF_EXPORT
GF_Err gf_mse_track_buffer_get_next_packet(GF_HTML_Track *track,
        char **out_data_ptr, u32 *out_data_size,
        GF_SLHeader *out_sl_hdr, Bool *sl_compressed,
        GF_Err *out_reception_status, Bool *is_new_data)
{
	GF_MSE_Packet *packet;
	u32 count;
	gf_mx_p(track->buffer_mutex);
	count = gf_list_count(track->buffer);
	packet = (GF_MSE_Packet *)gf_list_get(track->buffer, track->packet_index);
	if (packet) {
		*out_data_ptr = packet->data;
		*out_data_size = packet->size;
		*out_sl_hdr = packet->sl_header;
		*sl_compressed = packet->is_compressed;
		*out_reception_status = packet->status;
		*is_new_data = packet->is_new_data;
		packet->is_new_data = GF_FALSE;
		GF_LOG(GF_LOG_DEBUG, GF_LOG_DASH, ("[MSE_IN] Sending AU #%d/%d to decoder with PTS %g s\n", track->packet_index, count, TIMESCALE_TO_SECONDS(packet->sl_header.compositionTimeStamp)));
	} else {
		*out_data_ptr = NULL;
		*out_data_size = 0;
		*sl_compressed = GF_FALSE;
		*out_reception_status = GF_OK;
		*is_new_data = GF_FALSE;
		GF_LOG(GF_LOG_DEBUG, GF_LOG_DASH, ("[MSE_In] No AU for decoder\n"));
	}
	gf_mx_v(track->buffer_mutex);
	return GF_OK;
}
Example #29
GF_EXPORT
GF_Err gf_term_process_flush(GF_Terminal *term)
{
	u32 i;
	CodecEntry *ce;
	if (!(term->flags & GF_TERM_NO_COMPOSITOR_THREAD) ) return GF_BAD_PARAM;

	/*update till frame mature*/
	while (1) {

		if (term->flags & GF_TERM_NO_DECODER_THREAD) {
			gf_term_handle_services(term);
			gf_mx_p(term->mm_mx);
			i=0;
			while ((ce = (CodecEntry*)gf_list_enum(term->codecs, &i))) {
				gf_codec_process(ce->dec, 10000);
			}
			gf_mx_v(term->mm_mx);
		}

		if (!gf_sc_draw_frame(term->compositor, NULL))
			break;

		if (! (term->user->init_flags & GF_TERM_NO_REGULATION))
			break;
	}
	return GF_OK;
}
Example #30
void 
GPAC_MediaController::OnMSStateVariablesChanged(PLT_Service* service, NPT_List<PLT_StateVariable*>* vars)
{
	GPAC_MediaServerItem *ms = NULL;
	gf_mx_p(m_ControlPointLock);

	u32 i, count;
	count = gf_list_count(m_MediaServers);
	for (i=0; i<count; i++) {
		ms = (GPAC_MediaServerItem *) gf_list_get(m_MediaServers, i);
		if (ms->m_UUID==service->GetDevice()->GetUUID()) {
			break;
		}
		ms = NULL;
	}
	
	if (!ms) {
		gf_mx_v(m_ControlPointLock);
		return;
	}

    PLT_StateVariable* var = PLT_StateVariable::Find(*vars, "ContainerUpdateIDs");
    if (var) {
        // variable found, parse value
        NPT_String value = var->GetValue();
        NPT_String item_id, update_id;
        int index;

        while (value.GetLength()) {
            // look for container id
            index = value.Find(',');
            if (index < 0) break;
            item_id = value.Left(index);
            value = value.SubString(index+1);

            // look for update id
            if (value.GetLength()) {
                index = value.Find(',');
                update_id = (index<0)?value:value.Left(index);
                value = (index<0)?"":value.SubString(index+1);

				m_pUPnP->ContainerChanged(ms->m_device, item_id, update_id);
            }       
        }
    }        
	gf_mx_v(m_ControlPointLock);
}