/********************* return NULL on error *********************/ static char * __copy_group(const char * src_table, const char * src_file, const char * dst_table, const char * dst_file, const char * group_name, va_list ap) { const config_t * src_config = NULL; const config_t * dst_config = NULL; config_setting_t * src_setting = NULL; config_setting_t * dst_setting = NULL; char * path; char * full_path; char * new_group_name = NULL; const char * group_name_used = NULL; path = get_path(ap); full_path = add_entry_to_path(path, (char *)group_name); if(full_path == NULL) { return NULL; } SDL_LockMutex(entry_mutex); src_config = get_config(src_table,src_file); if(src_config==NULL) { SDL_UnlockMutex(entry_mutex); free(path); free(full_path); return NULL; } src_setting = config_lookup (src_config, full_path); if( src_setting == NULL ) { SDL_UnlockMutex(entry_mutex); free(path); free(full_path); return NULL; } dst_config = get_config(dst_table,dst_file); if(dst_config==NULL) { SDL_UnlockMutex(entry_mutex); free(path); free(full_path); return NULL; } dst_setting = config_lookup(dst_config, full_path); free(full_path); /* if the setting does not exist, create it */ if( dst_setting == NULL ) { group_name_used = group_name; } /* else find a new name for it */ else { new_group_name = __get_unused_group_on_path(dst_table, dst_file, path); group_name_used = new_group_name; } if( path != NULL ) { dst_setting = config_lookup(dst_config, path); } else { dst_setting = config_root_setting(dst_config); } dst_setting = config_setting_add(dst_setting,group_name_used,CONFIG_TYPE_GROUP); free(path); if(!entry_copy_config(src_setting,dst_setting)) { SDL_UnlockMutex(entry_mutex); return NULL; } SDL_UnlockMutex(entry_mutex); if(new_group_name != NULL ) { return new_group_name; } return strdup(group_name); }
/* Queue a decoded video frame for display.
 *
 * Blocks until there is room in the picture queue (or quit is requested),
 * (re)allocates the destination frame on the main thread when needed, then
 * converts the frame via sws_scale and uploads it to the SDL texture.
 *
 * Returns 0 on success, -1 if the player is quitting.
 *
 * CLEANUP: removed the unused `AVPicture pict` local and the dead
 * commented-out SDL1 YUV-overlay code; behavior is unchanged.
 * NOTE(review): m_pFrameYUV / m_pSdlTexture appear to be globals defined
 * elsewhere in this file — confirm they are valid before this is called. */
int queue_picture(VideoState *is, AVFrame *pFrame, double pts) {

  VideoPicture *vp;

  /* wait until we have space for a new pic */
  SDL_LockMutex(is->pictq_mutex);
  while(is->pictq_size >= VIDEO_PICTURE_QUEUE_SIZE &&
	!is->quit) {
    SDL_CondWait(is->pictq_cond, is->pictq_mutex);
  }
  SDL_UnlockMutex(is->pictq_mutex);

  if(is->quit)
    return -1;

  // windex is set to 0 initially
  vp = &is->pictq[is->pictq_windex];

  /* allocate or resize the buffer!  Allocation must happen on the main
   * thread (it owns the video subsystem), so post an event and wait. */
  if(!vp->m_pFrame ||
     vp->width != is->video_st->codec->width ||
     vp->height != is->video_st->codec->height) {
    SDL_Event event;

    vp->allocated = 0;
    /* we have to do it in the main thread */
    event.type = FF_ALLOC_EVENT;
    event.user.data1 = is;
    SDL_PushEvent(&event);

    /* wait until we have a picture allocated */
    SDL_LockMutex(is->pictq_mutex);
    while(!vp->allocated && !is->quit) {
      SDL_CondWait(is->pictq_cond, is->pictq_mutex);
    }
    SDL_UnlockMutex(is->pictq_mutex);
    if(is->quit) {
      return -1;
    }
  }

  /* We have a place to put our picture on the queue */
  /* If we are skipping a frame, do we set this to null
     but still return vp->allocated = 1? */
  if(vp->m_pFrame){

    /* Convert the image into the YUV layout SDL uses and upload it to
     * the streaming texture. */
    sws_scale(is->sws_ctx, (const uint8_t* const*)pFrame->data, pFrame->linesize, 0,
              is->video_st->codec->height, m_pFrameYUV->data, m_pFrameYUV->linesize);

    SDL_Rect rect;
    rect.x = 0;
    rect.y = 0;
    rect.w = is->video_st->codec->width;
    rect.h = is->video_st->codec->height;
    SDL_UpdateYUVTexture(m_pSdlTexture, &rect,
                         m_pFrameYUV->data[0], m_pFrameYUV->linesize[0],
                         m_pFrameYUV->data[1], m_pFrameYUV->linesize[1],
                         m_pFrameYUV->data[2], m_pFrameYUV->linesize[2]);

    vp->pts = pts;

    /* now we inform our display thread that we have a pic ready */
    if(++is->pictq_windex == VIDEO_PICTURE_QUEUE_SIZE) {
      is->pictq_windex = 0;
    }
    SDL_LockMutex(is->pictq_mutex);
    is->pictq_size++;
    SDL_UnlockMutex(is->pictq_mutex);
  }
  return 0;
}
// Send entity `eid` to this client, delta-compressed against the last state
// sent to it (client_entities).  Coordinates are converted to client-relative
// space (64 units per world tile plus the entity's pixel offset).  Entities
// outside roughly +/-1024 units of the client are culled.
//
// Wire packets (all fields 16-bit big-endian via SDLNet_Write16):
//   'M' (9 bytes):  id, x, y, z            - position-only update
//   'i' (9 bytes):  id, w, h, tex          - size/texture-only update
//   'a' (15 bytes): id, x, y, z, w, h, tex - full entity
// Packet buffers are heap-allocated here; ownership passes to data_to_send
// (presumably freed by the sender thread - confirm).
void ClientConnection::send_entity(Uint32 eid, const Entity *ent)
{
	// Snapshot of the entity as the client should see it.
	DataEntity dent = {
		eid,
		ent->world->difference(ent->get_x(), get_x()) * 64 + ent->get_offset_x(),
		ent->world->difference(ent->get_y(), get_y()) * 64 + ent->get_offset_y(),
		ent->get_z() + ent->get_offset_z(),
		ent->get_w(), ent->get_h(),
		ent->get_tex(),
		false
	};
	// Check if the entity exists allready
	typeof(client_entities.begin()) it = client_entities.find(eid);
	if (it != client_entities.end()) {
		DataEntity &itde = it->second;
		// Check bounds: only skip when BOTH the new position and the
		// previously-sent position are out of view.
		if ((dent.x + dent.w < -1024 || dent.x > 1024 ||
		     dent.y + dent.h < -1024 || dent.y > 1024) &&
		    (itde.x + itde.w < -1024 || itde.x > 1024 ||
		     itde.y + itde.h < -1024 || itde.y > 1024))
			return;
		// FIXME: If the entity moves across a corner, it will never be sent
		// If it exists send only modifications
		bool move = itde.x != dent.x || itde.y != dent.y || itde.z != dent.z;
		bool resize = itde.w != dent.w || itde.h != dent.h;
		bool change_tex = itde.tex != dent.tex;
		itde = dent; // remember what we are about to send
		if (!move && !resize && !change_tex)
			return; // Nothing has changed, send nothing
		if (move && !resize && !change_tex) {
			// Only move the entity ('M' packet)
			static const int len = 9;
			DataPacket packet = { len, new char[len] };
			packet.data[0] = 'M';
			SDLNet_Write16(dent.id, packet.data + 1);
			SDLNet_Write16(dent.x, packet.data + 3);
			SDLNet_Write16(dent.y, packet.data + 5);
			SDLNet_Write16(dent.z, packet.data + 7);
			SDL_LockMutex(send_mutex);
			data_to_send.push_back(packet);
			SDL_UnlockMutex(send_mutex);
			return;
		}
		if (!move && (resize || change_tex)) {
			// Only send width height and texture ('i' packet)
			static const int len = 9;
			DataPacket packet = { len, new char[len] };
			packet.data[0] = 'i';
			SDLNet_Write16(dent.id, packet.data + 1);
			SDLNet_Write16(dent.w, packet.data + 3);
			SDLNet_Write16(dent.h, packet.data + 5);
			SDLNet_Write16(dent.tex, packet.data + 7);
			SDL_LockMutex(send_mutex);
			data_to_send.push_back(packet);
			SDL_UnlockMutex(send_mutex);
			return;
		}
		{
			// Otherwise send everything ('a' packet)
			static const int len = 15;
			DataPacket packet = { len, new char[len] };
			packet.data[0] = 'a';
			SDLNet_Write16(dent.id, packet.data + 1);
			SDLNet_Write16(dent.x, packet.data + 3);
			SDLNet_Write16(dent.y, packet.data + 5);
			SDLNet_Write16(dent.z, packet.data + 7);
			SDLNet_Write16(dent.w, packet.data + 9);
			SDLNet_Write16(dent.h, packet.data + 11);
			SDLNet_Write16(dent.tex, packet.data + 13);
			SDL_LockMutex(send_mutex);
			data_to_send.push_back(packet);
			SDL_UnlockMutex(send_mutex);
		}
	} else {
		// If it's new, send it as a new entity
		// Check bounds
		if (dent.x + dent.w < -1024 || dent.x > 1024 ||
		    dent.y + dent.h < -1024 || dent.y > 1024)
			return;
		// Store it
		client_entities[eid] = dent;
		static const int len = 15;
		DataPacket packet = { len, new char[len] };
		packet.data[0] = 'a';
		SDLNet_Write16(dent.id, packet.data + 1);
		SDLNet_Write16(dent.x, packet.data + 3);
		SDLNet_Write16(dent.y, packet.data + 5);
		SDLNet_Write16(dent.z, packet.data + 7);
		SDLNet_Write16(dent.w, packet.data + 9);
		SDLNet_Write16(dent.h, packet.data + 11);
		SDLNet_Write16(dent.tex, packet.data + 13);
		SDL_LockMutex(send_mutex);
		data_to_send.push_back(packet);
		SDL_UnlockMutex(send_mutex);
	}
}
// Releases mutex_ on scope exit; presumably the constructor acquired it
// (scoped-lock / RAII pattern) — constructor not visible here, confirm.
~ScopedMutex() { SDL_UnlockMutex(mutex_); }
/*
 * Convert a 32-bit RTP timestamp into a millisecond presentation time.
 *
 * rtp_ts      - RTP timestamp of the packet being processed
 * uts         - wallclock receive time, used only to seed the first-packet
 *               reference in the broadcast path
 * wrap_offset - running accumulator of 2^32 RTP-tick wraps (updated here)
 *
 * On-demand streams are timed against m_base_rtp_ts/m_play_start_time;
 * broadcast streams are timed against the first packet's (rtp_ts, uts) pair.
 */
uint64_t CRtpByteStreamBase::rtp_ts_to_msec (uint32_t rtp_ts, uint64_t uts,
                                             uint64_t &wrap_offset)
{
  uint64_t timetick;
  uint64_t adjusted_rtp_ts;
  uint64_t adjusted_wc_rtp_ts;
  bool have_wrap = false;
  uint32_t this_mask, last_mask;

  // Detect a 32-bit timestamp wrap by watching the top bit flip from
  // 1 (previous ts) to 0 (current ts).
  last_mask = m_last_rtp_ts & (1U << 31);
  this_mask = rtp_ts & (1U << 31);

  if (last_mask != this_mask) {
    if (this_mask == 0) {
      wrap_offset += (TO_U64(1) << 32);
      have_wrap = true;
      rtp_message(LOG_DEBUG, "%s - have wrap %x new %x", m_name,
                  m_last_rtp_ts, rtp_ts);
    } else {
      // need to do something here
      // (top bit went 0 -> 1: either a late pre-wrap packet or a
      // backwards jump; currently ignored)
    }
  }

  if (m_stream_ondemand) {
    // On-demand: offset from the session base RTP timestamp, anchored
    // at m_play_start_time.
    adjusted_rtp_ts = wrap_offset;
    adjusted_rtp_ts += rtp_ts;
    adjusted_wc_rtp_ts = m_base_rtp_ts;

    if (adjusted_wc_rtp_ts > adjusted_rtp_ts) {
      // Packet timestamp precedes the base: subtract, clamping at 0.
      timetick = adjusted_wc_rtp_ts - adjusted_rtp_ts;
      timetick *= TO_U64(1000);
      timetick /= m_timescale;
      if (timetick > m_play_start_time) {
        timetick = 0;
      } else {
        timetick = m_play_start_time - timetick;
      }
    } else {
      timetick = adjusted_rtp_ts - adjusted_wc_rtp_ts;
      timetick *= TO_U64(1000);
      timetick /= m_timescale;
      timetick += m_play_start_time;
    }
  } else {
    // We've got a broadcast scenario here...
    if (m_have_first_pak_ts == false) {
      // We haven't processed the first packet yet - we record
      // the data here.
      m_first_pak_rtp_ts = rtp_ts;
      m_first_pak_ts = uts;
      m_have_first_pak_ts = true;
      rtp_message(LOG_DEBUG, "%s first pak ts %u "U64,
                  m_name, m_first_pak_rtp_ts, m_first_pak_ts);
      // if we have received RTCP, set the wallclock offset, which
      // triggers the synchronization effort.
      if (m_rtcp_received) {
        // calculate other stuff
        //rtp_message(LOG_DEBUG, "%s rtp_ts_to_msec calling wallclock", m_name);
        set_wallclock_offset(m_rtcp_ts, m_rtcp_rtp_ts);
      }
    }
    SDL_LockMutex(m_rtp_packet_mutex);
    // fairly simple calculation to calculate the timestamp
    // based on this rtp timestamp, the first pak rtp timestamp and
    // the first packet timestamp.
    int32_t adder;
    int64_t ts_adder;
    if (have_wrap) {
      // Rebase the first-packet reference at the wrap so the signed
      // 32-bit delta below stays small.
      adder = rtp_ts - m_first_pak_rtp_ts;
      ts_adder = (int64_t)adder;
      ts_adder *= TO_D64(1000);
      ts_adder /= (int64_t)m_timescale;
      m_first_pak_ts += ts_adder;
      m_first_pak_rtp_ts = rtp_ts;
#ifdef DEBUG_RTP_BCAST
      rtp_message(LOG_DEBUG, "%s adjust for wrap - first pak ts is now "U64" rtp %u",
                  m_name, m_first_pak_ts, m_first_pak_rtp_ts);
#endif
    }
    // adder could be negative here, based on the RTCP we receive
    adder = rtp_ts - m_first_pak_rtp_ts;
    ts_adder = (int64_t)adder;
    ts_adder *= TO_D64(1000);
    ts_adder /= (int64_t)m_timescale;
    timetick = m_first_pak_ts;
    timetick += ts_adder;
    SDL_UnlockMutex(m_rtp_packet_mutex);
#ifdef DEBUG_RTP_BCAST
    rtp_message(LOG_DEBUG, "%s ts %x base %x "U64" tp "U64" adder %d "D64,
                m_name, rtp_ts, m_first_pak_rtp_ts, m_first_pak_ts,
                timetick, adder, ts_adder);
#endif
  }
#ifdef DEBUG_RTP_TS
  rtp_message(LOG_DEBUG,"%s time "U64" %u", m_name, timetick, rtp_ts);
#endif
  // record time
  m_last_rtp_ts = rtp_ts;
  m_last_realtime = timetick;
  return (timetick);
}
/*
* Sys_Mutex_Unlock
*
* Releases the SDL mutex wrapped inside the qmutex_t handle.
*/
void Sys_Mutex_Unlock( qmutex_t *mutex )
{
	SDL_UnlockMutex( mutex->m );
}
// Begin a new tick: append one empty sample row (dim parameter slots) to
// the data series, serialized against concurrent readers via the mutex.
void Graph::startTick()
{
	SDL_LockMutex(mutex);
	std::vector<ParamState> row(dim);
	data.push_back(row);
	SDL_UnlockMutex(mutex);
}
// Releases graphMutex only.
// NOTE(review): no matching SDL_LockMutex is visible here — the lock is
// presumably taken elsewhere (e.g. by a worker thread before this runs);
// confirm the lock/unlock pairing, as unlocking an unowned SDL mutex is
// an error.
void Graphics::Update() { SDL_UnlockMutex(Graphics::ThreadData.graphMutex); }
/* Unlock a fe_mt_mutex — thin wrapper over SDL_UnlockMutex on the
 * underlying SDL handle. */
void fe_mt_mutex_unlock(fe_mt_mutex *mutex)
{
    SDL_UnlockMutex(*mutex);
}
// called by the runner when a job completes void AsyncJobQueue::Finish(Job *job, const uint8_t threadIdx) { SDL_LockMutex(m_finishedLock[threadIdx]); m_finished[threadIdx].push_back(job); SDL_UnlockMutex(m_finishedLock[threadIdx]); }
/* Socket reader thread: reads length-framed commands from the game socket
 * and enqueues them for the main thread.
 *
 * Wire framing: a 2-byte big-endian length header, extended to 3 bytes when
 * the first byte has its high bit set (the low 7 bits become the top byte of
 * a 23-bit length).  The payload of cmd_len bytes follows the header.
 *
 * Returns -1 when the connection closes or abort_thread is set.
 */
static int reader_thread_loop(void *dummy)
{
    /* NOTE(review): static buffer state assumes a single reader thread;
     * a second instance would share readbuf — confirm. */
    static uint8_t *readbuf = NULL;
    static int readbuf_size = 256;
    int readbuf_len = 0;   /* bytes currently buffered (header + payload) */
    int header_len = 0;    /* parsed header size: 2 or 3 */
    int cmd_len = -1;      /* parsed payload size, -1 until header complete */

    if (!readbuf) {
        readbuf = emalloc(readbuf_size);
    }

    while (!abort_thread) {
        int toread;

        /* First, try to read a command length sequence */
        if (readbuf_len < 2) {
            /* Three-byte length? */
            if (readbuf_len > 0 && (readbuf[0] & 0x80)) {
                toread = 3 - readbuf_len;
            } else {
                toread = 2 - readbuf_len;
            }
        } else if (readbuf_len == 2 && (readbuf[0] & 0x80)) {
            /* Have 2 of a 3-byte header; need one more byte. */
            toread = 1;
        } else {
            /* If we have a finished header, get the packet size from it. */
            if (readbuf_len <= 3) {
                uint8_t *p = readbuf;

                header_len = (*p & 0x80) ? 3 : 2;
                cmd_len = 0;

                if (header_len == 3) {
                    cmd_len += ((int) (*p++) & 0x7f) << 16;
                }

                cmd_len += ((int) (*p++)) << 8;
                cmd_len += ((int) (*p++));
            }

            toread = cmd_len + header_len - readbuf_len;

            /* Grow the buffer if the whole command won't fit. */
            if (readbuf_len + toread > readbuf_size) {
                uint8_t *tmp = readbuf;

                readbuf_size = readbuf_len + toread;
                readbuf = emalloc(readbuf_size);
                memcpy(readbuf, tmp, readbuf_len);
                efree(tmp);
            }
        }

        size_t amt;
        bool success = socket_read(csocket.sc,
                                   (void *) (readbuf + readbuf_len),
                                   toread, &amt);
        if (!success) {
            break; /* socket closed or read error */
        }

        readbuf_len += amt;
        network_graph_update(NETWORK_GRAPH_TYPE_GAME,
                             NETWORK_GRAPH_TRAFFIC_RX, amt);

        /* Finished with a command? */
        if (readbuf_len == cmd_len + header_len && !abort_thread) {
            /* Hand the payload (sans header) to the input queue and wake
             * the consumer. */
            command_buffer *buf = command_buffer_new(readbuf_len - header_len,
                                                     readbuf + header_len);

            SDL_LockMutex(input_buffer_mutex);
            command_buffer_enqueue(buf, &input_queue_start, &input_queue_end);
            SDL_CondSignal(input_buffer_cond);
            SDL_UnlockMutex(input_buffer_mutex);

            /* Reset framing state for the next command. */
            cmd_len = -1;
            header_len = 0;
            readbuf_len = 0;
        }
    }

    client_socket_close(&csocket);

    if (readbuf != NULL) {
        efree(readbuf);
        readbuf = NULL;
    }

    return -1;
}
/* ================== Sys_LeaveCriticalSection ================== */ void Sys_LeaveCriticalSection(int index) { assert(index >= 0 && index < MAX_CRITICAL_SECTIONS); if (SDL_UnlockMutex(mutex[index]) != 0) common->Error("ERROR: SDL_UnlockMutex failed\n"); }
/*
 * The bulk of the Sound_NewSample() work is done here...
 * Ask the specified decoder to handle the data in (rw), and if
 * so, construct the Sound_Sample. Otherwise, try to wind (rw)'s stream
 * back to where it was, and return false.
 *
 * On success (returns 1) the sample is fully initialized, its conversion
 * buffer is sized, and it is prepended to the global sample_list.  On any
 * failure (returns 0) the stream position is restored so the next decoder
 * can be tried.
 */
static int init_sample(const Sound_DecoderFunctions *funcs,
                       Sound_Sample *sample, const char *ext,
                       Sound_AudioInfo *_desired)
{
    Sound_SampleInternal *internal = (Sound_SampleInternal *) sample->opaque;
    Sound_AudioInfo desired;
    /* remember where the stream was, so failed probes can rewind */
    int pos = SDL_RWtell(internal->rw);

    /* fill in the funcs for this decoder... */
    sample->decoder = &funcs->info;
    internal->funcs = funcs;
    if (!funcs->open(sample, ext))
    {
        SDL_RWseek(internal->rw, pos, SEEK_SET); /* set for next try... */
        return(0);
    } /* if */

    /* success; we've got a decoder! */

    /* Now we need to set up the conversion buffer...
     * Any field the caller left zero inherits the decoder's actual format. */
    memcpy(&desired, (_desired != NULL) ? _desired : &sample->actual,
           sizeof (Sound_AudioInfo));
    if (desired.format == 0)
        desired.format = sample->actual.format;
    if (desired.channels == 0)
        desired.channels = sample->actual.channels;
    if (desired.rate == 0)
        desired.rate = sample->actual.rate;

    if (Sound_BuildAudioCVT(&internal->sdlcvt,
                            sample->actual.format,
                            sample->actual.channels,
                            sample->actual.rate,
                            desired.format,
                            desired.channels,
                            desired.rate,
                            sample->buffer_size) == -1)
    {
        __Sound_SetError(SDL_GetError());
        funcs->close(sample);
        SDL_RWseek(internal->rw, pos, SEEK_SET); /* set for next try... */
        return(0);
    } /* if */

    /* Grow the sample buffer if conversion can expand the data. */
    if (internal->sdlcvt.len_mult > 1)
    {
        void *rc = realloc(sample->buffer,
                           sample->buffer_size * internal->sdlcvt.len_mult);
        if (rc == NULL)
        {
            funcs->close(sample);
            SDL_RWseek(internal->rw, pos, SEEK_SET); /* set for next try... */
            return(0);
        } /* if */
        sample->buffer = rc;
    } /* if */

    /* these pointers are all one and the same. */
    memcpy(&sample->desired, &desired, sizeof (Sound_AudioInfo));
    internal->sdlcvt.buf = internal->buffer = sample->buffer;
    internal->buffer_size = sample->buffer_size / internal->sdlcvt.len_mult;
    internal->sdlcvt.len = internal->buffer_size;

    /* Prepend our new Sound_Sample to the sample_list... */
    SDL_LockMutex(samplelist_mutex);
    internal->next = sample_list;
    if (sample_list != NULL)
        ((Sound_SampleInternal *) sample_list->opaque)->prev = sample;
    sample_list = sample;
    SDL_UnlockMutex(samplelist_mutex);

    SNDDBG(("New sample DESIRED format: %s format, %d rate, %d channels.\n",
            fmt_to_str(sample->desired.format),
            sample->desired.rate,
            sample->desired.channels));

    SNDDBG(("New sample ACTUAL format: %s format, %d rate, %d channels.\n",
            fmt_to_str(sample->actual.format),
            sample->actual.rate,
            sample->actual.channels));

    SNDDBG(("On-the-fly conversion: %s.\n",
            internal->sdlcvt.needed ? "ENABLED" : "DISABLED"));

    return(1);
} /* init_sample */
// For performance, we're using multiple threads so that the game state can
// be updating in the background while openGL renders.
// The general plan is:
// 1. Vsync happens.  Everything begins.
// 2. Renderthread activates.  (The update thread is currently blocked.)
// 3. Renderthread dumps everything into opengl, via RenderAllEntities.  (And
//    any other similar calls, such as calls to IMGUI)  Updatethread is
//    still blocked.  When this is complete, OpenGL now has its own copy of
//    everything, and we can safely change world state data.
// 4. Renderthread signals updatethread to wake up.
// 5a.Renderthread calls gl_flush, (via Renderer.advanceframe) and waits for
//    everything render.  Once complete, it goes to sleep and waits for the
//    next vsync event.
// 5b.Updatethread goes and updates the game state and gets us all ready for
//    next frame.  Once complete, it also goes to sleep and waits for the next
//    vsync event.
void Game::Run() {
  // Start the update thread:
  UpdateThreadData rt_data(&game_exiting_, &world_, &state_machine_,
                           &renderer_, &input_, &audio_engine_, &sync_);

  input_.AdvanceFrame(&renderer_.window_size());
  state_machine_.AdvanceFrame(16);

  SDL_Thread* update_thread =
      SDL_CreateThread(UpdateThread, "Zooshi Update Thread", &rt_data);
  if (!update_thread) {
    LogError("Error creating update thread.");
    assert(false);
  }

#if DISPLAY_FRAMERATE_HISTOGRAM
  for (int i = 0; i < kHistogramSize; i++) {
    histogram[i] = 0;
  }
  last_printout = 0;
#endif  // DISPLAY_FRAMERATE_HISTOGRAM

  // variables used for regulating our framerate:
  // Total size of our history, in frames:
  const int kHistorySize = 60 * 5;
  // Max number of frames we can have dropped in our history, before we
  // switch to queue-stuffing mode, and ignore vsync pauses.
  const int kMaxDroppedFrames = 3;
  // Sliding window marking which recent frames missed their vsync.
  bool missed_frame_history[kHistorySize];
  for (int i = 0; i < kHistorySize; i++) {
    missed_frame_history[i] = false;
  }
  int history_index = 0;
  int total_dropped_frames = 0;

  global_vsync_context = &sync_;
#ifdef __ANDROID__
  fplbase::RegisterVsyncCallback(HandleVsync);
#else
  // We don't need this on android because we'll just get vsync events directly.
  SDL_Thread* vsync_simulator_thread = SDL_CreateThread(
      VsyncSimulatorThread, "Zooshi Simulated Vsync Thread", nullptr);
  if (!vsync_simulator_thread) {
    LogError("Error creating vsync simulator thread.");
    assert(false);
  }
#endif  // __ANDROID__
  int last_frame_id = 0;

  // We basically own the lock all the time, except when we're waiting
  // for a vsync event.
  SDL_LockMutex(sync_.renderthread_mutex_);

  while (!game_exiting_) {
#ifdef __ANDROID__
    int current_frame_id = fplbase::GetVsyncFrameId();
#else
    int current_frame_id = 0;
#endif  // __ANDROID__
    // Update our framerate history:
    // The oldest value falls off and is replaced with the most recent frame.
    // Also, we update our counts.
    if (missed_frame_history[history_index]) {
      total_dropped_frames--;
    }
    // We count it as a dropped frame if more than one vsync event passed since
    // we started rendering it. The check is implemented via equality
    // comparisons because current_frame_id will eventually wrap.)
    missed_frame_history[history_index] =
        (current_frame_id != last_frame_id + 1) &&
        (current_frame_id != last_frame_id);
    if (missed_frame_history[history_index]) {
      total_dropped_frames++;
    }
    history_index = (history_index + 1) % kHistorySize;
    last_frame_id = current_frame_id;

    // -------------------------------------------
    // Steps 1, 2.
    // Wait for start of frame.  (triggered at vsync start on android.)
    // For performance, we only wait if we're not dropping frames.  Otherwise,
    // we just keep rendering as fast as we can and stuff the render queue.
    if (total_dropped_frames <= kMaxDroppedFrames) {
      SDL_CondWait(sync_.start_render_cv_, sync_.renderthread_mutex_);
    }

    // Grab the lock to make sure the game isn't still updating.
    SDL_LockMutex(sync_.gameupdate_mutex_);

    SystraceBegin("RenderFrame");

    // Input update must happen from the render thread.
    // From the SDL documentation on SDL_PollEvent(),
    // https://wiki.libsdl.org/SDL_PollEvent):
    // "As this function implicitly calls SDL_PumpEvents(), you can only call
    // this function in the thread that set the video mode."
    SystraceBegin("Input::AdvanceFrame()");
    input_.AdvanceFrame(&renderer_.window_size());
    game_exiting_ |= input_.exit_requested();
    SystraceEnd();

    // Milliseconds elapsed since last update.
    rt_data.frame_start = CurrentWorldTimeSubFrame(input_);

    // -------------------------------------------
    // Step 3.
    // Render everything.
    // -------------------------------------------
    SystraceBegin("StateMachine::Render()");

    fplbase::RenderTarget::ScreenRenderTarget(renderer_).SetAsRenderTarget();
    renderer_.ClearDepthBuffer();
    renderer_.SetCulling(fplbase::Renderer::kCullBack);

    state_machine_.Render(&renderer_);
    SystraceEnd();

    SDL_UnlockMutex(sync_.gameupdate_mutex_);

    SystraceBegin("StateMachine::HandleUI()");
    state_machine_.HandleUI(&renderer_);
    SystraceEnd();

    // -------------------------------------------
    // Step 4.
    // Signal the update thread that it is safe to start messing with
    // data, now that we've already handed it all off to openGL.
    // -------------------------------------------
    SDL_CondBroadcast(sync_.start_update_cv_);

    // -------------------------------------------
    // Step 5a.
    // Start openGL actually rendering.  AdvanceFrame will (among other things)
    // trigger a gl_flush.  This thread will block until it is completed,
    // but that's ok because the update thread is humming in the background
    // preparing the worlds tate for next frame.
    // -------------------------------------------
    SystraceBegin("AdvanceFrame");
    renderer_.AdvanceFrame(input_.minimized(), input_.Time());
    SystraceEnd();  // AdvanceFrame

    SystraceEnd();  // RenderFrame

    gpg_manager_.Update();

    // Process input device messages since the last game loop.
    // Update render window size.
    if (input_.GetButton(fplbase::FPLK_BACKQUOTE).went_down()) {
      ToggleRelativeMouseMode();
    }

    int new_time = CurrentWorldTimeSubFrame(input_);
    int frame_time = new_time - rt_data.frame_start;
#if DISPLAY_FRAMERATE_HISTOGRAM
    UpdateProfiling(frame_time);
#endif  // DISPLAY_FRAMERATE_HISTOGRAM

    SystraceCounter("FrameTime", frame_time);
  }
  SDL_UnlockMutex(sync_.renderthread_mutex_);
  // Clean up asynchronous callbacks to prevent crashing on garbage data.
#ifdef __ANDROID__
  fplbase::RegisterVsyncCallback(nullptr);
#endif  // __ANDROID__
  input_.AddAppEventCallback(nullptr);
}
/* Drain one output buffer from the Android MediaCodec decoder.
 *
 * Handles the MediaCodec info codes (buffers changed / format changed /
 * try-again-later), falls back to a "fake picture" queue when no real
 * output is available, and otherwise routes the decoded buffer through the
 * optional pts-reordering window (amc_buf_out) before queuing it for
 * display.
 *
 * dequeue_count (optional out) is incremented for each buffer consumed.
 * Returns 0 on success, -1 on abort/JNI failure.
 */
static int drain_output_buffer_l(JNIEnv *env, IJKFF_Pipenode *node, int64_t timeUs, int *dequeue_count)
{
    IJKFF_Pipenode_Opaque *opaque = node->opaque;
    int ret = 0;
    SDL_AMediaCodecBufferInfo bufferInfo;
    ssize_t output_buffer_index = 0;

    if (dequeue_count)
        *dequeue_count = 0;

    if (JNI_OK != SDL_JNI_SetupThreadEnv(&env)) {
        ALOGE("%s:create: SetupThreadEnv failed\n", __func__);
        return -1;
    }

    output_buffer_index = SDL_AMediaCodec_dequeueOutputBuffer(opaque->acodec, &bufferInfo, timeUs);
    if (output_buffer_index == AMEDIACODEC__INFO_OUTPUT_BUFFERS_CHANGED) {
        ALOGI("AMEDIACODEC__INFO_OUTPUT_BUFFERS_CHANGED\n");
        // continue;
    } else if (output_buffer_index == AMEDIACODEC__INFO_OUTPUT_FORMAT_CHANGED) {
        /* Refresh the cached output format and log its geometry. */
        ALOGI("AMEDIACODEC__INFO_OUTPUT_FORMAT_CHANGED\n");
        SDL_AMediaFormat_deleteP(&opaque->output_aformat);
        opaque->output_aformat = SDL_AMediaCodec_getOutputFormat(opaque->acodec);
        if (opaque->output_aformat) {
            int width = 0;
            int height = 0;
            int color_format = 0;
            int stride = 0;
            int slice_height = 0;
            int crop_left = 0;
            int crop_top = 0;
            int crop_right = 0;
            int crop_bottom = 0;

            SDL_AMediaFormat_getInt32(opaque->output_aformat, "width", &width);
            SDL_AMediaFormat_getInt32(opaque->output_aformat, "height", &height);
            SDL_AMediaFormat_getInt32(opaque->output_aformat, "color-format", &color_format);
            SDL_AMediaFormat_getInt32(opaque->output_aformat, "stride", &stride);
            SDL_AMediaFormat_getInt32(opaque->output_aformat, "slice-height", &slice_height);
            SDL_AMediaFormat_getInt32(opaque->output_aformat, "crop-left", &crop_left);
            SDL_AMediaFormat_getInt32(opaque->output_aformat, "crop-top", &crop_top);
            SDL_AMediaFormat_getInt32(opaque->output_aformat, "crop-right", &crop_right);
            SDL_AMediaFormat_getInt32(opaque->output_aformat, "crop-bottom", &crop_bottom);

            // TI decoder could crash after reconfigure
            // ffp_notify_msg3(ffp, FFP_MSG_VIDEO_SIZE_CHANGED, width, height);
            // opaque->frame_width = width;
            // opaque->frame_height = height;

            ALOGI(
                "AMEDIACODEC__INFO_OUTPUT_FORMAT_CHANGED\n"
                " width-height: (%d x %d)\n"
                " color-format: (%s: 0x%x)\n"
                " stride: (%d)\n"
                " slice-height: (%d)\n"
                " crop: (%d, %d, %d, %d)\n"
                ,
                width, height,
                SDL_AMediaCodec_getColorFormatName(color_format), color_format,
                stride,
                slice_height,
                crop_left, crop_top, crop_right, crop_bottom);
        }
        // continue;
    } else if (output_buffer_index == AMEDIACODEC__INFO_TRY_AGAIN_LATER) {
        AMCTRACE("AMEDIACODEC__INFO_TRY_AGAIN_LATER\n");
        // continue;
    } else if (output_buffer_index < 0) {
        /* No real output: enqueue packet as a fake picture so playback
         * timing can advance. */
        PacketQueue *fake_q = &opaque->fake_pictq;
        SDL_LockMutex(fake_q->mutex);
        if (!fake_q->abort_request && fake_q->nb_packets <= 0) {
            SDL_CondWaitTimeout(fake_q->cond, fake_q->mutex, 1000);
        }
        SDL_UnlockMutex(fake_q->mutex);

        if (fake_q->abort_request) {
            ret = -1;
            goto fail;
        } else {
            AVPacket pkt;
            if (ffp_packet_queue_get(&opaque->fake_pictq, &pkt, 1, &opaque->fake_pictq_serial) < 0) {
                ret = -1;
                goto fail;
            } else {
                if (!ffp_is_flush_packet(&pkt)) {
                    if (dequeue_count)
                        ++*dequeue_count;
                    ret = amc_queue_picture_fake(node, &pkt);
                    av_free_packet(&pkt);
                }
                ret = 0;
                goto fail;
            }
        }
    } else if (output_buffer_index >= 0) {
        if (dequeue_count)
            ++*dequeue_count;

        if (opaque->n_buf_out) {
            /* pts-reorder window enabled: buffer pictures and emit them
             * in presentation order. */
            AMC_Buf_Out *buf_out;
            if (opaque->off_buf_out < opaque->n_buf_out) {
                // window not yet full - just stash this picture, sorted
                // ALOGD("filling buffer... %d", opaque->off_buf_out);
                buf_out = &opaque->amc_buf_out[opaque->off_buf_out++];
                buf_out->port = output_buffer_index;
                buf_out->info = bufferInfo;
                buf_out->pts = pts_from_buffer_info(node, &bufferInfo);
                sort_amc_buf_out(opaque->amc_buf_out, opaque->off_buf_out);
            } else {
                double pts;

                pts = pts_from_buffer_info(node, &bufferInfo);
                if (opaque->last_queued_pts != AV_NOPTS_VALUE &&
                    pts < opaque->last_queued_pts) {
                    // ALOGE("early picture, drop!");
                    SDL_AMediaCodec_releaseOutputBuffer(opaque->acodec, output_buffer_index, false);
                    goto done;
                }
                /* already sorted */
                buf_out = &opaque->amc_buf_out[opaque->off_buf_out - 1];
                /* new picture is the most aged, send now */
                if (pts < buf_out->pts) {
                    ret = amc_queue_picture_buffer(node, output_buffer_index, &bufferInfo);
                    opaque->last_queued_pts = pts;
                    // ALOGD("pts = %f", pts);
                } else {
                    int i;

                    /* find one to send */
                    for (i = opaque->off_buf_out - 1; i >= 0; i--) {
                        buf_out = &opaque->amc_buf_out[i];
                        if (pts > buf_out->pts) {
                            ret = amc_queue_picture_buffer(node, buf_out->port, &buf_out->info);
                            opaque->last_queued_pts = buf_out->pts;
                            // ALOGD("pts = %f", buf_out->pts);
                            /* replace for sort later */
                            buf_out->port = output_buffer_index;
                            buf_out->info = bufferInfo;
                            buf_out->pts = pts_from_buffer_info(node, &bufferInfo);
                            sort_amc_buf_out(opaque->amc_buf_out, opaque->n_buf_out);
                            break;
                        }
                    }
                    /* need to discard current buffer */
                    if (i < 0) {
                        // ALOGE("buffer too small, drop picture!");
                        SDL_AMediaCodec_releaseOutputBuffer(opaque->acodec, output_buffer_index, false);
                        goto done;
                    }
                }
            }
        } else {
            /* no reorder window - queue the picture directly */
            ret = amc_queue_picture_buffer(node, output_buffer_index, &bufferInfo);
        }
    }
done:
    /* NOTE(review): this clobbers any non-zero ret from
     * amc_queue_picture_buffer above - presumably intentional (drops are
     * not fatal), but confirm. */
    ret = 0;
fail:
    return ret;
}
/* Audio output thread body: pulls PCM from the player callback and writes
 * it to the Android AudioTrack, honoring pause/flush/volume/speed requests
 * posted by other threads under wakeup_mutex.
 *
 * Runs until abort_request is set; frees the AudioTrack on exit.
 * Returns 0.
 */
static int aout_thread_n(JNIEnv *env, SDL_Aout *aout)
{
    SDL_Aout_Opaque *opaque = aout->opaque;
    SDL_Android_AudioTrack *atrack = opaque->atrack;
    SDL_AudioCallback audio_cblk = opaque->spec.callback;
    void *userdata = opaque->spec.userdata;
    uint8_t *buffer = opaque->buffer;
    int copy_size = 256;  /* bytes pulled from the callback per iteration */

    assert(atrack);
    assert(buffer);

    SDL_SetThreadPriority(SDL_THREAD_PRIORITY_HIGH);

    if (!opaque->abort_request && !opaque->pause_on)
        SDL_Android_AudioTrack_play(env, atrack);

    while (!opaque->abort_request) {
        /* Service control requests (pause/flush/volume/speed) under the
         * wakeup mutex. */
        SDL_LockMutex(opaque->wakeup_mutex);
        if (!opaque->abort_request && opaque->pause_on) {
            SDL_Android_AudioTrack_pause(env, atrack);
            while (!opaque->abort_request && opaque->pause_on) {
                SDL_CondWaitTimeout(opaque->wakeup_cond, opaque->wakeup_mutex, 1000);
            }
            if (!opaque->abort_request && !opaque->pause_on)
                SDL_Android_AudioTrack_play(env, atrack);
        }
        if (opaque->need_flush) {
            opaque->need_flush = 0;
            SDL_Android_AudioTrack_flush(env, atrack);
        }
        if (opaque->need_set_volume) {
            opaque->need_set_volume = 0;
            SDL_Android_AudioTrack_set_volume(env, atrack, opaque->left_volume, opaque->right_volume);
        }
        if (opaque->speed_changed) {
            opaque->speed_changed = 0;
            /* AudioTrack.setPlaybackParams requires API level 23+ */
            if (AirStash_GetSystemAndroidApiLevel(env) >= 23) {
                SDL_Android_AudioTrack_setSpeed(env, atrack, opaque->speed);
            }
        }
        SDL_UnlockMutex(opaque->wakeup_mutex);

        /* Pull the next chunk of PCM from the player. */
        audio_cblk(userdata, buffer, copy_size);

        /* NOTE(review): the two need_flush blocks below look like a merge
         * artifact - the first sets need_flush = false, so the second only
         * fires if another thread re-set the flag in between (need_flush is
         * read here without the mutex).  Also note the int/bool mixing
         * (false vs 0).  Confirm intent before touching. */
        if (opaque->need_flush) {
            SDL_Android_AudioTrack_flush(env, atrack);
            opaque->need_flush = false;
        }

        if (opaque->need_flush) {
            opaque->need_flush = 0;
            SDL_Android_AudioTrack_flush(env, atrack);
        } else {
            int written = SDL_Android_AudioTrack_write(env, atrack, buffer, copy_size);
            if (written != copy_size) {
                ALOGW("AudioTrack: not all data copied %d/%d", (int)written, (int)copy_size);
            }
        }

        // TODO: 1 if callback return -1 or 0
    }

    SDL_Android_AudioTrack_free(env, atrack);
    return 0;
}
/* Synchronous MediaCodec video decode loop.
 *
 * Falls back to the software ffplay decoder when no codec was created.
 * Otherwise spawns the input (enqueue) thread and drains decoder output
 * until the packet queue is aborted, signalling the first-output condition
 * once.  On exit, releases any buffered output pictures, stops the codec
 * and joins the enqueue thread.
 *
 * Returns 0 on clean exit, -1 on error (or the software decoder's result).
 */
static int func_run_sync(IJKFF_Pipenode *node)
{
    JNIEnv *env = NULL;
    IJKFF_Pipenode_Opaque *opaque = node->opaque;
    FFPlayer *ffp = opaque->ffp;
    VideoState *is = ffp->is;
    Decoder *d = &is->viddec;
    PacketQueue *q = d->queue;
    int ret = 0;
    int dequeue_count = 0;

    if (!opaque->acodec) {
        /* no hardware codec - use the software video thread instead */
        return ffp_video_thread(ffp);
    }

    if (JNI_OK != SDL_JNI_SetupThreadEnv(&env)) {
        ALOGE("%s: SetupThreadEnv failed\n", __func__);
        return -1;
    }

    opaque->frame_width = opaque->avctx->width;
    opaque->frame_height = opaque->avctx->height;

    opaque->enqueue_thread = SDL_CreateThreadEx(&opaque->_enqueue_thread, enqueue_thread_func, node, "amediacodec_input_thread");
    if (!opaque->enqueue_thread) {
        ALOGE("%s: SDL_CreateThreadEx failed\n", __func__);
        ret = -1;
        goto fail;
    }

    while (!q->abort_request) {
        /* First dequeue is non-blocking so the waiter can be signalled
         * promptly; afterwards use the normal output timeout. */
        int64_t timeUs = opaque->acodec_first_dequeue_output_request ? 0 : AMC_OUTPUT_TIMEOUT_US;
        ret = drain_output_buffer(env, node, timeUs, &dequeue_count);
        if (opaque->acodec_first_dequeue_output_request) {
            SDL_LockMutex(opaque->acodec_first_dequeue_output_mutex);
            opaque->acodec_first_dequeue_output_request = false;
            SDL_CondSignal(opaque->acodec_first_dequeue_output_cond);
            SDL_UnlockMutex(opaque->acodec_first_dequeue_output_mutex);
        }
        if (ret != 0) {
            ret = -1;
            goto fail;
        }
    }

fail:
    /* Shutdown: abort the fake-picture queue, release any reordered output
     * buffers still held, stop the codec and join the input thread. */
    ffp_packet_queue_abort(&opaque->fake_pictq);
    if (opaque->n_buf_out) {
        int i;

        if (opaque->acodec) {
            for (i = 0; i < opaque->n_buf_out; i++) {
                if (opaque->amc_buf_out[i].pts != AV_NOPTS_VALUE)
                    SDL_AMediaCodec_releaseOutputBuffer(opaque->acodec, opaque->amc_buf_out[i].port, false);
            }
        }
        free(opaque->amc_buf_out);
        opaque->n_buf_out = 0;
        opaque->amc_buf_out = NULL;
        opaque->off_buf_out = 0;
        opaque->last_queued_pts = AV_NOPTS_VALUE;
    }
    if (opaque->acodec)
        SDL_AMediaCodec_stop(opaque->acodec);
    SDL_WaitThread(opaque->enqueue_thread, NULL);
    ALOGI("MediaCodec: %s: exit: %d", __func__, ret);
    return ret;
#if 0
fallback_to_ffplay:
    ALOGW("fallback to ffplay decoder\n");
    return ffp_video_thread(opaque->ffp);
#endif
}
// Verify the registration key file against this machine's MAC address.
//
// Reads 16 encrypted bytes from "registration.zkey", AES-decrypts them and
// compares against the local MAC.  Sets and returns is_registered.
// Compiled with DISABLE_REGCHECK, the check is skipped entirely.
bool ZCore::CheckRegistration()
{
	// NOTE(review): pre-C++11, initialization of a function-local static
	// is not thread-safe; given the comment below about multiple threads,
	// confirm the build uses C++11+ (magic statics) or hoist the mutex.
	static SDL_mutex *check_mutex = SDL_CreateMutex();
	FILE *fp;
	int ret;
	char buf_enc[16];  // encrypted key read from disk
	char buf_key[16];  // decrypted key

#ifdef DISABLE_REGCHECK
	printf("<<<< ---------------------------------------------------------------- >>>>\n");
	printf("<<<< REGISTRATION CHECKING DISABLED >>>>\n");
	printf("<<<< ---------------------------------------------------------------- >>>>\n");
	is_registered = true;
	return is_registered;
#endif

	//clients and servers on different threads may use this function
	SDL_LockMutex(check_mutex);

	fp = fopen("registration.zkey", "r");

	if(!fp)
	{
		printf(">>>> ---------------------------------------------------------------- <<<<\n");
		printf(">>>> no registration key file found <<<<\n");
		printf(">>>> please visit www.nighsoft.com on how to register your zod engine <<<<\n");
		printf(">>>> ---------------------------------------------------------------- <<<<\n");
		SDL_UnlockMutex(check_mutex);
		return false;
	}

	ret = fread(buf_enc, 1, 16, fp);

	if(ret == 16)
	{
		char buf_mac[16];

		//clear
		memset(buf_mac, 0, 16);

		//get our mac
		SocketHandler::GetMAC(buf_mac);

		//key stored reg key
		zencrypt.AES_Decrypt(buf_enc,16,buf_key);

		//check key: every byte of the decrypted key must match the MAC
		is_registered = true;
		for(int i=0;i<16;i++)
			if(buf_mac[i] != buf_key[i])
			{
				printf(">>>> ---------------------------------------------------------------- <<<<\n");
				printf(">>>> registration key invalid <<<<\n");
				printf(">>>> please visit www.nighsoft.com on how to register your zod engine <<<<\n");
				printf(">>>> ---------------------------------------------------------------- <<<<\n");
				is_registered = false;
				break;
			}
	}
	else
	{
		printf(">>>> ---------------------------------------------------------------- <<<<\n");
		printf(">>>> registration key file unreadable or corrupt <<<<\n");
		printf(">>>> please visit www.nighsoft.com on how to register your zod engine <<<<\n");
		printf(">>>> ---------------------------------------------------------------- <<<<\n");
	}

	fclose(fp);

	SDL_UnlockMutex(check_mutex);

	return is_registered;
}
// Release the underlying SDL mutex previously acquired via Lock().
void vsMutex::Unlock()
{
	SDL_UnlockMutex( m_mutex );
}
/* Invalidate every queued output buffer on this vout.
 *
 * Thin thread-safe wrapper: acquires vout->mutex and delegates to the
 * "_l" variant, which assumes the lock is already held. */
void SDL_VoutAndroid_invalidateAllBuffers(SDL_Vout *vout)
{
    SDL_LockMutex(vout->mutex);
    {
        SDL_VoutAndroid_invalidateAllBuffers_l(vout);
    }
    SDL_UnlockMutex(vout->mutex);
}
/**
** Callback for when music has finished
** Note: we are in the sdl audio thread, so the flag is set under
** MusicFinishedMutex rather than written directly.
*/
static void MusicFinishedCallback()
{
	SDL_LockMutex(MusicFinishedMutex);
	MusicFinished = true;
	SDL_UnlockMutex(MusicFinishedMutex);
}
/* Attach (or replace) the native window used for video output.
 *
 * Serialized on vout->mutex; the actual work happens in the "_l"
 * variant, which expects the lock to be held. */
void SDL_VoutAndroid_SetNativeWindow(SDL_Vout *vout, ANativeWindow *native_window)
{
    SDL_mutex *lock = vout->mutex;

    SDL_LockMutex(lock);
    SDL_VoutAndroid_SetNativeWindow_l(vout, native_window);
    SDL_UnlockMutex(lock);
}
/*
 * Record the RTCP wallclock <-> RTP timestamp mapping for this stream.
 *
 * wclock - wallclock value (msec) from the RTCP sender report
 * rtp_ts - RTP timestamp corresponding to that wallclock
 *
 * If a mapping already exists, the incoming one is sanity-checked against
 * the value predicted from the old mapping before being accepted.  Also
 * re-bases the first-packet timestamps once an hour of RTP time has passed
 * (to keep rounding error small) and triggers inter-stream sync.
 */
void CRtpByteStreamBase::set_wallclock_offset (uint64_t wclock, uint32_t rtp_ts)
{
  int32_t rtp_ts_diff;
  int64_t wclock_diff;
  uint64_t wclock_calc;
  bool set = true;
  bool had_recvd_rtcp;

  if (m_rtcp_received == 1 /*&& m_stream_ondemand == 0*/) {
    /* Predict what the new wallclock should be from the previous RTCP
     * mapping and the RTP timestamp delta (scaled to msec). */
    rtp_ts_diff = rtp_ts;
    rtp_ts_diff -= m_rtcp_rtp_ts;
    wclock_diff = (int64_t)rtp_ts_diff;
    wclock_diff *= TO_D64(1000);
    wclock_diff /= (int64_t)m_timescale;
    wclock_calc = m_rtcp_ts;
    wclock_calc += wclock_diff;
    set = false;
    if (wclock_calc != wclock) {
#ifdef DEBUG_RTP_WCLOCK
      rtp_message(LOG_DEBUG, "%s - set wallclock - wclock should be "U64" is "U64, m_name, wclock_calc, wclock);
#endif
      // don't change wclock offset if it's > 100 msec - otherwise,
      // it's annoying noise
      int64_t diff = wclock_calc - wclock;
      /* NOTE(review): 'set' is already false here, so this assignment has
       * no effect; also abs() takes an int, so passing an int64_t
       * truncates - llabs() looks intended.  Confirm before changing. */
      if (abs(diff) > 2 && abs(diff) < 100) {
	set = false;
	// rtp_message(LOG_DEBUG, "not changing");
	// we'll allow a msec drift here or there to allow for rounding -
	// we want this to change every so often
      }
    }
  }
  had_recvd_rtcp = m_rtcp_received;
  m_rtcp_received = true;
  /* The mapping and first-packet fields are read by other threads;
   * update them under the packet mutex. */
  SDL_LockMutex(m_rtp_packet_mutex);
  if (set) {
    m_rtcp_ts = wclock;
    m_rtcp_rtp_ts = rtp_ts;
  }
  if (m_have_first_pak_ts) {
    // we only want positives here
    int32_t diff;
    diff = rtp_ts - m_first_pak_rtp_ts;
    int32_t compare = 3600 * m_timescale;
#ifdef DEBUG_RTP_WCLOCK
    rtp_message(LOG_DEBUG, "%s - 1st rtp ts %u rtp %u %u", m_name, m_first_pak_rtp_ts, rtp_ts, diff);
    rtp_message(LOG_DEBUG, "%s - 1st ts "U64, m_name, m_first_pak_ts);
#endif
    if (diff > compare) {
      // adjust once an hour, to keep errors low
      // we'll adjust the timestamp and rtp timestamp
      int64_t ts_diff;
      ts_diff = (int64_t)diff;
      ts_diff *= TO_U64(1000);
      ts_diff /= (int64_t)m_timescale;
      m_first_pak_ts += ts_diff;
      m_first_pak_rtp_ts += diff;
#ifdef DEBUG_RTP_WCLOCK
      rtp_message(LOG_DEBUG, "CHANGE %s - first pak ts is now "U64" rtp %u", m_name, m_first_pak_ts, m_first_pak_rtp_ts);
#endif
    }
    // We've received an RTCP - see if we need to syncronize
    // the video streams.
    if (m_psptr != NULL) {
      rtcp_sync_t sync;
      sync.first_pak_ts = m_first_pak_ts;
      sync.first_pak_rtp_ts = m_first_pak_rtp_ts;
      sync.rtcp_ts = m_rtcp_ts;
      sync.rtcp_rtp_ts = m_rtcp_rtp_ts;
      sync.timescale = m_timescale;
      m_psptr->synchronize_rtp_bytestreams(&sync);
    } else {
      // if this is our first rtcp, try to synchronize
      if (!had_recvd_rtcp)
	synchronize(NULL);
    }
  }

  SDL_UnlockMutex(m_rtp_packet_mutex);
}
/* Publish the AMediaCodec instance that feeds this vout.
 *
 * Serialized on vout->mutex; the real work is done by the lock-held
 * "_l" variant. */
void SDL_VoutAndroid_setAMediaCodec(SDL_Vout *vout, SDL_AMediaCodec *acodec)
{
    SDL_LockMutex(vout->mutex);
    {
        SDL_VoutAndroid_setAMediaCodec_l(vout, acodec);
    }
    SDL_UnlockMutex(vout->mutex);
}
// Unlock a Warzone mutex handle.
// WZ_MUTEX is an opaque alias for SDL_mutex, so this simply casts the
// handle back and delegates to SDL.
void wzMutexUnlock(WZ_MUTEX *mutex)
{
	SDL_mutex *sdlMutex = (SDL_mutex *)mutex;
	SDL_UnlockMutex(sdlMutex);
}
/*
 * Create a new timer firing every 'interval' milliseconds.
 *
 * Lazily initializes the timer subsystem, recycles a timer struct from the
 * free list when one is available, registers the timer in the ID->timer
 * map, then hands it to the timer thread via the pending list.
 *
 * Returns the new timer's ID, or 0 on error.
 */
SDL_TimerID SDL_AddTimer(Uint32 interval, SDL_TimerCallback callback, void *param)
{
    SDL_TimerData *data = &SDL_timer_data;
    SDL_Timer *timer;
    SDL_TimerMap *entry;

    SDL_AtomicLock(&data->lock);
    /* Start the timer subsystem on first use. */
    if (!SDL_AtomicGet(&data->active)) {
        if (SDL_TimerInit() < 0) {
            SDL_AtomicUnlock(&data->lock);
            return 0;
        }
    }

    /* Try to recycle a previously-removed timer from the free list. */
    timer = data->freelist;
    if (timer) {
        data->freelist = timer->next;
    }
    SDL_AtomicUnlock(&data->lock);

    if (timer) {
        /* Finish any pending removal of the recycled timer's old ID. */
        SDL_RemoveTimer(timer->timerID);
    } else {
        timer = (SDL_Timer *)SDL_malloc(sizeof(*timer));
        if (!timer) {
            SDL_OutOfMemory();
            return 0;
        }
    }
    timer->timerID = SDL_AtomicIncRef(&data->nextID);
    timer->callback = callback;
    timer->param = param;
    timer->interval = interval;
    timer->scheduled = SDL_GetTicks() + interval;
    SDL_AtomicSet(&timer->canceled, 0);

    /* Map the ID to the timer so SDL_RemoveTimer can find it later. */
    entry = (SDL_TimerMap *)SDL_malloc(sizeof(*entry));
    if (!entry) {
        SDL_free(timer);
        SDL_OutOfMemory();
        return 0;
    }
    entry->timer = timer;
    entry->timerID = timer->timerID;

    SDL_LockMutex(data->timermap_lock);
    entry->next = data->timermap;
    data->timermap = entry;
    SDL_UnlockMutex(data->timermap_lock);

    /* Add the timer to the pending list for the timer thread */
    SDL_AtomicLock(&data->lock);
    timer->next = data->pending;
    data->pending = timer;
    SDL_AtomicUnlock(&data->lock);

    /* Wake up the timer thread if necessary */
    SDL_SemPost(data->sem);

    return entry->timerID;
}
/**
 * Central network pump: waits up to @c timeout milliseconds for socket
 * activity, then services the listening socket, every ready stream
 * (reads, writes, closes) and every ready datagram socket.
 * @param timeout maximum wait in milliseconds (forced to 0 when a
 *        loopback stream is already ready)
 * @sa Qcommon_Frame
 */
void NET_Wait (int timeout)
{
	struct timeval tv;
	int ready;
	fd_set read_fds_out;
	fd_set write_fds_out;

	/* Work on copies: select() mutates its fd_set arguments. */
	memcpy(&read_fds_out, &read_fds, sizeof(read_fds_out));
	memcpy(&write_fds_out, &write_fds, sizeof(write_fds_out));

	/* select() won't notice that loopback streams are ready, so we'll
	 * eliminate the delay directly */
	if (loopback_ready)
		timeout = 0;

	tv.tv_sec = timeout / 1000;
	tv.tv_usec = 1000 * (timeout % 1000);
#ifdef _WIN32
	/* Winsock select() rejects empty fd sets; emulate the wait instead. */
	if (read_fds_out.fd_count == 0) {
		Sys_Sleep(timeout);
		ready = 0;
	} else
#endif
		ready = select(maxfd, &read_fds_out, &write_fds_out, nullptr, &tv);
	if (ready == -1) {
		Com_Printf("select failed: %s\n", netStringError(netError));
		return;
	}

	if (ready == 0 && !loopback_ready)
		return;

	/* Accept a pending connection on the listening socket, if any. */
	if (server_socket != INVALID_SOCKET && FD_ISSET(server_socket, &read_fds_out)) {
		const SOCKET client_socket = accept(server_socket, nullptr, 0);
		if (client_socket == INVALID_SOCKET) {
			if (errno != EAGAIN)
				Com_Printf("accept on socket %d failed: %s\n", server_socket, netStringError(netError));
		} else
			do_accept(client_socket);
	}

	/* Service every stream: flush outbound data, pull inbound data. */
	for (int i = 0; i < MAX_STREAMS; i++) {
		struct net_stream* s = streams[i];
		if (!s)
			continue;

		if (s->loopback) {
			/* If the peer is gone and the buffer is empty, close the stream */
			if (!s->loopback_peer && NET_StreamGetLength(s) == 0) {
				NET_StreamClose(s);
			}
			/* Note that s is potentially invalid after the callback returns - we'll
			 * close dead streams on the next pass */
			else if (s->ready && s->func) {
				s->func(s);
			}
			continue;
		}

		if (s->socket == INVALID_SOCKET)
			continue;

		if (FD_ISSET(s->socket, &write_fds_out)) {
			if (dbuffer_len(s->outbound) == 0) {
				/* Nothing left to send: stop watching for writability. */
				FD_CLR(s->socket, &write_fds);

				/* Finished streams are closed when their outbound queues empty */
				if (s->finished)
					NET_StreamClose(s);
				continue;
			}

			char buf[4096];
			int len;
			{
				/* Guard the outbound buffer against the network thread. */
				const ScopedMutex scopedMutex(netMutex);
				len = s->outbound->get(buf, sizeof(buf));
				len = send(s->socket, buf, len, 0);
				s->outbound->remove(len);
			}

			if (len < 0) {
				Com_Printf("write on socket %d failed: %s\n", s->socket, netStringError(netError));
				NET_StreamClose(s);
				continue;
			}

			Com_DPrintf(DEBUG_SERVER, "wrote %d bytes to stream %d (%s)\n", len, i, NET_StreamPeerToName(s, buf, sizeof(buf), true));
		}

		if (FD_ISSET(s->socket, &read_fds_out)) {
			char buf[4096];
			const int len = recv(s->socket, buf, sizeof(buf), 0);
			if (len <= 0) {
				/* 0 means orderly shutdown by the peer, -1 an error. */
				if (len == -1)
					Com_Printf("read on socket %d failed: %s\n", s->socket, netStringError(netError));
				NET_StreamClose(s);
				continue;
			} else {
				if (s->inbound) {
					SDL_LockMutex(netMutex);
					s->inbound->add(buf, len);
					SDL_UnlockMutex(netMutex);

					Com_DPrintf(DEBUG_SERVER, "read %d bytes from stream %d (%s)\n", len, i, NET_StreamPeerToName(s, buf, sizeof(buf), true));

					/* Note that s is potentially invalid after the callback returns */
					if (s->func)
						s->func(s);
					continue;
				}
			}
		}
	}

	/* Service datagram sockets: send one queued datagram, receive one. */
	for (int i = 0; i < MAX_DATAGRAM_SOCKETS; i++) {
		struct datagram_socket* s = datagram_sockets[i];
		if (!s)
			continue;
		if (FD_ISSET(s->socket, &write_fds_out)) {
			if (s->queue) {
				struct datagram* dgram = s->queue;
				const int len = sendto(s->socket, dgram->msg, dgram->len, 0, (struct sockaddr* )dgram->addr, s->addrlen);
				if (len == -1)
					Com_Printf("sendto on socket %d failed: %s\n", s->socket, netStringError(netError));
				/* Regardless of whether it worked, we don't retry datagrams */
				s->queue = dgram->next;
				Mem_Free(dgram->msg);
				Mem_Free(dgram->addr);
				Mem_Free(dgram);
				if (!s->queue)
					s->queue_tail = &s->queue;
			} else {
				FD_CLR(s->socket, &write_fds);
			}
		}
		if (FD_ISSET(s->socket, &read_fds_out)) {
			char buf[256];
			char addrbuf[256];
			socklen_t addrlen = sizeof(addrbuf);
			const int len = recvfrom(s->socket, buf, sizeof(buf), 0, (struct sockaddr* )addrbuf, &addrlen);
			if (len == -1)
				Com_Printf("recvfrom on socket %d failed: %s\n", s->socket, netStringError(netError));
			else
				s->func(s, buf, len, (struct sockaddr* )addrbuf);
		}
	}

	loopback_ready = false;
}
/*
 * Pull one packet from the video packet queue and feed it into a
 * MediaCodec input buffer.
 *
 * Handles flush requests, surface reconfiguration, Annex-B conversion (or
 * the bitstream-filter path), and the "fake picture" fallback used when no
 * surface or no valid input buffer is available.  The packet may be
 * consumed across several calls (pkt_temp tracks the unconsumed tail).
 *
 * timeUs        - timeout passed to dequeueInputBuffer
 * enqueue_count - out: number of buffers queued by this call (0 or 1)
 * Returns 0 on success/timeout/abort, -1 on hard error.
 */
static int feed_input_buffer(JNIEnv *env, IJKFF_Pipenode *node, int64_t timeUs, int *enqueue_count)
{
    IJKFF_Pipenode_Opaque *opaque = node->opaque;
    FFPlayer *ffp = opaque->ffp;
    IJKFF_Pipeline *pipeline = opaque->pipeline;
    VideoState *is = ffp->is;
    Decoder *d = &is->viddec;
    PacketQueue *q = d->queue;
    sdl_amedia_status_t amc_ret = 0;
    int ret = 0;
    ssize_t input_buffer_index = 0;
    uint8_t* input_buffer_ptr = NULL;
    size_t input_buffer_size = 0;
    size_t copy_size = 0;
    int64_t time_stamp = 0;

    if (enqueue_count)
        *enqueue_count = 0;

    if (d->queue->abort_request) {
        ret = 0;
        goto fail;
    }

    /* Need a fresh packet: nothing pending, or the pending one belongs to
     * an older queue serial (i.e. a seek happened). */
    if (!d->packet_pending || d->queue->serial != d->pkt_serial) {
#if AMC_USE_AVBITSTREAM_FILTER
#else
        H264ConvertState convert_state = {0, 0};
#endif
        AVPacket pkt;
        do {
            if (d->queue->nb_packets == 0)
                SDL_CondSignal(d->empty_queue_cond);
            if (ffp_packet_queue_get_or_buffering(ffp, d->queue, &pkt, &d->pkt_serial, &d->finished) < 0) {
                ret = -1;
                goto fail;
            }
            if (ffp_is_flush_packet(&pkt) || opaque->acodec_flush_request) {
                // request flush before lock, or never get mutex
                opaque->acodec_flush_request = true;
                SDL_LockMutex(opaque->acodec_mutex);
                if (SDL_AMediaCodec_isStarted(opaque->acodec)) {
                    if (opaque->input_packet_count > 0) {
                        // flush empty queue cause error on OMX.SEC.AVC.Decoder (Nexus S)
                        SDL_AMediaCodec_flush(opaque->acodec);
                        opaque->input_packet_count = 0;
                    }
                    // If codec is configured in synchronous mode, codec will resume automatically
                    // SDL_AMediaCodec_start(opaque->acodec);
                }
                opaque->acodec_flush_request = false;
                SDL_CondSignal(opaque->acodec_cond);
                SDL_UnlockMutex(opaque->acodec_mutex);
                d->finished = 0;
                d->next_pts = d->start_pts;
                d->next_pts_tb = d->start_pts_tb;
            }
        } while (ffp_is_flush_packet(&pkt) || d->queue->serial != d->pkt_serial);
        av_free_packet(&d->pkt);
        d->pkt_temp = d->pkt = pkt;
        d->packet_pending = 1;
#if AMC_USE_AVBITSTREAM_FILTER
        // d->pkt_temp->data could be allocated by av_bitstream_filter_filter
        if (d->bfsc_ret > 0) {
            if (d->bfsc_data)
                av_freep(&d->bfsc_data);
            d->bfsc_ret = 0;
        }
        d->bfsc_ret = av_bitstream_filter_filter(opaque->bsfc, opaque->avctx, NULL, &d->pkt_temp.data, &d->pkt_temp.size, d->pkt.data, d->pkt.size, d->pkt.flags & AV_PKT_FLAG_KEY);
        if (d->bfsc_ret > 0) {
            d->bfsc_data = d->pkt_temp.data;
        } else if (d->bfsc_ret < 0) {
            ALOGE("%s: av_bitstream_filter_filter failed\n", __func__);
            ret = -1;
            goto fail;
        }
        /* The filter may have prepended extradata; strip it back off. */
        if (d->pkt_temp.size == d->pkt.size + opaque->avctx->extradata_size) {
            d->pkt_temp.data += opaque->avctx->extradata_size;
            d->pkt_temp.size = d->pkt.size;
        }
        AMCTRACE("bsfc->filter(%d): %p[%d] -> %p[%d]", d->bfsc_ret, d->pkt.data, (int)d->pkt.size, d->pkt_temp.data, (int)d->pkt_temp.size);
#else
#if 0
        AMCTRACE("raw [%d][%d] %02x%02x%02x%02x%02x%02x%02x%02x", (int)d->pkt_temp.size, (int)opaque->nal_size, d->pkt_temp.data[0], d->pkt_temp.data[1], d->pkt_temp.data[2], d->pkt_temp.data[3], d->pkt_temp.data[4], d->pkt_temp.data[5], d->pkt_temp.data[6], d->pkt_temp.data[7]);
#endif
        /* In-place conversion from length-prefixed NALs to Annex-B. */
        convert_h264_to_annexb(d->pkt_temp.data, d->pkt_temp.size, opaque->nal_size, &convert_state);
        /* NOTE(review): this declaration shadows the function-scope
         * time_stamp; the value computed here is never used outside this
         * branch - presumably debug-only (see the #if 0 trace below). */
        int64_t time_stamp = d->pkt_temp.pts;
        if (!time_stamp && d->pkt_temp.dts)
            time_stamp = d->pkt_temp.dts;
        if (time_stamp > 0) {
            time_stamp = av_rescale_q(time_stamp, is->video_st->time_base, AV_TIME_BASE_Q);
        } else {
            time_stamp = 0;
        }
#if 0
        AMCTRACE("input[%d][%d][%lld,%lld (%d, %d) -> %lld] %02x%02x%02x%02x%02x%02x%02x%02x", (int)d->pkt_temp.size, (int)opaque->nal_size, (int64_t)d->pkt_temp.pts, (int64_t)d->pkt_temp.dts, (int)is->video_st->time_base.num, (int)is->video_st->time_base.den, (int64_t)time_stamp, d->pkt_temp.data[0], d->pkt_temp.data[1], d->pkt_temp.data[2], d->pkt_temp.data[3], d->pkt_temp.data[4], d->pkt_temp.data[5], d->pkt_temp.data[6], d->pkt_temp.data[7]);
#endif
#endif
    }

    if (d->pkt_temp.data) {
        // reconfigure surface if surface changed
        // NULL surface cause no display
        if (ffpipeline_is_surface_need_reconfigure(pipeline)) {
            // request reconfigure before lock, or never get mutex
            opaque->acodec_reconfigure_request = true;
            SDL_LockMutex(opaque->acodec_mutex);
            ret = reconfigure_codec_l(env, node);
            opaque->acodec_reconfigure_request = false;
            SDL_CondSignal(opaque->acodec_cond);
            SDL_UnlockMutex(opaque->acodec_mutex);
            if (ret != 0) {
                ALOGE("%s: reconfigure_codec failed\n", __func__);
                ret = 0;
                goto fail;
            }

            /* Wait (max ~1s per round) until the output side has dequeued
             * its first buffer, unless aborted or preempted by another
             * flush/reconfigure request. */
            SDL_LockMutex(opaque->acodec_first_dequeue_output_mutex);
            while (!q->abort_request &&
                !opaque->acodec_reconfigure_request &&
                !opaque->acodec_flush_request &&
                opaque->acodec_first_dequeue_output_request) {
                SDL_CondWaitTimeout(opaque->acodec_first_dequeue_output_cond, opaque->acodec_first_dequeue_output_mutex, 1000);
            }
            SDL_UnlockMutex(opaque->acodec_first_dequeue_output_mutex);

            if (q->abort_request || opaque->acodec_reconfigure_request || opaque->acodec_flush_request) {
                ret = 0;
                goto fail;
            }
        }

        // no need to decode without surface
        if (!opaque->jsurface) {
            ret = amc_decode_picture_fake(node, 1000);
            goto fail;
        }

        input_buffer_index = SDL_AMediaCodec_dequeueInputBuffer(opaque->acodec, timeUs);
        if (input_buffer_index < 0) {
            if (SDL_AMediaCodec_isInputBuffersValid(opaque->acodec)) {
                // timeout
                ret = 0;
                goto fail;
            } else {
                // exception
                ret = amc_decode_picture_fake(node, 1000);
                goto fail;
            }
        } else {
            // remove all fake pictures
            if (opaque->fake_pictq.nb_packets > 0)
                ffp_packet_queue_flush(&opaque->fake_pictq);
        }

        input_buffer_ptr = SDL_AMediaCodec_getInputBuffer(opaque->acodec, input_buffer_index, &input_buffer_size);
        if (!input_buffer_ptr) {
            ALOGE("%s: SDL_AMediaCodec_getInputBuffer failed\n", __func__);
            ret = -1;
            goto fail;
        }

        /* The codec buffer may be smaller than the packet; copy what fits
         * and leave the rest pending for the next call. */
        copy_size = FFMIN(input_buffer_size, d->pkt_temp.size);
        memcpy(input_buffer_ptr, d->pkt_temp.data, copy_size);

        /* Prefer pts, fall back to dts, rescale to AV_TIME_BASE units. */
        time_stamp = d->pkt_temp.pts;
        if (!time_stamp && d->pkt_temp.dts)
            time_stamp = d->pkt_temp.dts;
        if (time_stamp > 0) {
            time_stamp = av_rescale_q(time_stamp, is->video_st->time_base, AV_TIME_BASE_Q);
        } else {
            time_stamp = 0;
        }
        // ALOGE("queueInputBuffer, %lld\n", time_stamp);
        amc_ret = SDL_AMediaCodec_queueInputBuffer(opaque->acodec, input_buffer_index, 0, copy_size, time_stamp, 0);
        if (amc_ret != SDL_AMEDIA_OK) {
            ALOGE("%s: SDL_AMediaCodec_getInputBuffer failed\n", __func__);
            ret = -1;
            goto fail;
        }
        // ALOGE("%s: queue %d/%d", __func__, (int)copy_size, (int)input_buffer_size);
        opaque->input_packet_count++;
        if (enqueue_count)
            ++*enqueue_count;
    }

    /* NOTE(review): input_buffer_size is size_t (unsigned), so this
     * condition can never be true - looks like it was meant to catch a
     * failed buffer fetch; confirm intent before changing. */
    if (input_buffer_size < 0) {
        d->packet_pending = 0;
    } else {
        d->pkt_temp.dts =
        d->pkt_temp.pts = AV_NOPTS_VALUE;
        if (d->pkt_temp.data) {
            /* Advance past the bytes we just queued. */
            d->pkt_temp.data += copy_size;
            d->pkt_temp.size -= copy_size;
            if (d->pkt_temp.size <= 0)
                d->packet_pending = 0;
        } else {
            // FIXME: detect if decode finished
            // if (!got_frame) {
                d->packet_pending = 0;
                d->finished = d->pkt_serial;
            // }
        }
    }

    // add by WilliamShi
    ffp->ab_tm = d->pkt_temp.ab_timestamp;
fail:
    return ret;
}
/*
 * Timer-driven video refresh callback.
 *
 * Computes how long to wait before showing the next queued picture
 * (syncing video to the master clock unless video is master), displays
 * the picture, advances the picture-queue read index, and schedules the
 * next refresh.
 */
void video_refresh_timer(void *userdata)
{
    VideoState *is = (VideoState *)userdata;
    VideoPicture *vp;
    double actual_delay, delay, sync_threshold, ref_clock, diff;

    if(is->video_st) {
        if(is->pictq_size == 0) {
            /* Nothing decoded yet: poll again almost immediately. */
            schedule_refresh(is, 1);
        } else {
            vp = &is->pictq[is->pictq_rindex];

            is->video_current_pts = vp->pts;
            is->video_current_pts_time = av_gettime();

            delay = vp->pts - is->frame_last_pts; /* the pts from last time */
            if(delay <= 0 || delay >= 1.0) {
                /* if incorrect delay, use previous one */
                delay = is->frame_last_delay;
            }
            /* save for next time */
            is->frame_last_delay = delay;
            is->frame_last_pts = vp->pts;

            /* update delay to sync to audio if not master source */
            if(is->av_sync_type != AV_SYNC_VIDEO_MASTER) {
                ref_clock = get_master_clock(is);
                diff = vp->pts - ref_clock;

                /* Skip or repeat the frame. Take delay into account
                   FFPlay still doesn't "know if this is the best guess." */
                sync_threshold = (delay > AV_SYNC_THRESHOLD) ? delay : AV_SYNC_THRESHOLD;
                if(fabs(diff) < AV_NOSYNC_THRESHOLD) {
                    if(diff <= -sync_threshold) {
                        /* Video is behind: show the frame now. */
                        delay = 0;
                    } else if(diff >= sync_threshold) {
                        /* Video is ahead: hold the frame twice as long. */
                        delay = 2 * delay;
                    }
                }
            }
            is->frame_timer += delay;
            /* compute the REAL delay */
            actual_delay = is->frame_timer - (av_gettime() / 1000000.0);
            if(actual_delay < 0.010) {
                /* Really it should skip the picture instead */
                actual_delay = 0.010;
            }
            schedule_refresh(is, (int)(actual_delay * 1000 + 0.5));

            /* show the picture! */
            video_display(is);

            /* update queue for next picture! */
            if(++is->pictq_rindex == VIDEO_PICTURE_QUEUE_SIZE) {
                is->pictq_rindex = 0;
            }
            /* Shrink the queue under its mutex and wake any producer
               blocked in queue_picture() waiting for free space. */
            SDL_LockMutex(is->pictq_mutex);
            is->pictq_size--;
            SDL_CondSignal(is->pictq_cond);
            SDL_UnlockMutex(is->pictq_mutex);
        }
    } else {
        schedule_refresh(is, 100);
    }
}
/*
 * Main game loop.
 *
 * Starts audio/music plus the music-update thread, then runs one frame
 * per iteration until the player quits or the network drops: reads input,
 * applies network-thread state, simulates both ships, and redraws.  The
 * entire frame body runs while holding player_mutex so the network thread
 * sees consistent player data; the mutex is released just before the page
 * flip.  Prints the average framerate and stops audio on exit.
 */
static void PlayGame()
{
    Uint8 *keystate;
    int quit = 0;
    int turn;
    int prev_ticks = 0, cur_ticks = 0; /* for keeping track of timing */
    int awaiting_respawn = 0;          /* network opponent killed, not yet respawned */

    /* framerate counter variables */
    int start_time, end_time;
    int frames_drawn = 0;

    /* respawn timer (-1 = alive; >= 0 = counting up to respawn) */
    int respawn_timer = -1;

    prev_ticks = SDL_GetTicks();
    start_time = time(NULL);

    /* Reset the score counters. */
    player.score = 0;
    opponent.score = 0;

    /* Start sound playback. */
    StartAudio();
    StartMusic();

    /* Start the music update thread. */
    music_update_thread = SDL_CreateThread(UpdateMusicThread, NULL);
    if (music_update_thread == NULL) {
        printf("Unable to start music update thread.\n");
    }

    /* Start the game! */
    while ((quit == 0) && network_ok) {

        /* Determine how many milliseconds have passed since
           the last frame, and update our motion scaling. */
        prev_ticks = cur_ticks;
        cur_ticks = SDL_GetTicks();
        time_scale = (double)(cur_ticks-prev_ticks)/30.0;

        /* Update SDL's internal input state information. */
        SDL_PumpEvents();

        /* Grab a snapshot of the keyboard. */
        keystate = SDL_GetKeyState(NULL);

        /* Lock the mutex so we can access the player's data. */
        SDL_LockMutex(player_mutex);

        /* If this is a network game, take note of variables
           set by the network thread. These are handled differently
           for a scripted opponent. */
        if (opponent_type == OPP_NETWORK) {

            /* Has the opponent respawned? */
            if (network_opponent_respawn) {
                printf("Remote player has respawned.\n");
                opponent.shields = 100;
                network_opponent_respawn = 0;
                awaiting_respawn = 0;
            }

            /* Has the local player been hit? */
            if (local_player_hit) {
                local_player_hit--;
                player.shields -= PHASER_DAMAGE;
                ShowPhaserHit(&player);
                /* No need to check for death, the other computer will tell us. */
            }
        }

        /* Update phasers (firing counters decay with scaled time). */
        player.firing -= time_scale;
        if (player.firing < 0) player.firing = 0;
        opponent.firing -= time_scale;
        if (opponent.firing < 0) opponent.firing = 0;
        ChargePhasers(&player);

        /* If the local player is destroyed, the respawn timer will
           start counting. During this time the controls are disabled
           and explosion sequence occurs. */
        if (respawn_timer >= 0) {
            respawn_timer++;

            if (respawn_timer >= ((double)RESPAWN_TIME / time_scale)) {
                respawn_timer = -1;
                InitPlayer(&player);

                /* Set the local_player_respawn flag so the
                   network thread will notify the opponent
                   of the respawn. */
                local_player_respawn = 1;

                SetStatusMessage("GOOD LUCK, WARRIOR!");
            }
        }

        /* Respond to input and network events, but not if we're in a respawn. */
        if (respawn_timer == -1) {
            if (keystate[SDLK_q] || keystate[SDLK_ESCAPE]) quit = 1;

            /* Left and right arrow keys control turning. */
            turn = 0;
            if (keystate[SDLK_LEFT]) turn += 10;
            if (keystate[SDLK_RIGHT]) turn -= 10;

            /* Forward and back arrow keys activate thrusters. */
            player.accel = 0;
            if (keystate[SDLK_UP]) player.accel = PLAYER_FORWARD_THRUST;
            if (keystate[SDLK_DOWN]) player.accel = PLAYER_REVERSE_THRUST;

            /* Spacebar fires phasers. */
            if (keystate[SDLK_SPACE]) {
                if (CanPlayerFire(&player)) {
                    FirePhasers(&player);

                    /* If it's a hit, either notify the opponent
                       or exact the damage. Create a satisfying particle
                       burst. */
                    if (!awaiting_respawn && CheckPhaserHit(&player,&opponent)) {
                        ShowPhaserHit(&opponent);
                        DamageOpponent();

                        /* If that killed the opponent, set the
                           "awaiting respawn" state, to prevent multiple
                           kills. */
                        if (opponent.shields <= 0 && opponent_type == OPP_NETWORK)
                            awaiting_respawn = 1;
                    }
                }
            }

            /* Turn. */
            player.angle += turn * time_scale;
            if (player.angle < 0) player.angle += 360;
            if (player.angle >= 360) player.angle -= 360;

            /* If this is a network game, the remote player will
               tell us if we've died. Otherwise we have to check
               for failed shields. */
            if (((opponent_type == OPP_NETWORK) && local_player_dead) ||
                (player.shields <= 0)) {
                printf("Local player has been destroyed.\n");
                local_player_dead = 0;

                /* Kaboom! */
                KillPlayer();

                /* Respawn. */
                respawn_timer = 0;
            }
        }

        /* If this is a player vs. computer game, give the computer a chance. */
        if (opponent_type == OPP_COMPUTER) {
            if (RunGameScript() != 0) {
                fprintf(stderr, "Ending game due to script error.\n");
                quit = 1;
            }

            /* Check for phaser hits against the player. */
            if (opponent.firing) {
                if (CheckPhaserHit(&opponent,&player)) {
                    ShowPhaserHit(&player);
                    player.shields -= PHASER_DAMAGE;

                    /* Did that destroy the player? */
                    if (respawn_timer < 0 && player.shields <= 0) {
                        KillPlayer();
                        respawn_timer = 0;
                    }
                }
            }

            ChargePhasers(&opponent);
            UpdatePlayer(&opponent);
        }

        /* Update the player's position. */
        UpdatePlayer(&player);

        /* Update the status information. */
        SetPlayerStatusInfo(player.score, player.shields, player.charge);
        SetOpponentStatusInfo(opponent.score, opponent.shields);

        /* Make the camera follow the player (but impose limits). */
        camera_x = player.world_x - SCREEN_WIDTH/2;
        camera_y = player.world_y - SCREEN_HEIGHT/2;

        if (camera_x < 0) camera_x = 0;
        if (camera_x >= WORLD_WIDTH-SCREEN_WIDTH)
            camera_x = WORLD_WIDTH-SCREEN_WIDTH-1;
        if (camera_y < 0) camera_y = 0;
        if (camera_y >= WORLD_HEIGHT-SCREEN_HEIGHT)
            camera_y = WORLD_HEIGHT-SCREEN_HEIGHT-1;

        /* Update the particle system. */
        UpdateParticles();

        /* Keep OpenAL happy. */
        UpdateAudio(&player, &opponent);

        /* Redraw everything. */
        DrawBackground(screen, camera_x, camera_y);
        DrawParallax(screen, camera_x, camera_y);
        DrawParticles(screen, camera_x, camera_y);
        if (opponent.firing)
            DrawPhaserBeam(&opponent, screen, camera_x, camera_y);
        if (player.firing)
            DrawPhaserBeam(&player, screen, camera_x, camera_y);
        if (respawn_timer < 0)
            DrawPlayer(&player);
        if (!awaiting_respawn)
            DrawPlayer(&opponent);
        UpdateStatusDisplay(screen);

        /* Release the mutex so the networking system can get it.
           It doesn't stay unlocked for very long, but the networking
           system should still have plenty of time. */
        SDL_UnlockMutex(player_mutex);

        /* Flip the page. */
        SDL_Flip(screen);

        frames_drawn++;
    }

    end_time = time(NULL);
    if (start_time == end_time) end_time++;

    /* Display the average framerate. */
    printf("Drew %i frames in %i seconds, for a framerate of %.2f fps.\n",
           frames_drawn,
           end_time-start_time,
           (float)frames_drawn/(float)(end_time-start_time));

    /* Terminate the music update thread.
       NOTE(review): SDL_KillThread terminates without cleanup - the thread
       may be holding locks or mid-update; a quit flag + SDL_WaitThread
       would be safer. Confirm before changing. */
    if (music_update_thread != NULL) {
        SDL_KillThread(music_update_thread);
        music_update_thread = NULL;
    }

    /* Stop audio playback. */
    StopAudio();
    StopMusic();
}