bool Texture::reload() { if (_name.empty()) return false; ::Aurora::FileType type = ::Aurora::kFileTypeNone; ImageDecoder *image = 0; TXI *txi = 0; try { txi = loadTXI (_name); image = loadImage(_name, type, txi); } catch (Common::Exception &e) { delete txi; e.add("Failed to reload texture \"%s\" (%d)", _name.c_str(), type); throw; } removeFromQueues(); set(_name, image, type, txi); addToQueues(); return true; }
//----------------------------------------------------------------------- void PagingLandScapePageManager::queuePageNeighbors() { // Queue the rest // Loading must be done one by one to avoid FPS drop, so they are queued. // We must load the next visible LandScape pages, // check the LandScape boundaries for (unsigned int i = mCurrentcam->mPreIniX; i <= mCurrentcam->mPreFinX; i++) { for (unsigned int j = mCurrentcam->mPreIniZ; j <= mCurrentcam->mPreFinZ; j++) { // pages in this zone around camera, must be at // least preloading. that means they can be // loaded too. PagingLandScapePage* p = getPage(i, j, true); if (!(p->isInLoadQueue() || p->isLoaded())) { if ((j >= mCurrentcam->mIniZ) && (j <= mCurrentcam->mFinZ) && (i >= mCurrentcam->mIniX) && (i <= mCurrentcam->mFinX)) { // pages in this tighter zone // around camera must be Loading // or Loaded as they may be // below camera very soon. removeFromQueues(p); mPageLoadQueue.push(p); p->setInQueue(PagingLandScapePage::QUEUE_LOAD); } else { // must be at least preloading p->preloadInBackground(); } } p->touch(); } } }
//----------------------------------------------------------------------- void PagingLandScapePageManager::releasePage(PagingLandScapePage* p) { removeFromQueues(p); p->uninit(); mActivePages.remove(p); mFreePages.push_back(p); }
/** Destroy the texture, releasing its GL object and owned data. */
Texture::~Texture() {
	// Make sure the manager queues no longer reference this object.
	removeFromQueues();

	// Hand the GL texture ID back to the graphics manager, if one
	// was ever created.
	if (_textureID != 0)
		GfxMan.abandon(&_textureID, 1);

	// The texture owns its image decoder and TXI; free them.
	delete _image;
	delete _txi;
}
/* Deliver an asynchronous exception to a thread on this Capability,
 * single-threaded path: no inter-capability messaging is needed.
 * Does nothing if the target thread has already finished.
 */
static void
throwToSingleThreaded__ (Capability *cap, StgTSO *tso, StgClosure *exception,
                         rtsBool stop_at_atomically, StgUpdateFrame *stop_here)
{
    /* A completed or killed thread cannot receive an exception. */
    if (tso->what_next == ThreadComplete || tso->what_next == ThreadKilled)
        return;

    /* Detach the TSO from whatever blocking queue holds it, then
     * raise the exception in its stack. */
    removeFromQueues(cap, tso);
    raiseAsync(cap, tso, exception, stop_at_atomically, stop_here);
}
/* Attempt to deliver the asynchronous exception carried by `msg` to its
 * target thread.
 *
 * Returns THROWTO_SUCCESS if the exception was raised in the target,
 * or THROWTO_BLOCKED if delivery had to be deferred (the message was
 * queued on the target, or forwarded to the target's Capability).
 *
 * BUG FIX: the case group for BlockedOnRead/Write/Delay was guarded by
 * "#ifndef THREADEDED_RTS" — a misspelling of THREADED_RTS. Since the
 * misspelled macro is never defined, those cases were compiled into the
 * threaded RTS as well, where they do not apply. Corrected to
 * THREADED_RTS.
 */
nat
throwToMsg (Capability *cap, MessageThrowTo *msg)
{
    StgWord status;
    StgTSO *target = msg->target;
    Capability *target_cap;

    goto check_target;

retry:
    write_barrier();
    debugTrace(DEBUG_sched, "throwTo: retrying...");

check_target:
    ASSERT(target != END_TSO_QUEUE);

    // Thread already dead?
    if (target->what_next == ThreadComplete
        || target->what_next == ThreadKilled) {
        return THROWTO_SUCCESS;
    }

    debugTraceCap(DEBUG_sched, cap,
                  "throwTo: from thread %lu to thread %lu",
                  (unsigned long)msg->source->id,
                  (unsigned long)msg->target->id);

#ifdef DEBUG
    traceThreadStatus(DEBUG_sched, target);
#endif

    // The target belongs to another Capability: forward the message
    // there and let that Capability deliver it.
    target_cap = target->cap;
    if (target->cap != cap) {
        throwToSendMsg(cap, target_cap, msg);
        return THROWTO_BLOCKED;
    }

    status = target->why_blocked;

    switch (status) {
    case NotBlocked:
    {
        if ((target->flags & TSO_BLOCKEX) == 0) {
            // It's on our run queue and not blocking exceptions
            raiseAsync(cap, target, msg->exception, rtsFalse, NULL);
            return THROWTO_SUCCESS;
        } else {
            blockedThrowTo(cap,target,msg);
            return THROWTO_BLOCKED;
        }
    }

    case BlockedOnMsgThrowTo:
    {
        const StgInfoTable *i;
        MessageThrowTo *m;

        m = target->block_info.throwto;

        // target is local to this cap, but has sent a throwto
        // message to another cap.
        //
        // The source message is locked.  We need to revoke the
        // target's message so that we can raise the exception, so
        // we attempt to lock it.

        // There's a possibility of a deadlock if two threads are both
        // trying to throwTo each other (or more generally, a cycle of
        // threads).  To break the symmetry we compare the addresses
        // of the MessageThrowTo objects, and the one for which m <
        // msg gets to spin, while the other can only try to lock
        // once, but must then back off and unlock both before trying
        // again.
        if (m < msg) {
            i = lockClosure((StgClosure *)m);
        } else {
            i = tryLockClosure((StgClosure *)m);
            if (i == NULL) {
                // debugBelch("collision\n");
                throwToSendMsg(cap, target->cap, msg);
                return THROWTO_BLOCKED;
            }
        }

        if (i == &stg_MSG_NULL_info) {
            // we know there's a MSG_TRY_WAKEUP on the way, so we
            // might as well just do it now.  The message will
            // be a no-op when it arrives.
            unlockClosure((StgClosure*)m, i);
            tryWakeupThread(cap, target);
            goto retry;
        }

        if (i != &stg_MSG_THROWTO_info) {
            // if it's a MSG_NULL, this TSO has been woken up by another Cap
            unlockClosure((StgClosure*)m, i);
            goto retry;
        }

        if ((target->flags & TSO_BLOCKEX) &&
            ((target->flags & TSO_INTERRUPTIBLE) == 0)) {
            unlockClosure((StgClosure*)m, i);
            blockedThrowTo(cap,target,msg);
            return THROWTO_BLOCKED;
        }

        // nobody else can wake up this TSO after we claim the message
        doneWithMsgThrowTo(m);

        raiseAsync(cap, target, msg->exception, rtsFalse, NULL);
        return THROWTO_SUCCESS;
    }

    case BlockedOnMVar:
    case BlockedOnMVarRead:
    {
        /*
          To establish ownership of this TSO, we need to acquire a
          lock on the MVar that it is blocked on.
        */
        StgMVar *mvar;
        StgInfoTable *info USED_IF_THREADS;

        mvar = (StgMVar *)target->block_info.closure;

        // ASSUMPTION: tso->block_info must always point to a
        // closure.  In the threaded RTS it does.
        switch (get_itbl((StgClosure *)mvar)->type) {
        case MVAR_CLEAN:
        case MVAR_DIRTY:
            break;
        default:
            goto retry;
        }

        info = lockClosure((StgClosure *)mvar);

        // we have the MVar, let's check whether the thread
        // is still blocked on the same MVar.
        if ((target->why_blocked != BlockedOnMVar
             && target->why_blocked != BlockedOnMVarRead)
            || (StgMVar *)target->block_info.closure != mvar) {
            unlockClosure((StgClosure *)mvar, info);
            goto retry;
        }

        if (target->_link == END_TSO_QUEUE) {
            // the MVar operation has already completed.  There is a
            // MSG_TRY_WAKEUP on the way, but we can just wake up the
            // thread now anyway and ignore the message when it
            // arrives.
            unlockClosure((StgClosure *)mvar, info);
            tryWakeupThread(cap, target);
            goto retry;
        }

        if ((target->flags & TSO_BLOCKEX) &&
            ((target->flags & TSO_INTERRUPTIBLE) == 0)) {
            blockedThrowTo(cap,target,msg);
            unlockClosure((StgClosure *)mvar, info);
            return THROWTO_BLOCKED;
        } else {
            // revoke the MVar operation
            removeFromMVarBlockedQueue(target);
            raiseAsync(cap, target, msg->exception, rtsFalse, NULL);
            unlockClosure((StgClosure *)mvar, info);
            return THROWTO_SUCCESS;
        }
    }

    case BlockedOnBlackHole:
    {
        if (target->flags & TSO_BLOCKEX) {
            // BlockedOnBlackHole is not interruptible.
            blockedThrowTo(cap,target,msg);
            return THROWTO_BLOCKED;
        } else {
            // Revoke the message by replacing it with IND. We're not
            // locking anything here, so we might still get a TRY_WAKEUP
            // message from the owner of the blackhole some time in the
            // future, but that doesn't matter.
            ASSERT(target->block_info.bh->header.info == &stg_MSG_BLACKHOLE_info);
            OVERWRITE_INFO(target->block_info.bh, &stg_IND_info);
            raiseAsync(cap, target, msg->exception, rtsFalse, NULL);
            return THROWTO_SUCCESS;
        }
    }

    case BlockedOnSTM:
        lockTSO(target);
        // Unblocking BlockedOnSTM threads requires the TSO to be
        // locked; see STM.c:unpark_tso().
        if (target->why_blocked != BlockedOnSTM) {
            unlockTSO(target);
            goto retry;
        }
        if ((target->flags & TSO_BLOCKEX) &&
            ((target->flags & TSO_INTERRUPTIBLE) == 0)) {
            blockedThrowTo(cap,target,msg);
            unlockTSO(target);
            return THROWTO_BLOCKED;
        } else {
            raiseAsync(cap, target, msg->exception, rtsFalse, NULL);
            unlockTSO(target);
            return THROWTO_SUCCESS;
        }

    case BlockedOnCCall_Interruptible:
#ifdef THREADED_RTS
    {
        Task *task = NULL;
        // walk suspended_ccalls to find the correct worker thread
        InCall *incall;
        for (incall = cap->suspended_ccalls; incall != NULL; incall = incall->next) {
            if (incall->suspended_tso == target) {
                task = incall->task;
                break;
            }
        }
        if (task != NULL) {
            blockedThrowTo(cap, target, msg);
            if (!((target->flags & TSO_BLOCKEX) &&
                  ((target->flags & TSO_INTERRUPTIBLE) == 0))) {
                interruptWorkerTask(task);
            }
            return THROWTO_BLOCKED;
        } else {
            debugTraceCap(DEBUG_sched, cap,
                          "throwTo: could not find worker thread to kill");
        }
        // fall to next
    }
#endif
    case BlockedOnCCall:
        blockedThrowTo(cap,target,msg);
        return THROWTO_BLOCKED;

    // FIX: was "#ifndef THREADEDED_RTS" (typo), which made these
    // non-threaded-only cases compile into the threaded RTS too.
#ifndef THREADED_RTS
    case BlockedOnRead:
    case BlockedOnWrite:
    case BlockedOnDelay:
#if defined(mingw32_HOST_OS)
    case BlockedOnDoProc:
#endif
        if ((target->flags & TSO_BLOCKEX) &&
            ((target->flags & TSO_INTERRUPTIBLE) == 0)) {
            blockedThrowTo(cap,target,msg);
            return THROWTO_BLOCKED;
        } else {
            removeFromQueues(cap,target);
            raiseAsync(cap, target, msg->exception, rtsFalse, NULL);
            return THROWTO_SUCCESS;
        }
#endif

    case ThreadMigrating:
        // if is is ThreadMigrating and tso->cap is ours, then it
        // *must* be migrating *to* this capability.  If it were
        // migrating away from the capability, then tso->cap would
        // point to the destination.
        //
        // There is a MSG_WAKEUP in the message queue for this thread,
        // but we can just do it preemptively:
        tryWakeupThread(cap, target);
        // and now retry, the thread should be runnable.
        goto retry;

    default:
        barf("throwTo: unrecognised why_blocked (%d)", target->why_blocked);
    }
    barf("throwTo");
}
/** Refresh the texture's queue registration.
 *
 *  Pulls the texture out of the manager queues and immediately
 *  re-adds it; removal must happen before re-insertion.
 */
void Texture::refresh() {
	removeFromQueues();
	addToQueues();
}
// Destructor: deregister from the manager queues and release the GL texture.
//
// NOTE(review): unlike the other Texture destructor variant visible in this
// file, this one does not delete _txi/_image — confirm this class variant
// does not own those members, otherwise this leaks them.
Texture::~Texture() { removeFromQueues(); if (_textureID != 0) GfxMan.abandon(&_textureID, 1); }