bool IOConfigDirectory::initWithOffset(int start, int type)
{
    IOReturn status = kIOReturnSuccess;
    const UInt32 *data;
	
    if( !OSObject::init() )
	{
        status = kIOReturnError;
	}
    
	if( status == kIOReturnSuccess )
	{
		fStart = start;
		fType = type;
    
		status = updateROMCache( start, 1 );
	}
	
	if( status == kIOReturnSuccess )
	{
		data = lockData();
		fNumEntries = (OSSwapBigToHostInt32(data[start]) & kConfigLeafDirLength) >> kConfigLeafDirLengthPhase;
		unlockData();
	
		if( fNumEntries > 256 ) // 1k request
		{
			status = kIOReturnNoMemory;
		}
	}
	
	return( status == kIOReturnSuccess );
}
bool LLQueuedThread::completeRequest(handle_t handle)
{
	bool res = false;
	lockData();
	QueuedRequest* req = (QueuedRequest*)mRequestHash.find(handle);
	if (req)
	{
		llassert_always(req->getStatus() != STATUS_QUEUED);
		llassert_always(req->getStatus() != STATUS_INPROGRESS);
#if _DEBUG
// 		LL_INFOS() << llformat("LLQueuedThread::Completed req [%08d]",handle) << LL_ENDL;
#endif
		if (!(req->getFlags() & FLAG_LOCKED))
		{
			mRequestHash.erase(handle);
			req->deleteRequest();
// 			check();
 		}
 		else
 		{
			// Cause deletion immediately after FLAG_LOCKED is released.
 			req->setFlags(FLAG_AUTO_COMPLETE);
 		}
		res = true;
	}
	unlockData();
	return res;
}
//virtual
// May be called from any thread
S32 LLQueuedThread::getPending()
{
	S32 res;
	lockData();
	res = mRequestQueue.size();
	unlockData();
	return res;
}
Example #4
// MAIN thread
LLQueuedThread::handle_t LLQueuedThread::generateHandle()
{
	lockData();
	while ((mNextHandle == nullHandle()) || (mRequestHash.find(mNextHandle)))
	{
		mNextHandle++;
	}
	unlockData();
	return mNextHandle++;
}
Example #5
int NET2_TCPSend(int socket, char *buf, int len)
{
  int val = 0;

  lockData();
  val = raw_NET2_TCPSend(socket, buf, len);
  unlockData();

  return val;
}
Example #6
int NET2_TCPConnectToIP(IPaddress *ip)
{
  int val = 0;

  lockData();
  val = raw_NET2_TCPConnectToIP(ip);
  unlockData();

  return val;
}
Example #7
IPaddress *NET2_TCPGetPeerAddress(int s)
{
  IPaddress *ip = NULL;

  lockData();
  ip = raw_NET2_TCPGetPeerAddress(s);
  unlockData();

  return ip;
}
void LLQueuedThread::abortRequest(handle_t handle, bool autocomplete)
{
	lockData();
	QueuedRequest* req = (QueuedRequest*)mRequestHash.find(handle);
	if (req)
	{
		req->setFlags(FLAG_ABORT | (autocomplete ? FLAG_AUTO_COMPLETE : 0));
	}
	unlockData();
}
// MAIN thread
void LLQueuedThread::setFlags(handle_t handle, U32 flags)
{
	lockData();
	QueuedRequest* req = (QueuedRequest*)mRequestHash.find(handle);
	if (req)
	{
		req->setFlags(flags);
	}
	unlockData();
}
Example #10
int NET2_UDPSend(IPaddress *ip, char *buf, int len)
{
  int val = -1;

  lockData();
  val = raw_NET2_UDPSend(ip, buf, len);
  unlockData();

  return val;
}
Example #11
int NET2_TCPAcceptOn(int port)
{
  int val = 0;

  lockData();
  val = raw_NET2_TCPAcceptOn(port);
  unlockData();

  return val;
}
Example #12
int NET2_TCPAcceptOnIP(IPaddress *ip)
{
  int val = 0;

  lockData();
  val = raw_NET2_TCPAcceptOnIP(ip);
  unlockData();

  return val;
}
Example #13
int NET2_UDPAcceptOn(int port, int size)
{
  int val = -1;

  lockData();
  val = raw_NET2_UDPAcceptOn(port, size);
  unlockData();

  return val;
}
Example #14
int NET2_TCPConnectTo(char *host, int port)
{
  int val = 0;

  lockData();
  val = raw_NET2_TCPConnectTo(host, port);
  unlockData();

  return val;
}
Example #15
/// Run the interpreter.
void FLLua::run()
{
	LL_INFOS("Lua") << __LINE__ << ": *** THREAD LOOP STARTS HERE ***" << llendl;
	while(1)
	{
		if (!pLuaStack /*|| mError*/ || LLApp::isError() || LLApp::isStopped())
			break;	// Broken.

		bool locked=false;
		mAllowPause=true; //Let FLLua::CriticalSection() sync with MAIN on first call.
		// Process Hooks
		if(mPendingHooks)
		{
			lockData();
			locked=true;
			mPendingHooks=false;
			while(!mQueuedHooks.empty()) //Allow multiple hooks per loop.
			{
				// Peek at the top of the stack
				HookRequest *hr = mQueuedHooks.front();	
				ExecuteHook(hr);
				mQueuedHooks.pop();
				delete hr; //Say no to memory leaks.
			}
			//if(mError)goto endloop;
		}
		// Process Macros/Raw Commands
		if(mPendingCommands)
		{
			if(!locked)
				lockData();
			locked=true;
			mPendingCommands=false;
			while(!mQueuedCommands.empty())
			{
				// Peek at the top of the stack
				std::string &hr = mQueuedCommands.front(); //byref. faster.
				if(FLLua::isMacro(hr))
					RunMacro(hr); // Run macro.
				else
					RunString(hr); // Run command.
				mQueuedCommands.pop(); //safe to pop now.
				//if(mError)goto endloop;
			} 
		}
		mAllowPause=true;
//endloop:
		if(locked) 
			unlockData(); //Always.
		//if(mError)break;
		yield();
		ms_sleep(10);
	}
	LL_INFOS("Lua") << __LINE__ << ": *** THREAD EXITING ***" << llendl;
}
Example #16
UDPpacket *NET2_UDPRead(int socket)
{
  UDPpacket *val = NULL;

  lockData();
  val = raw_NET2_UDPRead(socket);
  unlockData();
  signalRead();

  return val;
}
// MAIN thread
LLQueuedThread::QueuedRequest* LLQueuedThread::getRequest(handle_t handle)
{
	if (handle == nullHandle())
	{
		return 0;
	}
	lockData();
	QueuedRequest* res = (QueuedRequest*)mRequestHash.find(handle);
	unlockData();
	return res;
}
LLQueuedThread::status_t LLQueuedThread::getRequestStatus(handle_t handle)
{
	status_t res = STATUS_EXPIRED;
	lockData();
	QueuedRequest* req = (QueuedRequest*)mRequestHash.find(handle);
	if (req)
	{
		res = req->getStatus();
	}
	unlockData();
	return res;
}
// MAIN thread
void LLQueuedThread::printQueueStats()
{
	lockData();
	if (!mRequestQueue.empty())
	{
		QueuedRequest *req = *mRequestQueue.begin();
		llinfos << llformat("Pending Requests:%d Current status:%d", mRequestQueue.size(), req->getStatus()) << llendl;
	}
	else
	{
		llinfos << "Queued Thread Idle" << llendl;
	}
	unlockData();
}
Example #20
static __inline__ int sendEvent(Uint8 code, int data1, int data2) {
	SDL_Event event;

	event.type = SDL_USEREVENT;
	event.user.code = code;
	event.user.data1 = (void *)data1;
	event.user.data2 = (void *)data2;

	SDL_assert(dataLocked);
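	/* Temporarily release the shared-data lock around FE_PushEvent() so the
	   event system is not entered with the lock held; the caller took the
	   lock and expects it to still be held when sendEvent() returns. */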

	unlockData();
	FE_PushEvent(&event);
	lockData();

	return 0;
}
// MAIN thread
bool LLQueuedThread::addRequest(QueuedRequest* req)
{
	if (mStatus == QUITTING)
	{
		return false;
	}
	
	lockData();
	req->setStatus(STATUS_QUEUED);
	mRequestQueue.insert(req);
	mRequestHash.insert(req);
#if _DEBUG
// 	llinfos << llformat("LLQueuedThread::Added req [%08d]",handle) << llendl;
#endif
	unlockData();

	incQueue();

	return true;
}
void LLQueuedThread::setPriority(handle_t handle, U32 priority)
{
	lockData();
	QueuedRequest* req = (QueuedRequest*)mRequestHash.find(handle);
	if (req)
	{
		if(req->getStatus() == STATUS_INPROGRESS)
		{
			// not in list
			req->setPriority(priority);
		}
		else if(req->getStatus() == STATUS_QUEUED)
		{
			// remove from list then re-insert
			llverify(mRequestQueue.erase(req) == 1);
			req->setPriority(priority);
			mRequestQueue.insert(req);
		}
	}
	unlockData();
}
// MAIN thread
bool LLQueuedThread::waitForResult(LLQueuedThread::handle_t handle, bool auto_complete)
{
	llassert (handle != nullHandle());
	bool res = false;
	bool waspaused = isPaused();
	bool done = false;
	while(!done)
	{
		update(0); // unpauses
		lockData();
		QueuedRequest* req = (QueuedRequest*)mRequestHash.find(handle);
		if (!req)
		{
			done = true; // request does not exist
		}
		else if (req->getStatus() == STATUS_COMPLETE && !(req->getFlags() & FLAG_LOCKED))
		{
			res = true;
			if (auto_complete)
			{
				mRequestHash.erase(handle);
				req->deleteRequest();
// 				check();
			}
			done = true;
		}
		unlockData();
		
		if (!done && mThreaded)
		{
			yield();
		}
	}
	if (waspaused)
	{
		pause();
	}
	return res;
}
Example #24
static __inline__ int sendEvent(Uint8 code, int data1, int data2)
{
  SDL_Event event;

  event.type = SDL_USEREVENT;
  event.user.code = code;
  event.user.data1 = (void *)data1;
  event.user.data2 = (void *)data2;

  if (dataLocked)
  {
    unlockData();
    FE_PushEvent(&event);
    lockData();
  }
  else
  {
    //printf("this should not happen\n"); fflush(NULL);
    exit(1);
  }

  return 0;
}
S32 LLQueuedThread::processNextRequest()
{
	QueuedRequest *req;
	// Get next request from pool
	lockData();
	while(1)
	{
		req = NULL;
		if (mRequestQueue.empty())
		{
			break;
		}
		req = *mRequestQueue.begin();
		mRequestQueue.erase(mRequestQueue.begin());
		if ((req->getFlags() & FLAG_ABORT) || (mStatus == QUITTING))
		{
			req->setStatus(STATUS_ABORTED);
			// Unlock, because we can't call finishRequest() while keeping this lock:
			// generateHandle() takes this lock too and is called while holding a lock
			// (ie LLTextureFetchWorker::mWorkMutex) that finishRequest will lock too,
			// causing a deadlock.
			// Although a complete rewrite of LLQueuedThread is in order because it's
			// far from robust in the way it handles its locking; the following rationale
			// at least makes it plausible that releasing the lock here SHOULD work if
			// the original coder didn't completely f**k up: if before the QueuedRequest
			// req was only accessed while keeping the lock, then it still should
			// never happen that another thread is waiting for this lock in order to
			// access the QueuedRequest: a few lines lower we delete it.
			// In other words, if directly after releasing this lock another thread
			// would access req, then that can only happen by finding it again in
			// either mRequestQueue or mRequestHash. We already deleted it from the
			// first, so this access would have to happen by finding it in mRequestHash.
			// Such access happens in the following functions:
			// 1) LLQueuedThread::shutdown -- but it does that anyway, as it doesn't use any locking.
			// 2) LLQueuedThread::generateHandle -- might skip our handle, whereas before it would have blocked until we deleted it. Skipping it is actually better.
			// 3) LLQueuedThread::waitForResult -- this now doesn't touch the req when it has the flag FLAG_LOCKED set.
			// 4) LLQueuedThread::getRequest -- wherever this is used, FLAG_LOCKED is tested before using the req.
			// 5) LLQueuedThread::getRequestStatus -- this is a read-only operation on the status, which should never be changed from finishRequest().
			// 6) LLQueuedThread::abortRequest -- it doesn't seem to hurt to add flags (if this happens at all), while calling finishRequest().
			// 7) LLQueuedThread::setFlags -- same.
			// 8) LLQueuedThread::setPriority -- doesn't access req with status STATUS_ABORTED, STATUS_COMPLETE or STATUS_INPROGRESS.
			// 9) LLQueuedThread::completeRequest -- now sets FLAG_AUTO_COMPLETE instead of deleting the req, if FLAG_LOCKED is set, so that deletion happens here when finishRequest returns.
			req->setFlags(FLAG_LOCKED);
			unlockData();
			req->finishRequest(false);
			lockData();
			req->resetFlags(FLAG_LOCKED);
			if ((req->getFlags() & FLAG_AUTO_COMPLETE))
			{
				req->resetFlags(FLAG_AUTO_COMPLETE);
				mRequestHash.erase(req);
// 				check();
				unlockData();
				req->deleteRequest();
				lockData();
			}
			continue;
		}
		llassert_always(req->getStatus() == STATUS_QUEUED);
		break;
	}
	U32 start_priority = 0;
	if (req)
	{
		req->setStatus(STATUS_INPROGRESS);
		start_priority = req->getPriority();
	}
	unlockData();

	// This is the only place we will call req->setStatus() after
	// it has initially been set to STATUS_QUEUED, so it is
	// safe to access req.
	if (req)
	{
		// process request
		bool complete = req->processRequest();

		if (complete)
		{
			lockData();
			req->setStatus(STATUS_COMPLETE);
			req->setFlags(FLAG_LOCKED);
			unlockData();
			req->finishRequest(true);
			if ((req->getFlags() & FLAG_AUTO_COMPLETE))
			{
				lockData();
				req->resetFlags(FLAG_AUTO_COMPLETE);
				mRequestHash.erase(req);
// 				check();
				req->resetFlags(FLAG_LOCKED);
				unlockData();
				req->deleteRequest();
			}
			else
			{
				req->resetFlags(FLAG_LOCKED);
			}
		}
		else
		{
			lockData();
			req->setStatus(STATUS_QUEUED);
			mRequestQueue.insert(req);
			unlockData();
			if (mThreaded && start_priority < PRIORITY_NORMAL)
			{
				ms_sleep(1); // sleep the thread a little
			}
		}
	}

	S32 pending = getPending();

	return pending;
}
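The long comment in processNextRequest() above describes a hand-off protocol: set FLAG_LOCKED on the request, release the shared-data lock while finishRequest() runs, then re-take the lock and honor any FLAG_AUTO_COMPLETE another thread may have set in the meantime. Below is a minimal, self-contained sketch of that pattern; it is not part of LLQueuedThread, and the names in it (Item, gDataMutex, runUnlocked) are hypothetical stand-ins.

#include <mutex>

// Hypothetical stand-ins for LLQueuedThread's request and lockData()/unlockData().
struct Item
{
    enum : unsigned
    {
        FLAG_LOCKED        = 1, // a callback is running outside the lock; do not delete
        FLAG_AUTO_COMPLETE = 2  // delete as soon as FLAG_LOCKED is cleared
    };
    unsigned flags = 0;
};

static std::mutex gDataMutex;

// Run a callback on 'item' without holding the shared-data mutex across it.
// Other threads that find 'item' while FLAG_LOCKED is set must only add flags
// (e.g. FLAG_AUTO_COMPLETE), never delete it -- mirroring completeRequest() above.
template <typename Callback>
void runUnlocked(Item* item, Callback callback)
{
    {
        std::lock_guard<std::mutex> hold(gDataMutex);
        item->flags |= Item::FLAG_LOCKED;
    } // lock released here, so the callback may safely take it again

    callback(item); // plays the role of finishRequest()

    bool destroy = false;
    {
        std::lock_guard<std::mutex> hold(gDataMutex);
        item->flags &= ~Item::FLAG_LOCKED;
        // Deletion was requested by another thread while we were unlocked.
        destroy = (item->flags & Item::FLAG_AUTO_COMPLETE) != 0;
    }
    if (destroy)
    {
        delete item; // corresponds to deleteRequest() after erasing from the hash
    }
}

The effect is that deletion authority stays with whichever thread clears FLAG_LOCKED, so the callback can re-enter other entry points that take the data lock without deadlocking and without the request being deleted out from under it.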
Example #26
static int PumpNetworkEvents(void *nothing)
{
  int i = 0;

#define timeOut (10)

  while (!doneYet)
  {
    if (-1 == snCheckSockets(socketSet, timeOut))
    {
      setError("NET2: the CheckSockets call failed", -1);
    }

    lockData();
    while ((!doneYet) && waitForRead)
    {
      waitUntilRead();
    }

    for (i = 0; ((!doneYet) && (i < lastHeapSocket)); i++)
    {
      if (addState == socketHeap[i]->state)
      {
        switch(socketHeap[i]->type)
        {
        case unusedSocket:
          sendError("NET2: trying to add an unused socket", i);
          break;

        case TCPServerSocket:
        case TCPClientSocket:
          if (-1 != snTCPAddSocket(socketHeap[i]->s.tcpSocket))
          {
            socketHeap[i]->state = readyState;
          }
          else
          {
            socketHeap[i]->state = delState;
            sendError("NET2: can't add a TCP socket to the socket set", i);
          }
          break;

        case UDPServerSocket:
          if (-1 != snUDPAddSocket(socketHeap[i]->s.udpSocket))
          {
            socketHeap[i]->state = readyState;
          }
          else
          {
            socketHeap[i]->state = delState;
            sendError("NET2: can't add a UDP socket to the socket set", i);
          }
          break;

        default:
          sendError("NET2: invalid socket type, this should never happen", i);
          break;
        }
      }
      else if (delState == socketHeap[i]->state)
      {
        switch(socketHeap[i]->type)
        {
        case unusedSocket:
          sendError("NET2: trying to delete an unused socket", i);
          break;

        case TCPServerSocket:
        case TCPClientSocket:
          if (-1 == snTCPDelSocket(socketHeap[i]->s.tcpSocket))
          {
            sendError("NET2: can't delete a TCP socket from the socket set", i);
          }
          snTCPClose(socketHeap[i]->s.tcpSocket);
          FreeSocket(i);
          break;

        case UDPServerSocket:
          if (-1 == snUDPDelSocket(socketHeap[i]->s.udpSocket))
          {
            sendError("NET2: can't delete a UDP socket from the socket set", i);
          }
          snUDPClose(socketHeap[i]->s.udpSocket);
          FreeSocket(i);
          break;

        default:
          sendError("NET2: invalid socket type, this should never happen", i);
          break;
        }
      }
      else if ((TCPServerSocket == socketHeap[i]->type) &&
               (snSocketReady(socketHeap[i]->s.genSocket)))
      {
        TCPsocket socket = NULL;
        socket = snTCPAccept(socketHeap[i]->s.tcpSocket);

        if (NULL != socket)
        {
          int s = -1;

          s = AllocSocket(TCPClientSocket);
          if (-1 != s)
          {
            //printf("got a connection!\n");

            socketHeap[s]->s.tcpSocket = socket;
            socketHeap[s]->state = addState;
            sendEvent(NET2_TCPACCEPTEVENT,  s, socketHeap[i]->p.tcpPort);
          }
          else // can't handle the connection, so close it.
          {
            snTCPClose(socket);
            sendError("NET2: a TCP accept failed", i); // let the app know
          }
        }
      }
      else if ((TCPClientSocket == socketHeap[i]->type) && 
               (readyState == socketHeap[i]->state) &&
               (snSocketReady(socketHeap[i]->s.genSocket)))
      {
        int len;
        CharQue *tb = &socketHeap[i]->q.tb;

        if (tcpQueLen <= tb->len)
        {
          waitForRead = 1;
        }
        else
        {
          len = snTCPRead(socketHeap[i]->s.tcpSocket, 
                          &tb->buf[tb->len], 
                          tcpQueLen - tb->len);

          if (0 < len)
          {
            int oldlen = tb->len;
            tb->len += len;
            if (0 == oldlen)
            {
              sendEvent(NET2_TCPRECEIVEEVENT, i, -1);
            }
          }
          else // no byte, must be dead.
          {
            socketHeap[i]->state = dyingState;
            sendEvent(NET2_TCPCLOSEEVENT, i, -1);
          }
        }
      }
      else if ((UDPServerSocket == socketHeap[i]->type) && 
               (readyState == socketHeap[i]->state) &&
               (snSocketReady(socketHeap[i]->s.genSocket)))
      {
        int recv = 0;
        UDPpacket *p = NULL;
        PacketQue *ub = &socketHeap[i]->q.ub;

        if (PacketQueFull(ub))
        {
          waitForRead = 1;
        }
        else
        {
          while ((!PacketQueFull(ub)) &&
                 (NULL != (p = snUDPAllocPacket(socketHeap[i]->p.udpLen))) &&
                 (1 == (recv = snUDPRecv(socketHeap[i]->s.udpSocket, p))))
          {
            if (PacketQueEmpty(ub))
            {
              EnquePacket(ub, &p);
              sendEvent(NET2_UDPRECEIVEEVENT, i, -1);
            }
            else
            {
              EnquePacket(ub, &p);
            }
          }

          // unravel terminating conditions and free left over memory
          // if we need to
          if (!PacketQueFull(ub)) // if the packet queue is full then everything is fine
          {
            if (NULL != p) // couldn't alloc a packet
            {
              if (0 >= recv) // ran out of packets
              {
                snUDPFreePacket(p);
              }
            }
            else
            {
              sendError("NET2: out of memory", i);
            }
          }
        }
      }
    }
    unlockData();
  }

  return 0;
}
Example #27
void NET2_TCPClose(int socket)
{
  lockData();
  raw_NET2_TCPClose(socket);
  unlockData();
}