Example no. 1
//-----------------------------------------------------------------------------
C700Driver::C700Driver()
: mSampleRate(44100.0),
  mVelocityMode( kVelocityMode_Square ),
  mVPset(NULL)
{
    MutexInit(mREGLOGEvtMtx);
    MutexInit(mMIDIEvtMtx);
    
    for ( int i=0; i<NUM_BANKS; i++ ) {
        mDrumMode[i] = false;
    }
    for ( int bnk=0; bnk<NUM_BANKS; bnk++ ) {
        for ( int i=0; i<128; i++ ) {
            mKeyMap[bnk][i] = 0;
        }
    }
    //Initialize
    mVibfreq = 0.00137445;
    mVibdepth = 0.5;

    mVoiceLimit = 8;
    mIsAccurateMode = false;
    mFastReleaseAsKeyOff = true;

    mEventDelayClocks = 8192;   // 8ms
    mEventDelaySamples = calcEventDelaySamples();

    for (int i=0; i<16; i++) {
        mChStat[i].changeFlg = 0;
        mChStat[i].prog = 0;
        mChStat[i].pitchBend = 0;
        mChStat[i].vibDepth = 0;
        mChStat[i].pbRange = static_cast<float>(DEFAULT_PBRANGE);
        //mChStat[i].portaOn = false;
        mChStat[i].portaTc = 1.0f;
        mChStat[i].portaStartPitch = 0;
        mChStat[i].volume = VOLUME_DEFAULT;
        mChStat[i].expression = EXPRESSION_DEFAULT;
        mChStat[i].pan = 64;
        //mChStat[i].releasePriority = 0;
        mChStat[i].damper = false;

        mChStat[i].lastNote = 0;
    }
    mVoiceManager.Initialize(8);

    Reset();
}
Example no. 2
MUTEX * avtExecutionManager::FindMutex( const MUTEX_ID id )
{
    if (tPool == NULL)
        return NULL;

    std::map<MUTEX_ID, MUTEX *>::iterator it;
    MUTEX *lock;

    MutexLock( &mutexMapLock );

    it = mutexMap.find( id );
    if( it == mutexMap.end() )
    {
        // Not found, create it.
        lock = new MUTEX;
        MutexInit( lock );

        mutexMap.insert( std::pair<MUTEX_ID, MUTEX *>(id, lock) );
    }
    else
    {
        lock = it->second;
    }

    MutexUnlock( &mutexMapLock );

    return( lock );
}
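FindMutex lazily creates one mutex per identifier and keeps its own bookkeeping thread-safe with mutexMapLock. A minimal caller-side sketch, reusing the MutexLock/MutexUnlock(MUTEX *) calls visible above; the method name and the work inside the critical section are illustrative:

void avtExecutionManager::UpdateSharedState(const MUTEX_ID id)
{
    MUTEX *lock = FindMutex(id);   // looked up, or created on first use

    MutexLock(lock);
    // ... mutate the state associated with this id ...
    MutexUnlock(lock);
}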
Example no. 3
  void BuildQueueInit(BuildQueue* queue, const BuildQueueConfig* config)
  {
    CHECK(config->m_MaxExpensiveCount > 0 && config->m_MaxExpensiveCount <= config->m_ThreadCount);

    MutexInit(&queue->m_Lock);
    CondInit(&queue->m_WorkAvailable);

    // Compute queue capacity. Allocate space for a power of two number of
    // indices that's at least one larger than the max number of nodes. Because
    // the queue is treated as a ring buffer, we want W=R to mean an empty
    // buffer.
    uint32_t capacity = NextPowerOfTwo(config->m_MaxNodes + 1);

    MemAllocHeap* heap = config->m_Heap;

    queue->m_Queue              = HeapAllocateArray<int32_t>(heap, capacity);
    queue->m_QueueReadIndex     = 0;
    queue->m_QueueWriteIndex    = 0;
    queue->m_QueueCapacity      = capacity;
    queue->m_Config             = *config;
    queue->m_PendingNodeCount   = 0;
    queue->m_FailedNodeCount    = 0;
    queue->m_QuitSignalled      = false;
    queue->m_ExpensiveRunning   = 0;
    queue->m_ExpensiveWaitCount = 0;
    queue->m_ExpensiveWaitList  = HeapAllocateArray<NodeState*>(heap, capacity);

    CHECK(queue->m_Queue);

    if (queue->m_Config.m_ThreadCount > kMaxBuildThreads)
    {
      Log(kWarning, "too many build threads (%d) - clamping to %d",
          queue->m_Config.m_ThreadCount, kMaxBuildThreads);

      queue->m_Config.m_ThreadCount = kMaxBuildThreads;
    }

    Log(kDebug, "build queue initialized; ring buffer capacity = %u", queue->m_QueueCapacity);

    // Block all signals on the main thread.
    SignalBlockThread(true);
    SignalHandlerSetCondition(&queue->m_WorkAvailable);

    // Create build threads.
    for (int i = 0, thread_count = config->m_ThreadCount; i < thread_count; ++i)
    {
      ThreadState* thread_state = &queue->m_ThreadState[i];

      ThreadStateInit(thread_state, queue, MB(64), MB(32), i);

      if (i > 0)
      {
        Log(kDebug, "starting build thread %d", i);
        queue->m_Threads[i] = ThreadStart(BuildThreadRoutine, thread_state);
      }
    }
  }
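The capacity computation above is the usual power-of-two ring-buffer trick: reserving one extra slot before rounding up lets "read index == write index" mean empty without an extra counter, and the power-of-two size allows wrapping with a mask. A standalone sketch of that indexing scheme (illustrative only, not the build queue's own push/pop code):

#include <stdint.h>
#include <stdbool.h>

typedef struct {
    int32_t  *items;
    uint32_t  read;
    uint32_t  write;
    uint32_t  capacity;   /* power of two, e.g. NextPowerOfTwo(max_nodes + 1) */
} Ring;

static bool RingPush(Ring *r, int32_t v)
{
    uint32_t next = (r->write + 1) & (r->capacity - 1);  /* cheap wrap: mask, not modulo */
    if (next == r->read)
        return false;   /* "full" leaves one slot unused, so read == write can mean empty */
    r->items[r->write] = v;
    r->write = next;
    return true;
}

static bool RingPop(Ring *r, int32_t *out)
{
    if (r->read == r->write)
        return false;                              /* empty */
    *out = r->items[r->read];
    r->read = (r->read + 1) & (r->capacity - 1);
    return true;
}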
Example no. 4
Condition::Condition(void)
{
    _mutex = MutexInit();

#if defined(POSIX_THREADS)
    _condition = malloc(sizeof(cond_t));
    cond_init((cond_t *)_condition, USYNC_THREAD, NULL);
#endif

}
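cond_init with USYNC_THREAD is the Solaris UI-threads API, and the cond_t is heap-allocated here, so a matching teardown has to both destroy the condition variable and free the allocation. A sketch of what that destructor might look like (the real one is not shown, and whatever counterpart MutexInit has for _mutex is omitted):

Condition::~Condition(void)
{
#if defined(POSIX_THREADS)
    cond_destroy((cond_t *)_condition);   // undo cond_init
    free(_condition);                     // undo the malloc in the constructor
#endif
}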
Example no. 5
bool MQTTClientInit(MQTTClient *c, Network *network, unsigned int command_timeout_ms,
                    unsigned char *sendbuf, size_t sendbuf_size, unsigned char *readbuf, size_t readbuf_size)
{
    int i;
    c->ipstack = network;

    for (i = 0; i < MAX_MESSAGE_HANDLERS; ++i) {
        c->messageHandlers[i].topicFilter = 0;
    }

    if (command_timeout_ms != 0) {
        c->command_timeout_ms = command_timeout_ms;
    } else {
        c->command_timeout_ms = CONFIG_MQTT_SEND_CYCLE;
    }

    if (sendbuf) {
        c->buf = sendbuf;
        c->buf_size = sendbuf_size;
    } else {
        c->buf = (unsigned char *)malloc(CONFIG_MQTT_SEND_BUFFER);

        if (c->buf) {
            c->buf_size = CONFIG_MQTT_SEND_BUFFER;
        } else {
            return false;
        }
    }

    if (readbuf) {
        c->readbuf = readbuf;
        c->readbuf_size = readbuf_size;
    } else {
        c->readbuf = (unsigned char *)malloc(CONFIG_MQTT_RECV_BUFFER);

        if (c->readbuf) {
            c->readbuf_size = CONFIG_MQTT_RECV_BUFFER;
        } else {
            return false;
        }
    }

    c->isconnected = 0;
    c->cleansession = 0;
    c->ping_outstanding = 0;
    c->defaultMessageHandler = NULL;
    c->next_packetid = 1;
    TimerInit(&c->last_sent);
    TimerInit(&c->last_received);
    TimerInit(&c->ping_wait);
#if defined(MQTT_TASK)
    MutexInit(&c->mutex);
#endif
    return true;
}
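MQTTClientInit accepts caller-owned buffers or, when they are NULL, falls back to malloc'ing CONFIG_MQTT_SEND_BUFFER / CONFIG_MQTT_RECV_BUFFER bytes. A minimal call sketch with static buffers; the Network object is assumed to be set up elsewhere and the buffer sizes are illustrative:

static unsigned char txbuf[256];
static unsigned char rxbuf[256];

MQTTClient client;
Network    net;    /* assumed to be initialized/connected by other code */

/* Passing 0 for command_timeout_ms selects the CONFIG_MQTT_SEND_CYCLE default. */
if (!MQTTClientInit(&client, &net, 0, txbuf, sizeof(txbuf), rxbuf, sizeof(rxbuf))) {
    /* only reachable when NULL buffers force the malloc fallback and it fails */
}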
Example no. 6
void InitDebug (void)
{
	int32 t;
	
	MutexInit (&dbg_mutex);
	
	klog_to_screen = TRUE;
	KClearScreen();
	
	current_log = 0;
	
	for (t=0; t< KLOG_ENTRIES; t++)
	{
		klog_entry[t][0] = '\0';
	}
}
Example no. 7
void HeapInit(MemAllocHeap* heap, size_t capacity, uint32_t flags)
{
#if ENABLED(USE_DLMALLOC)
  heap->m_MemSpace = create_mspace(capacity, 0);
  if (!heap->m_MemSpace)
    Croak("couldn't create memspace for new heap");
#else
  heap->m_MemSpace = nullptr;
#endif

  heap->m_Flags = flags;

  if (flags & HeapFlags::kThreadSafe)
  {
    MutexInit(&heap->m_Lock);
  }
}
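Only heaps created with HeapFlags::kThreadSafe get a lock; everything else must stay confined to a single thread. A call-site sketch, borrowing the MB() size helper seen in the BuildQueueInit example (the sizes and the second heap are illustrative):

MemAllocHeap shared_heap;
MemAllocHeap scratch_heap;

HeapInit(&shared_heap,  MB(64), HeapFlags::kThreadSafe);  // shared between threads, so it gets a lock
HeapInit(&scratch_heap, MB(16), 0);                       // no flags: no MutexInit, keep it thread-local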
Example no. 8
int THQCreate(THQueue *pQueue_Arg)
#ifdef BODYDEF
{
	assert(pQueue_Arg);
	/* Allocate the buffer  */
	pQueue_Arg->ppBuf = malloc(sizeof(char *)*THQUEUE_SIZE);
	assert(pQueue_Arg->ppBuf);
	MutexInit(&pQueue_Arg->Lock);
	SemaphoreInit(&pQueue_Arg->Empty, THQUEUE_SIZE);
	SemaphoreInit(&pQueue_Arg->Full, 0);
	pQueue_Arg->Size = THQUEUE_SIZE;
	pQueue_Arg->Head = 0;
	pQueue_Arg->Tail = 0;


	return 0;
}
#endif /* BODYDEF */
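Empty starting at THQUEUE_SIZE and Full starting at 0 is the classic bounded-buffer arrangement: producers wait on Empty, consumers wait on Full, and the mutex only covers the index updates. A sketch of the enqueue side those initial counts imply; THQEnqueue, SemaphoreWait and SemaphorePost are assumed names, while MutexLock/MutexUnlock match the calls used elsewhere in these examples:

int THQEnqueue(THQueue *pQueue_Arg, char *pItem)
{
	SemaphoreWait(&pQueue_Arg->Empty);      /* block while all THQUEUE_SIZE slots are full */
	MutexLock(&pQueue_Arg->Lock);           /* protect only the Head/Tail bookkeeping */

	pQueue_Arg->ppBuf[pQueue_Arg->Head] = pItem;
	pQueue_Arg->Head = (pQueue_Arg->Head + 1) % pQueue_Arg->Size;

	MutexUnlock(&pQueue_Arg->Lock);
	SemaphorePost(&pQueue_Arg->Full);       /* announce one filled slot to consumers */
	return 0;
}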
Example no. 9
void GBASIOLockstepInit(struct GBASIOLockstep* lockstep) {
    lockstep->players[0] = 0;
    lockstep->players[1] = 0;
    lockstep->players[2] = 0;
    lockstep->players[3] = 0;
    lockstep->multiRecv[0] = 0xFFFF;
    lockstep->multiRecv[1] = 0xFFFF;
    lockstep->multiRecv[2] = 0xFFFF;
    lockstep->multiRecv[3] = 0xFFFF;
    lockstep->attached = 0;
    lockstep->loaded = 0;
    lockstep->transferActive = false;
    lockstep->waiting = 0;
    lockstep->nextEvent = LOCKSTEP_INCREMENT;
    ConditionInit(&lockstep->barrier);
    MutexInit(&lockstep->mutex);
}
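barrier is a condition variable paired with mutex, so code that has to wait for the other attached players follows the usual lock / re-check / wait loop. A hedged sketch of that pattern; ConditionWait is assumed to be the counterpart of the ConditionInit above, and the predicate shown is purely illustrative:

MutexLock(&lockstep->mutex);
while (lockstep->waiting < lockstep->attached) {
    /* atomically releases the mutex, sleeps, and re-acquires it on wake-up */
    ConditionWait(&lockstep->barrier, &lockstep->mutex);
}
MutexUnlock(&lockstep->mutex);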
Example no. 10
/*******************************************************************
 * Function bodies.
 *******************************************************************/
void fm_zonemap_pol_get_lineage_init()
{
	int32			*i_ptr = NULL;
	int32			error = 0;

	PIN_HEAP_VAR;

	PIN_ERR_LOG_MSG(PIN_ERR_LEVEL_DEBUG,
		"fm_zonemap_pol_get_lineage_init Enter");
	/*
	 * Init ProductHashArray
	 */
	if (nInitDone == PIN_BOOLEAN_FALSE) {
		nInitDone = PIN_BOOLEAN_TRUE;

		/*
		 * initialize the interval
		 */
	
		interval = 3600;
		/*
		 * read the value of this variable from the pin.conf file. If it
		 * doesn't exist then the default value stays.
		 */
		pin_conf("fm_zonemap_pol", "update_interval", PIN_FLDT_INT,
			(caddr_t *)&i_ptr, &error);
		if (error == PIN_ERR_NONE) {
			interval = *i_ptr;
		}
		if (i_ptr) {
			free(i_ptr);
		}
		PIN_SET_GLOBAL_HEAP;

		/*
		 * Init mutex
		 */
		MutexInit(HashTableLock);

		PIN_RESET_GLOBAL_HEAP;
	}
	PIN_ERR_LOG_MSG(PIN_ERR_LEVEL_DEBUG,
		"fm_zonemap_pol_get_lineage_init Exit");
}
Example no. 11
SafeScalarImpl::SafeScalarImpl(void)
{
    _mutex = MutexInit();
    _value = 0;
}
Example no. 12
avtExecutionManager::avtExecutionManager()
{
    tPool = NULL;
    MutexInit( &mutexMapLock );
    numThreads = 0;
}
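This constructor is the other half of Example no. 2: mutexMapLock protects the mutexMap that FindMutex grows on demand. A sketch of the teardown such a map eventually needs; the real destructor is not shown, and MutexDestroy is assumed to exist as the counterpart of MutexInit:

avtExecutionManager::~avtExecutionManager()
{
    MutexLock(&mutexMapLock);
    std::map<MUTEX_ID, MUTEX *>::iterator it;
    for (it = mutexMap.begin(); it != mutexMap.end(); ++it)
    {
        MutexDestroy(it->second);   // undo the MutexInit done in FindMutex
        delete it->second;          // release the heap-allocated MUTEX
    }
    mutexMap.clear();
    MutexUnlock(&mutexMapLock);
    MutexDestroy(&mutexMapLock);
}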
Example no. 13
/*******************************************************************
 * fm_zonemap_pol_get_lineage_get_zonemap():
 *******************************************************************/
static ZonemapHashEntry_t *
fm_zonemap_pol_get_lineage_get_zonemap(
	pcm_context_t		*pCtx, 
	poid_t			*pRoutingPoid, 
	poid_t			*pBrandPoid, 
	ZonemapHashEntry_t	**apHashTable, 
	const char		*pszTarget, 
	pin_errbuf_t		*ebufp) 
{

	int32			nTargetHash;
	int32			nLoadStatus;
	ZonemapHashEntry_t	*pHashEntry;
	ZonemapHashEntry_t	*pBucket;
	ZonemapHashEntry_t	*pCurNode;
	Blob_t			**ppBuffer;
	poid_t			**ppMatrixPoid;
	int32			*pnSearchMode;

	int32			nLoadFromDB;
	int32			nUnlockNode = PIN_BOOLEAN_FALSE;
	time_t			current_time = 0;

	PIN_HEAP_VAR;

	/* The HashTableLock has already been acquired by the time
	 * this procedure is called.
	 */
	pHashEntry = pBucket = NULL;

	if (PIN_ERR_IS_ERR(ebufp)) {
		return NULL;
	}

	/* Debug */
	PIN_ERR_LOG_MSG(PIN_ERR_LEVEL_DEBUG, 
		"fm_zonemap_pol_get_lineage_get_zonemap starting");

	/*
	 * Hash zone name
	 */
	nTargetHash = fm_zonemap_pol_get_lineage_hash_zone_name(pszTarget);

	/* Get bucket */
	pBucket = apHashTable[nTargetHash];

	/* Assume we don't have to load from DB */
	nLoadFromDB = PIN_BOOLEAN_FALSE;

	/*
	 * Have we already loaded this matrix?
	 */
	pHashEntry = fm_zonemap_pol_get_lineage_find_bucket(pBucket, 
		pszTarget, ebufp);
	if (pHashEntry != NULL) {
		
		/*
		 * do a revision check only if a certain time has 
		 * elapsed
		 */
		current_time = pin_virtual_time((time_t*)NULL);
		if(current_time > (pHashEntry->lastUpdate + interval)) {

			/*
			 * reset the timer
			 */
			 pHashEntry->lastUpdate = current_time;
			/*
			 * Yes: Has it changed since last load?
			 */
			if (fm_zonemap_pol_get_lineage_rev_changed(pCtx,
				pHashEntry->pMatrixPoid, ebufp)) {

				/*
				 * Yes: Free old data & reload matrix from DB
				 */

				/* Lock the node */
				MutexLock(pHashEntry->Lock);

				/* Remove node from bucket */
				for (pCurNode = pBucket; pCurNode != NULL;
					pCurNode = pCurNode->pNext) {

					if (pCurNode->pNext == pHashEntry) {
						pCurNode->pNext = 
							pHashEntry->pNext;
						break;
					}
				}

				PIN_SET_GLOBAL_HEAP;

				/* Free buffer */
				free(pHashEntry->pBuffer);
				pHashEntry->pBuffer = NULL;

				/* Free matrix poid */
				PIN_POID_DESTROY(pHashEntry->pMatrixPoid,
					NULL);
				pHashEntry->pMatrixPoid = NULL;

				PIN_RESET_GLOBAL_HEAP;

				/* Indicate that we need to load from DB */
				nLoadFromDB = PIN_BOOLEAN_TRUE;

				/* Indicate that we need to unlock the node */
				nUnlockNode = PIN_BOOLEAN_TRUE;
			}
		}
	}
	else {
		/*
		 * No: Add new node to bucket
		 */

		PIN_SET_GLOBAL_HEAP;

		/* Create & init new bucket */
		pHashEntry = (ZonemapHashEntry_t *) 
			malloc(sizeof(ZonemapHashEntry_t));

		/* Verify that memory was allocated */
		if (pHashEntry != NULL) {

			pHashEntry->pszZonemapName =
				malloc(strlen(pszTarget) + 1);
			if (pHashEntry->pszZonemapName == NULL) {
				pin_set_err(ebufp, PIN_ERRLOC_FM, 
					PIN_ERRCLASS_SYSTEM_DETERMINATE, 
					PIN_ERR_NO_MEM, 0, 0, 0);
				PIN_ERR_LOG_EBUF(PIN_ERR_LEVEL_ERROR, 
					"fm_zonemap_pol_get_lineage_get_zonemap: "
					"failed to allocate memory for zonemap name",
					ebufp);
				free(pHashEntry);
				pHashEntry = NULL;

				goto Done;
			}

			strcpy((char*) pHashEntry->pszZonemapName, pszTarget);
			pHashEntry->pNext = NULL;
			pHashEntry->pBuffer = NULL;

			/* Init semaphore */
			MutexInit(pHashEntry->Lock);
		}

		PIN_RESET_GLOBAL_HEAP;

		/* Verify that memory was allocated */
		if (pHashEntry == NULL) {

			pin_set_err(ebufp, PIN_ERRLOC_FM, 
				PIN_ERRCLASS_SYSTEM_DETERMINATE, 
				PIN_ERR_NO_MEM, 0, 0, 0);
			PIN_ERR_LOG_EBUF(PIN_ERR_LEVEL_ERROR, 
				"fm_zonemap_pol_get_lineage_get_zonemap: "
				"failed to allocate memory for hash entry",
				ebufp);

			goto Done;
		}

		/* Indicate that we need to load from DB */
		nLoadFromDB = PIN_BOOLEAN_TRUE;
	}

	/* Do we need to load from the database? */
	if (nLoadFromDB == PIN_BOOLEAN_TRUE) {
		/*
		 * Yes: Load matrix from DB
		 */

		/* Copy trie address from global to local mem */
		ppBuffer = &(pHashEntry->pBuffer);
		ppMatrixPoid = &(pHashEntry->pMatrixPoid);
		pnSearchMode = &(pHashEntry->nDefaultSearchMode);
				
		/* Attempt to read matrix from DB */
		nLoadStatus = fm_zonemap_pol_get_lineage_load_zonemap(
			pCtx, pszTarget, pRoutingPoid, ppMatrixPoid,pBrandPoid, 
			ppBuffer, pnSearchMode, ebufp);

		PIN_SET_GLOBAL_HEAP;

		/* Did we load a matrix? */
		if (nLoadStatus == PIN_BOOLEAN_FALSE) {
			/* Failed: set return value */
			/* If this happened on a RELOAD, we have more 
			 * work to do
			 */
			
			if (nUnlockNode == PIN_BOOLEAN_TRUE) {
				/*
				 * Yes: Unlock the node
				 */
				MutexUnlock(pHashEntry->Lock);
				nUnlockNode = PIN_BOOLEAN_FALSE;
			}

			free((char*)pHashEntry->pszZonemapName);
			pHashEntry->pszZonemapName = NULL;
			/* Destroy the mutex */
			MutexDestroy(pHashEntry->Lock);
			free(pHashEntry);
			pHashEntry = NULL;
		}
		else {

			/* 
			 * Add bucket to hash table
			 */
			pHashEntry->pNext = apHashTable[nTargetHash];
			apHashTable[nTargetHash] = pHashEntry;
			pHashEntry->lastUpdate = pin_virtual_time((time_t *)NULL);
		}

		PIN_RESET_GLOBAL_HEAP;

	}
Done:
	/* Do we need to unlock the node? */
	if (nUnlockNode == PIN_BOOLEAN_TRUE) {
		/*
		 * Yes: Unlock the node
		 */
		MutexUnlock(pHashEntry->Lock);
	}

	/* Debug */
	PIN_ERR_LOG_MSG(PIN_ERR_LEVEL_DEBUG, 
		"fm_zonemap_pol_get_lineage_get_zonemap returning");

	return pHashEntry;
}
Example no. 14
DtMail::Session::Session(DtMailEnv & error, const char * app_name)
: _events(16), _valid_keys(2048)
{
    _DtMutex = MutexInit();

    error.clear();

    _object_signature = 0;
    _cur_key = 0;

    // Create the ToolTalk session for managing file locking,
    // if one doesn't exist.
    _tt_channel = tt_default_procid();
    if (tt_pointer_error(_tt_channel) != TT_OK) {
	_tt_channel = ttdt_open(&_tt_fd, app_name, "SunSoft", "%I", 0);
	if (tt_pointer_error(_tt_channel) != TT_OK) {
	    error.setError(DTME_TTFailure);
	    DebugPrintf(1,
			"DtMail::createSession - ttdt_open returns %s\n",
			tt_status_message(tt_pointer_error(_tt_channel)));
	    return;
	}
    }
    else {
	_tt_fd = tt_fd();
    }

    // The event_fd is how we allow async behavior to occur in a
    // compatible way. We use a Unix domain socket as the file descriptor.
    // The client will watch for activity on this file descriptor, and
    // call our event routine when there is activity (either from XtMainLoop,
    // or through some other method).
    //
    pipe(_event_fd);

    _app_name = strdup(app_name);

    DtMailEnv b_error;

    _mail_rc = new MailRc(error, this);

    buildImplTable(error);
    if (error.isSet()) {
	return;
    }

    _obj_mutex = MutexInit();

    // The default implementation is specified via the DEFAULT_BACKEND
    // variable. If this is not set in the .mailrc, then choose entry
    // zero.
    //
    const char * value;
    _mail_rc->getValue(b_error, "DEFAULT_BACKEND", &value);
    if (b_error.isNotSet()) {
	_default_impl = lookupImpl(value);
	if (_default_impl < 0) {
	    _default_impl = 0;
	}
    }
    else {
	b_error.clear();
	_default_impl = 0;
    }

    DtMailSigChldList = new DtVirtArray<SigChldInfo *>(8);

    _busy_cb = NULL;
    _busy_cb_data = NULL;
    _canAutoSave = DTM_TRUE;

    _object_signature = SessionSignature;

    return;
}
Example no. 15
int 
tkNetMain(int pa_argn,char **in_args)
{
	struct KeyInfoCache        KeyInfoCache;
	struct ProcessingList      ProcList;
	struct BackGroundArgs      BkgdArgs;
	struct PeerData            PeerDataRoot;
	struct Iterator            ISeedPeer;
	struct Sock                MainSock;
	struct BridgeProc          BdgServerProc;
	struct BridgeProc          BdgClientProc;
	char                       BdgPeerAddrStr[32];
	char                       *pTargetName = NULL;
	BOOL                       ifClientSkipRegister = 1;
	int                        TestPurposeNatType;
	struct BridgeClientProcPa  *pBCPPa = NULL;

	printf("tknet \n build: " TKNET_VER "\n");

	tkNetInit();
	MutexInit(&g_BkgdMutex);

	ISeedPeer = GetIterator(NULL);

	PeerDataCons(&PeerDataRoot);
	PeerDataRoot.tpnd.RanPriority = 0;
	PeerDataRoot.addr.port = 0;
	PeerDataRoot.addr.IPv4 = 0;

	ProcessingListCons( &ProcList );

	RelayModuleInit();

	KeyInfoCacheCons(&KeyInfoCache);
	if(!KeyInfoReadFile(&KeyInfoCache,"tknet.info"))
	{
		printf("config file lost.\n");
		goto exit;
	}

	if(!KeyInfoTry(&KeyInfoCache,KEY_INFO_TYPE_CONFIG,&MainSock))
	{
		printf("bad config format.\n");
		goto exit;
	}
	
	if( g_TargetName[0] != '\0' )
	{
		printf("target name: %s \n", g_TargetName);
		tkNetConnect(NULL);
	}
	else
	{
		printf("target name unset. \n");
	}

	if(g_ifConfigAsFullCone)
	{
		g_NATtype = NAT_T_FULL_CONE;
		printf("config NAT type as fullcone.\n");
	}
	else
	{
		while(!KeyInfoTry(&KeyInfoCache,KEY_INFO_TYPE_STUNSERVER,&MainSock))
		{
			if(!KeyInfoTry(&KeyInfoCache,KEY_INFO_TYPE_MAILSERVER,&MainSock))
			{
				printf("No way to get NAT type.\n");
				goto exit;
			}
		}
		
		printf("NAT type got from STUN: %d\n",g_NATtype);
	}

	if(pa_argn == 2)
	{
		sscanf(in_args[1],"%d",&TestPurposeNatType);
		g_NATtype = (uchar)TestPurposeNatType;
	}
		
	printf("final NAT type: %d\n",g_NATtype);

	while(!KeyInfoTry(&KeyInfoCache,KEY_INFO_TYPE_BRIDGEPEER,&MainSock))
	{
		if(!KeyInfoTry(&KeyInfoCache,KEY_INFO_TYPE_MAILSERVER,&MainSock))
		{
			printf("no avalible Bridge peer.\n");
			goto no_bdg_peer;
		}
	}

	GetAddrText(&g_BdgPeerAddr,BdgPeerAddrStr);
	printf("using Bridge peer: %s\n",BdgPeerAddrStr);
	ifClientSkipRegister = 0;

no_bdg_peer:

	pBCPPa = BridgeMakeClientProc(&BdgClientProc,&MainSock,&ProcList,&g_BdgPeerAddr,
			g_MyName,g_NATtype,pTargetName,ifClientSkipRegister);
	ProcessStart(&BdgClientProc.proc,&ProcList);

	if(g_ifBkgdEnable)
		printf("back ground enabled.\n");
	else
		printf("back ground disabled.\n");

	BkgdArgs.pPeerDataRoot = &PeerDataRoot;
	BkgdArgs.pInfoCache = &KeyInfoCache;
	BkgdArgs.pProcList = &ProcList;
	BkgdArgs.pBdgClientProc = &BdgClientProc;
	BkgdArgs.pMainSock = &MainSock;
	tkBeginThread( &BackGround , &BkgdArgs );

	ConsAndStartBridgeServer(&BdgServerProc,&PeerDataRoot,&ProcList,&MainSock,&ISeedPeer);

	while( g_MainLoopFlag )
	{
		MutexLock(&g_BkgdMutex);
		if(!ifBkgdStunProc())
			SockRead(&MainSock);
		DoProcessing( &ProcList );
		if(!ifBkgdStunProc())
			MainSock.RecvLen = 0;
		MutexUnlock(&g_BkgdMutex);

		if(sta_ifBeginNewConnection && pBCPPa)
		{
			pBCPPa->pTargetNameID = g_TargetName;
			sta_ifBeginNewConnection = 0;
		}

		tkMsSleep(100);
	}

	SockClose(&MainSock);

	FreeBdgClientProc(&BdgClientProc);
	FreeBridgeServer(&BdgServerProc);

exit:

	PeerDataDestroy(&PeerDataRoot,&ISeedPeer);
	KeyInfoUpdate( &KeyInfoCache );
	KeyInfoWriteFile(&KeyInfoCache,"tknet.updateinfo");
	KeyInfoFree(&KeyInfoCache);
	RelayMuduleDestruction();
	MutexDelete(&g_BkgdMutex);
	tkNetUninit();

	return 0;
}
Example no. 16
FSTATUS
SMALoad(
	IN IBT_COMPONENT_INFO	 *ComponentInfo
	)
{
	FSTATUS				status = FSUCCESS;

	_DBG_ENTER_LVL(_DBG_LVL_MAIN, SmaLoad);
	_DBG_INIT;
	
	_TRC_REGISTER();

#if !defined(VXWORKS)
	_DBG_PRINT(_DBG_LVL_MAIN,  
	(" InfiniBand Subnet Management Agent. Built %s %s\n",\
	__DATE__, __TIME__ ));
#else
	_DBG_PRINT(_DBG_LVL_MAIN,  
	(" InfiniBand Subnet Management Agent. Built %s %s\n",\
	_DBG_PTR(__DATE__), _DBG_PTR(__TIME__) ));
#endif

    // Establish dispatch entry points for the functions supported.
	MemoryClear( ComponentInfo, sizeof(IBT_COMPONENT_INFO) );
	
	ComponentInfo->AddDevice = SmaAddDevice;
	ComponentInfo->RemoveDevice = SmaRemoveDevice;
	ComponentInfo->Unload = SMAUnload;

    // Read Global settings for the driver which may be set in a 
	// OS specific way.
	SmaInitGlobalSettings();
	
	// Allocate space for Global data
	g_Sma = (SMA_GLOBAL_INFO*)MemoryAllocate2AndClear(sizeof(SMA_GLOBAL_INFO), IBA_MEM_FLAG_PREMPTABLE, SMA_MEM_TAG);
	if ( NULL == g_Sma )
	{
		_DBG_ERROR(("MemAlloc failed for g_Sma!\n"));
		goto done;
	}

	// initialize global data
	g_Sma->NumCa = 0;
	g_Sma->CaObj = NULL;
	g_Sma->WorkReqRecv = g_Sma->WorkReqSend = NULL;

	g_Sma->SmUserTbl = NULL;
	g_Sma->NumUser = 0;

	// SpinLockInitState( &g_Sma->Lock )
	// SpinLockInit( &g_Sma->Lock )

	// Init Storage area for MADs
	g_Sma->Bin.NumBlocks = 0;
	g_Sma->Bin.Head = g_Sma->Bin.Tail = NULL;
	g_Sma->Bin.MemList = NULL;
	g_Sma->Bin.CurrentIndex = 0;			// set start mem index

	// Locks
	SpinLockInitState( &g_Sma->CaListLock );
	SpinLockInit( &g_Sma->CaListLock );
	SpinLockInitState( &g_Sma->Bin.Lock );
	SpinLockInit( &g_Sma->Bin.Lock );
	MutexInitState( &g_Sma->Bin.Mutex );
	MutexInit( &g_Sma->Bin.Mutex );
	SpinLockInitState( &g_Sma->RQ.Lock );
	SpinLockInit( &g_Sma->RQ.Lock );
	
	// Init Global Ibt user group for notifications
	IbtInitNotifyGroup(NotifyIbtCallback);

	// Allocate memory for Global GRH since the SMA does not need a GRH.
	// This memory will automatically get mapped to all CA registrations.
	g_Sma->GlobalGrh = CreateGlobalMemList( 0, sizeof(IB_GRH), 0, FALSE );
	if ( NULL == g_Sma->GlobalGrh )
	{
		status = FINSUFFICIENT_RESOURCES;
		goto failmemlist;
	}
		
	g_Sma->GlobalGrh->VirtualAddr = MemoryAllocate2AndClear(sizeof(IB_GRH), IBA_MEM_FLAG_PREMPTABLE, SMA_MEM_TAG);
	if ( NULL == g_Sma->GlobalGrh->VirtualAddr )
	{
		status = FINSUFFICIENT_RESOURCES;
		goto failgrhvirt;
	}
	g_Sma->GlobalGrh->AccessControl.AsUINT16 = 0;
	g_Sma->GlobalGrh->AccessControl.s.LocalWrite = 1;
	g_Sma->GlobalGrh->CaMemIndex = 0;

	// Increment index for future allocations
	g_Sma->Bin.CurrentIndex++;

done:
	_DBG_LEAVE_LVL(_DBG_LVL_MAIN);
    return status;

failgrhvirt:
	MemoryDeallocate( g_Sma->GlobalGrh );
failmemlist:
	IbtDestroyNotifyGroup();
	SpinLockDestroy( &g_Sma->RQ.Lock );
	MutexDestroy( &g_Sma->Bin.Mutex );
	SpinLockDestroy( &g_Sma->Bin.Lock );
	SpinLockDestroy( &g_Sma->CaListLock );
	MemoryDeallocate( g_Sma );
	goto done;
}
Example no. 17
gxMutex::gxMutex(gxProcessType type)
{
  MutexInit(type);
}