Example #1
bool
PoolCreate(TPool *  const poolP,
           uint32_t const zonesize) {

    bool success;
    bool mutexCreated;

    poolP->zonesize = zonesize;

    mutexCreated = MutexCreate(&poolP->mutexP);
    if (mutexCreated) {
        TPoolZone * const firstZoneP = PoolZoneAlloc(zonesize);

        if (firstZoneP != NULL) {
            poolP->firstzone   = firstZoneP;
            poolP->currentzone = firstZoneP;
            success = TRUE;
        } else
            success = FALSE;
        if (!success)
            MutexDestroy(poolP->mutexP);
    } else
        success = FALSE;

    return success;
}
Example #2
File: mpd.c Project: avih/miniweb
int ehMpd(MW_EVENT event, int argi, void* argp)
{
	switch (event) {
	case MW_INIT:
		if (mpConsoleMutex) return 0;	// already inited
		memset(&mpx,0,sizeof(mpx));
		MutexCreate(&mpConsoleMutex);
		if (loopclip) ThreadCreate(&mpThreadHandle, mpThread, 0);
		break;
	case MW_UNINIT:
		MutexDestroy(&mpConsoleMutex);
		mpClose();
		break;
	case MW_PARSE_ARGS: {
		int i = 0;
		char** argv = (char**)argp;
		for (i = 0; i < argi; i++) {
			if (!strcmp(argv[i], "--mploop")) {
				loopclip = argv[++i];
				break;
			} else if (!strcmp(argv[i], "--mpbin")) {
				mpbin = argv[++i];
			}
		}
		} break;
	}
	return 0;
}
Example #3
/**
 *	Initializes the mutexes needed for these functions to work
 */
void
tspinit( void )
{
        /*
         *      In WIN32 we will reopen the stdout and stdin streams to
         *      allow ts functions to work even when this is not a
         *      console application.
         */
        //#ifdef GUCEF_MSWIN_BUILD
        //  #ifdef GUCEF_CORE_DEBUG_MODE        
        //  AllocConsole();
        //  #endif
        //
        ///* reopen stdin handle as console window input */
        //freopen("CONIN$","rb",stdin);
        ///* reopen stdout handle as console window output */
        //freopen("CONOUT$","wb",stdout);
        ///* reopen stderr handle as console window output */
        //freopen("CONOUT$","wb",stderr);
        //#endif /* GUCEF_MSWIN_BUILD */
                
        if ( !init )
        {
                #ifdef USE_TSP_MUTEX
                lock = MutexCreate();
                if ( !lock ) return;
                #endif /* USE_TSP_MUTEX ? */  
                                
                init = 1;
        }
}
Example #4
static void init (void)
{
  if (inited == false) {
    message(LOG_DEBUG, "ffmpegif", "initialize mutex");
    mutex = MutexCreate();
    inited = true;
  }
}
Example #5
UvdState::UvdState()
{
    m_isRealtimeMode = false;
    m_realtimeStartTime = -1.0;

    m_yyyy = 0;

    memset(&m_recvStats, 0, sizeof(RecvStats));

    MutexCreate(&m_lock);
}
Example #6
/**
 *	Function that creates a readers/writers lock data storage struct
 *      writer_overrules is a boolean: when non-zero, writers are given
 *	higher priority than readers; if 0, readers have priority
 *	over writers.
 */
TRWLock*
rwl_create( UInt32 writer_overrules )
{
	TRWLock *rwlock = malloc( sizeof( TRWLock ) );
        if ( !rwlock ) return NULL;
        rwlock->delflag = 0;
        rwlock->rcount = 0;
        rwlock->wcount = 0;
        rwlock->wflag = 0;
        rwlock->wpriority = writer_overrules;
        rwlock->datalock = MutexCreate();
        return rwlock;
}
Example #7
Log::Log()
	: timer(nullptr)
	, mutex(nullptr)
{
	Allocator* alloc = AllocatorGetThis();

	timer = TimerCreate(alloc);
	mutex = MutexCreate(alloc);

	LogAddHandler(this, LogConsoleHandler);

	if( !gs_Log ) LogSetDefault(this);
}
Example #8
/* Register fs */
void VfsInstallFileSystem(MCoreFileSystem_t *Fs)
{
	/* Ready the buffer */
	char IdentBuffer[8];
	memset(IdentBuffer, 0, 8);

	/* Copy the storage ident over */
	strcpy(IdentBuffer, "St");
	itoa(GlbFileSystemId, (IdentBuffer + 2), 10);

	/* Construct the identifier */
	Fs->Identifier = MStringCreate(&IdentBuffer, StrASCII);

	/* Setup last */
	Fs->Lock = MutexCreate();

	/* Add to list */
	list_append(GlbFileSystems, list_create_node(Fs->DiskId, Fs));

	/* Increment */
	GlbFileSystemId++;

	/* Start init? */
	if (Fs->Flags & VFS_MAIN_DRIVE
		&& !GlbVfsInitHasRun)
	{
		/* Process Request */
		MCoreProcessRequest_t *ProcRequest
			= (MCoreProcessRequest_t*)kmalloc(sizeof(MCoreProcessRequest_t));

		/* Print */
		LogInformation("VFSM", "Boot Drive Detected, Running Init");

		/* Append init path */
		MString_t *Path = MStringCreate(Fs->Identifier->Data, StrUTF8);
		MStringAppendChars(Path, FILESYSTEM_INIT);

		/* Create Request */
		ProcRequest->Type = ProcessSpawn;
		ProcRequest->Path = Path;
		ProcRequest->Arguments = NULL;
		ProcRequest->Cleanup = 1;

		/* Send */
		PmCreateRequest(ProcRequest);

		/* Set */
		GlbVfsInitHasRun = 1;
	}
}
Example #9
	void init(string ip, unsigned short _port) {
		Net::startup();

		try {
			socketClient.connectIPv4(ip, _port);
		}
		catch (...) {
			DebugError("Cannot connect to the server!");
			return;
		}

		threadRun = true;
		mutex = MutexCreate();
		t = ThreadCreate(networkThread, NULL);
	}
Example #10
int PoolCreate(TPool *p,uint32 zonesize)
{
	/* sanity */
	if (!p) {
		return FALSE;
	}

	p->zonesize = zonesize;

	/* create the pool's mutex first; bail out if that fails */
	if (!MutexCreate(&p->mutex))
		return FALSE;

	/* allocate the first zone; release the mutex again on failure */
	if (!(p->firstzone = p->currentzone = PoolZoneAlloc(zonesize))) {
		MutexFree(&p->mutex);
		return FALSE;
	}

	return TRUE;
}
Example #11
static void
logOpen(struct _TServer * const srvP,
        const char **     const errorP) {

    bool success;

    success = FileOpenCreate(&srvP->logfileP, srvP->logfilename,
                             O_WRONLY | O_APPEND);
    if (success) {
        bool success;
        success = MutexCreate(&srvP->logmutexP);
        if (success) {
            *errorP = NULL;
            srvP->logfileisopen = TRUE;
        } else
            xmlrpc_asprintf(errorP, "Can't create mutex for log file");
            
        if (*errorP)
            FileClose(srvP->logfileP);
    } else
        xmlrpc_asprintf(errorP, "Can't open log file '%s'", srvP->logfilename);
}
Example #12
static abyss_bool
logOpen(struct _TServer * const srvP) {

    abyss_bool success;

    success = FileOpenCreate(&srvP->logfile, srvP->logfilename,
                             O_WRONLY | O_APPEND);
    if (success) {
        abyss_bool success;
        success = MutexCreate(&srvP->logmutex);
        if (success)
            srvP->logfileisopen = TRUE;
        else
            TraceMsg("Can't create mutex for log file");

        if (!success)
            FileClose(&srvP->logfile);
    } else
        TraceMsg("Can't open log file '%s'", srvP->logfilename);

    return success;
}
Example #13
	// -- Init / Shutdown --
	void WorkerThread::Make(WorkerThreadPool * pool, uint32 threadIndex)
	{
		//BOOST_LOG( gn_log::get() ) << "(Concurrent) \tWorker[ " << threadIndex << " ] starting up...\n";
		LogInfo("WorkerThread[%i] starting up...", threadIndex);
		
		Index_ = threadIndex;
		Pool_ = pool;
		TaskMutex_.reset( MutexCreate(AllocatorGetHeap()) );
		Tasks_.reserve(TaskCapacity);	// some random number of tasks
		
		if(Index_ != 0)	// so long as we're not the main thread...
		{
			//Thread_ = boost::thread( boost::bind( &Worker::Run, this ) );	// create the boost::thread to run our shit
			ThreadFunction runFn = MakeDelegate(this, &WorkerThread::Run);

			Thread_.reset( ThreadCreate(AllocatorGetHeap()) );
			ThreadStart(Thread_, runFn, nullptr);
		}
		// TODO: Fix this.
		else	// Set this here, as the Run() function, which sets it otherwise, won't get called.
			LocalWorkerThread_ = this;
			//ThisThread_.reset( (const_cast<Worker *>(this)) );
	}
Example #14
/*----------------------------------------------------------------------------*
 *  NAME
 *      SchedInit
 *
 *  DESCRIPTION
 *      Prepare scheduler instance with identifier id. The scheduler instance
 *      will run in its own thread with given priority and stack size.
 *      The function returns the new scheduler instance. Priority and stack
 *      size values are passed unchanged to the ThreadCreate() call of the
 *      underlying Framework Extension API.
 *
 *      Valid id range is 0 to _SCHED_MAX_SEGMENTS - 1
 *
 *      Valid priority and stack size values are determined by the Framework
 *      Extensions API.
 *
 *  RETURNS
 *      The scheduler instance
 *
 *----------------------------------------------------------------------------*/
void *SchedInit(uint16 id, uint16 priority, uint32 stackSize)
{
    uint16 i = 0;

    /* Sanity check */
    if (id >= _SCHED_MAX_SEGMENTS) {
//        Panic(_TECH_FW, _PANIC_FW_UNEXPECTED_VALUE, "_SCHED_MAX_SEGMENTS exceeded");
    }

    /* Alloc instance */
    if (instance == NULL) {
        instance = (SchedulerInstanceType *) MemAlloc(sizeof(SchedulerInstanceType));
        if (instance == NULL) {
            GENINFO(("%s, line=%d error memory\n", __FUNCTION__, __LINE__));
            return (void*)NULL;
        }
        for (i = 0; i < _SCHED_MAX_SEGMENTS; i ++) {
            instance->thread[i].currentTask = _SCHED_TASK_ID;
        }

        MutexCreate(&instance->bgMutex);

        if (EventCreate(&instance->eventHandle)) {
       //     Panic(_TECH_FW, _PANIC_FW_UNEXPECTED_VALUE, "Event creation failed");
        }
    }

    instance->thread[id].inUse = TRUE;
    instance->threadIdVector |= 0x0001 << (uint16) id;

    /* Initial running state */
    instance->thread[id].schedRunning = FALSE;

    /* Set thread priority */
    instance->thread[id].priority = priority;

    /* Set thread stack size */
    instance->thread[id].stackSize = stackSize;

    /* Start initialisation of a single thread */
    instance->thread[id].init = TRUE;

    /* Store thread id */
    instance->thread[id].id = (uint8) id;

    /* Collect the number of tasks for this thread */
    instance->setupId = id;
    SchedTaskInit((void *) instance);

    /* Setup task structures for thread */
    instance->thread[id].tasks = MemAlloc(instance->thread[id].numberOfTasks * sizeof(TaskDefinitionType));
    if (instance->thread[id].tasks == NULL) {
        GENINFO(("%s, line=%d error memory\n", __FUNCTION__, __LINE__));
        return (void*)NULL;
    }

    /* Run task setup once more to transfer the function pointers */
    instance->thread[id].numberOfTasks = 0;
    instance->thread[id].init = FALSE;

    /* Prepare tasks */
    SchedTaskInit((void *) instance);

    instance->setupId = _SCHED_TASK_ID;

    if (MutexCreate(&(instance->thread[id].qMutex)) != _RESULT_SUCCESS) {
    //    Panic(_TECH_FW, _PANIC_FW_UNEXPECTED_VALUE, "Mutex creation failed");
    }

    if (EventCreate(&(instance->thread[id].eventHandle))) {
    //    Panic(_TECH_FW, _PANIC_FW_UNEXPECTED_VALUE, "Event creation failed");
    }

    /* Prepare queues */
    for (i = 0; i < instance->thread[id].numberOfTasks; i ++) {
        instance->thread[id].tasks[i].instanceDataPointer = NULL;
        instance->thread[id].tasks[i].messageQueueFirst = NULL;
        instance->thread[id].tasks[i].messageQueueLast = NULL;
    }

    return instance;
}
Example #15
static void testMutexes()
{
	/* for now at least, be aware that a TMutex is just typedeffed
	   to a pthread_mutex_t */

	{
		/* test TMutexes that live on the stack */
		TMutex stackMutex;
		assert(MutexCreate(&stackMutex) == TRUE);
		assert(MutexCreate(&stackMutex) == TRUE);
		assert(MutexLock(&stackMutex) == TRUE);
		assert(MutexUnlock(&stackMutex) == TRUE);
		assert(MutexTryLock(&stackMutex) == TRUE);
		assert(MutexUnlock(&stackMutex) == TRUE);

		assert(MutexLock(&stackMutex) == TRUE);
		assert(MutexTryLock(&stackMutex) == FALSE);
		assert(MutexUnlock(&stackMutex) == TRUE);

		MutexFree(&stackMutex);
	}

	{
		/* just a sanity, please don't core on me test */
		TMutex *heapMutex = 0;

		assert(MutexCreate(heapMutex) == FALSE);
		assert(MutexLock(heapMutex) == FALSE);
		assert(MutexUnlock(heapMutex) == FALSE);
		assert(MutexTryLock(heapMutex) == FALSE);
		assert(MutexUnlock(heapMutex) == FALSE);

		assert(MutexLock(heapMutex) == FALSE);
		assert(MutexTryLock(heapMutex) == FALSE);
		assert(MutexUnlock(heapMutex) == FALSE);

		MutexFree(heapMutex);

		assert(heapMutex == 0);
	}

	{
		/* test TMutexes that live on the heap */
		TMutex *heapMutex = (TMutex*)malloc(sizeof(TMutex));

		assert(MutexCreate(heapMutex) == TRUE);
		assert(MutexLock(heapMutex) == TRUE);
		assert(MutexUnlock(heapMutex) == TRUE);
		assert(MutexTryLock(heapMutex) == TRUE);
		assert(MutexUnlock(heapMutex) == TRUE);

		assert(MutexLock(heapMutex) == TRUE);
		assert(MutexTryLock(heapMutex) == FALSE);
		assert(MutexUnlock(heapMutex) == TRUE);

		MutexFree(heapMutex);

		free(heapMutex);
		heapMutex = 0;
	}
}
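
All of the examples above follow the same life cycle: create the mutex, lock around the shared state, unlock, and free it during teardown. The fragment below is a minimal sketch of that pattern against the Abyss-style TMutex API exercised in Example #15 (MutexCreate, MutexLock, MutexUnlock, MutexFree); the header name, the counter, and the function names are illustrative assumptions, not code from any of the projects above.

#include "thread.h"   /* assumed project header declaring TMutex, MutexCreate(), etc. */

static TMutex counterMutex;        /* protects 'counter' */
static unsigned int counter;

/* Create the mutex once, before any thread touches 'counter'. */
static int
counterInit(void)
{
    return MutexCreate(&counterMutex);   /* TRUE on success, FALSE on failure */
}

/* Guard the shared counter, mirroring the lock/unlock pairs in Example #15. */
static void
counterBump(void)
{
    if (MutexLock(&counterMutex)) {
        ++counter;                        /* critical section */
        MutexUnlock(&counterMutex);
    }
}

/* Release the mutex during teardown, as the MutexDestroy/MutexFree calls above do. */
static void
counterTerm(void)
{
    MutexFree(&counterMutex);
}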