Example No. 1
// =============================================================================
// _fsTraceSetLevel
// -----------------------------------------------------------------------------
/// Set trace levels from the configuration file, or apply defaults if it cannot be opened
// =============================================================================
PRIVATE VOID _fsTraceSetLevel(void)
{
	UINT16 traceLevel[SXS_NB_ID] = {0, };
	UINT16 cnt = 0;
	UINT8  targetTraceLevelFileName[30] = "/t/csd_fstracelevel.tfg";
	INT32  iResult = -1;
	INT32  fhd;
	UINT8  tmpTransfer = 0;
	UINT8  *tmpTransfer1;

	traceLevel[__MMI] = 0xffff;
	traceLevel[__CSW] = 0xffff;

	CSW_TRACE(BASE_BAL_TS_ID, TSTXT("fsTrace_setTraceLevel OK.%d,%d\n"), __MMI, SXS_NB_ID);

	if ((fhd = _fsTraceFileOpen(targetTraceLevelFileName, FS_O_RDONLY, 0)) >= 0)
	{
		iResult = FS_Read(fhd, (UINT8 *)traceLevel, SXS_NB_ID * 2);

		CSW_TRACE(BASE_BAL_TS_ID, TSTXT("fsTrace_setTraceLevel read result is %d\n"), iResult);

		cnt = 0;
		while (cnt < SXS_NB_ID)
		{

			/* Swap the high and low bytes of the 16-bit level read from the file */
			tmpTransfer1 = (UINT8 *)(&traceLevel[cnt]);
			tmpTransfer = *tmpTransfer1;
			*tmpTransfer1 = *(tmpTransfer1 + 1);
			*(tmpTransfer1 + 1) = tmpTransfer;

			CSW_TRACE(BASE_BAL_TS_ID, TSTXT("fstraces_setLevel(0x%x 0x%x"), TID(cnt), traceLevel[cnt]);
			fstraces_setLevel(TID(cnt), traceLevel[cnt]);
			++cnt;
		}

		iResult = _fsTraceFileClose(fhd);
		CSW_TRACE(BASE_BAL_TS_ID, TSTXT("fsTrace_setTraceLevel close result is %d\n"), iResult);

	}
	else  // set default level
	{
		while (cnt < SXS_NB_ID)
		{
			fstraces_setLevel(TID(cnt), traceLevel[cnt]);
			++cnt;
		}
	}

}
Example No. 2
static void
drop_cb(struct pthread *td, void *arg)
{
	struct broadcast_arg *ba = arg;
	struct pthread_mutex *mp;
	struct pthread *curthread = ba->curthread;

	mp = td->mutex_obj;
	if (mp->m_owner == TID(curthread)) {
		if (curthread->nwaiter_defer >= MAX_DEFER_WAITERS) {
			_thr_wake_all(curthread->defer_waiters,
				curthread->nwaiter_defer);
			curthread->nwaiter_defer = 0;
		}
		curthread->defer_waiters[curthread->nwaiter_defer++] =
			&td->wake_addr->value;
		mp->m_flags |= PMUTEX_FLAG_DEFERED;
	} else {
		if (ba->count >= MAX_DEFER_WAITERS) {
			_thr_wake_all(ba->waddrs, ba->count);
			ba->count = 0;
		}
		ba->waddrs[ba->count++] = &td->wake_addr->value;
	}
}
Example No. 3
int
_pthread_rwlock_unlock(pthread_rwlock_t *rwlock)
{
	struct pthread *curthread = _get_curthread();
	pthread_rwlock_t prwlock;
	int ret;
	int32_t state;

	if (*rwlock == THR_PSHARED_PTR) {
		prwlock = __thr_pshared_offpage(rwlock, 0);
		if (prwlock == NULL)
			return (EINVAL);
	} else {
		prwlock = *rwlock;
	}

	if (__predict_false(prwlock <= THR_RWLOCK_DESTROYED))
		return (EINVAL);

	state = prwlock->lock.rw_state;
	if (state & URWLOCK_WRITE_OWNER) {
		if (__predict_false(prwlock->owner != TID(curthread)))
			return (EPERM);
		prwlock->owner = 0;
	}

	ret = _thr_rwlock_unlock(&prwlock->lock);
	if (ret == 0 && (state & URWLOCK_WRITE_OWNER) == 0)
		curthread->rdlock_count--;

	return (ret);
}
Example No. 4
/*
 * Provide the equivalent of the AIX pthread_getthreadid_np() function.
 */
int
_pthread_getthreadid_np(void)
{
    struct pthread *curthread;

    _thr_check_init();
    curthread = _get_curthread();
    return (TID(curthread));
}
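A minimal caller sketch for orientation (not part of the example above; it assumes FreeBSD's prototype int pthread_getthreadid_np(void) from <pthread_np.h>):

/* Hypothetical usage sketch: print the kernel thread ID of the calling thread. */
#include <pthread.h>
#include <pthread_np.h>
#include <stdio.h>

int
main(void)
{
	printf("current thread id: %d\n", pthread_getthreadid_np());
	return (0);
}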
Example No. 5
static int
rwlock_wrlock_common (pthread_rwlock_t *rwlock, const struct timespec *abstime)
{
	struct pthread *curthread = _get_curthread();
	pthread_rwlock_t prwlock;
	int ret;

	CHECK_AND_INIT_RWLOCK

	/*
	 * POSIX said the validity of the abstimeout parameter need
	 * not be checked if the lock can be immediately acquired.
	 */
	ret = _thr_rwlock_trywrlock(&prwlock->lock);
	if (ret == 0) {
		prwlock->owner = TID(curthread);
		return (ret);
	}

	if (__predict_false(abstime && 
	    (abstime->tv_nsec >= 1000000000 || abstime->tv_nsec < 0)))
		return (EINVAL);

	for (;;) {
		/* goto kernel and lock it */
		ret = __thr_rwlock_wrlock(&prwlock->lock, abstime);
		if (ret == 0) {
			prwlock->owner = TID(curthread);
			break;
		}

		if (ret != EINTR)
			break;

		/* if interrupted, try to lock it in userland again. */
		if (_thr_rwlock_trywrlock(&prwlock->lock) == 0) {
			ret = 0;
			prwlock->owner = TID(curthread);
			break;
		}
	}
	return (ret);
}
Example No. 6
const unsigned char *cParseSeca::ProvIdPtr(const unsigned char *data)
{
  switch(TID(data)) {
    case 0x80:
    case 0x81: return ((struct SecaEcm *)data)->id;
    case 0x82: return ((struct SecaEmmUnique *)data)->id;
    case 0x84: return ((struct SecaEmmShared *)data)->id;
    }
  return 0;
}
Example No. 7
int cParseSeca::KeyNr(const unsigned char *data)
{
  switch(TID(data)) {
    case 0x80:
    case 0x81: return ((struct SecaEcm *)data)->keyNr;
    case 0x82: return ((struct SecaEmmUnique *)data)->keyNr;
    case 0x84: return ((struct SecaEmmShared *)data)->keyNr;
    default: return -1;
    }
}
Example No. 8
void Sav5::trade(std::shared_ptr<PKX> pk)
{
    if (pk->egg() && (otName() != pk->otName() || TID() != pk->TID() || SID() != pk->SID() || gender() != pk->otGender()))
    {
        pk->metDay(Configuration::getInstance().day());
        pk->metMonth(Configuration::getInstance().month());
        pk->metYear(Configuration::getInstance().year() - 2000);
        pk->metLocation(30003);
    }
}
Example No. 9
int
_pthread_getcpuclockid(pthread_t pthread, clockid_t *clock_id)
{

	if (pthread == NULL)
		return (EINVAL);

	if (clock_getcpuclockid2(TID(pthread), CPUCLOCK_WHICH_TID, clock_id))
		return (errno);
	return (0);
}
Example No. 10
static int
cond_signal_common(pthread_cond_t *cond)
{
	struct pthread	*curthread = _get_curthread();
	struct pthread *td;
	struct pthread_cond *cvp;
	struct pthread_mutex *mp;
	struct sleepqueue *sq;
	int	*waddr;
	int	pshared;

	/*
	 * If the condition variable is statically initialized, perform dynamic
	 * initialization.
	 */
	CHECK_AND_INIT_COND

	pshared = CV_PSHARED(cvp);

	_thr_ucond_signal((struct ucond *)&cvp->__has_kern_waiters);

	if (pshared || cvp->__has_user_waiters == 0)
		return (0);

	curthread = _get_curthread();
	waddr = NULL;
	_sleepq_lock(cvp);
	sq = _sleepq_lookup(cvp);
	if (sq == NULL) {
		_sleepq_unlock(cvp);
		return (0);
	}

	td = _sleepq_first(sq);
	mp = td->mutex_obj;
	cvp->__has_user_waiters = _sleepq_remove(sq, td);
	if (mp->m_owner == TID(curthread)) {
		if (curthread->nwaiter_defer >= MAX_DEFER_WAITERS) {
			_thr_wake_all(curthread->defer_waiters,
					curthread->nwaiter_defer);
			curthread->nwaiter_defer = 0;
		}
		curthread->defer_waiters[curthread->nwaiter_defer++] =
			&td->wake_addr->value;
		mp->m_flags |= PMUTEX_FLAG_DEFERED;
	} else {
		waddr = &td->wake_addr->value;
	}
	_sleepq_unlock(cvp);
	if (waddr != NULL)
		_thr_set_wake(waddr);
	return (0);
}
Example No. 11
int cParseSeca::Payload(const unsigned char *data, const unsigned char **payload)
{
  int l;
  switch(TID(data)) {
    case 0x80:
    case 0x81: l=sizeof(struct SecaEcm); break;
    case 0x82: l=sizeof(struct SecaEmmUnique); break;
    case 0x84: l=sizeof(struct SecaEmmShared); break;
    default: return -1;
    }
  if(payload) *payload=&data[l];
  return CmdLen(data)-l+sizeof(struct SecaCmd);
}
Example No. 12
jboolean cpuManager_unblock(ObjectDesc * self, CPUStateProxy * cpuStateProxy)
{
	jboolean ret;
	/*ThreadDesc *cpuState = cpuStateProxy->cpuState; */
	ThreadDesc *cpuState;
	if (cpuStateProxy == NULL)
		exceptionHandler(THROW_RuntimeException);

	DISABLE_IRQ;		/* because we access data in another domain (the TCB) */

	cpuState = cpuState2thread(cpuStateProxy);
	if (cpuState == NULL) {
		ret = JNI_FALSE;
		goto finish;
	}
	ASSERTTHREAD(cpuState);
#ifdef VERBOSE_UNBLOCK
	printf("UNBLOCK %d.%d by %d.%d\n", TID(cpuState), TID(curthr()));
	/*printf("CPU%d: unblock %p\n",get_processor_id(), cpuState); */
#endif
	if (cpuState->state != STATE_BLOCKEDUSER) {
#ifdef DEBUG
/*		printf("CPU%d: CPUManager::unblock: Thread %p is in state %d (%s)\n", get_processor_id(), cpuState,
		       cpuState->state, get_state(cpuState));
*/
#endif
		//printStackTrace("STACK: ", curthr(), base);
		cpuState->unblockedWithoutBeingBlocked = 1;
		ret = JNI_FALSE;
	} else {
		threadunblock(cpuState);
		ret = JNI_TRUE;
	}
      finish:
	RESTORE_IRQ;
	return ret;

}
Example No. 13
int
_pthread_rwlock_trywrlock (pthread_rwlock_t *rwlock)
{
	struct pthread *curthread = _get_curthread();
	pthread_rwlock_t prwlock;
	int ret;

	CHECK_AND_INIT_RWLOCK

	ret = _thr_rwlock_trywrlock(&prwlock->lock);
	if (ret == 0)
		prwlock->owner = TID(curthread);
	return (ret);
}
Example No. 14
int
_pthread_getaffinity_np(pthread_t td, size_t cpusetsize, cpuset_t *cpusetp)
{
	struct pthread	*curthread = _get_curthread();
	lwpid_t tid;
	int error;

	if (td == curthread) {
		error = cpuset_getaffinity(CPU_LEVEL_WHICH, CPU_WHICH_TID,
			-1, cpusetsize, cpusetp);
		if (error == -1)
			error = errno;
	} else if ((error = _thr_find_thread(curthread, td, 0)) == 0) {
		tid = TID(td);
		error = cpuset_getaffinity(CPU_LEVEL_WHICH, CPU_WHICH_TID, tid,
			    cpusetsize, cpusetp);
		if (error == -1)
			error = errno;
		THR_THREAD_UNLOCK(curthread, td);
	}
	return (error);
}
Example No. 15
int
_pthread_create(pthread_t * thread, const pthread_attr_t * attr,
	       void *(*start_routine) (void *), void *arg)
{
	struct pthread *curthread, *new_thread;
	struct thr_param param;
	struct sched_param sched_param;
	struct rtprio rtp;
	sigset_t set, oset;
	cpuset_t *cpusetp;
	int i, cpusetsize, create_suspended, locked, old_stack_prot, ret;

	cpusetp = NULL;
	ret = cpusetsize = 0;
	_thr_check_init();

	/*
	 * Tell libc and others now they need lock to protect their data.
	 */
	if (_thr_isthreaded() == 0) {
		_malloc_first_thread();
		if (_thr_setthreaded(1))
			return (EAGAIN);
	}

	curthread = _get_curthread();
	if ((new_thread = _thr_alloc(curthread)) == NULL)
		return (EAGAIN);

	memset(&param, 0, sizeof(param));

	if (attr == NULL || *attr == NULL)
		/* Use the default thread attributes: */
		new_thread->attr = _pthread_attr_default;
	else {
		new_thread->attr = *(*attr);
		cpusetp = new_thread->attr.cpuset;
		cpusetsize = new_thread->attr.cpusetsize;
		new_thread->attr.cpuset = NULL;
		new_thread->attr.cpusetsize = 0;
	}
	if (new_thread->attr.sched_inherit == PTHREAD_INHERIT_SCHED) {
		/* inherit scheduling contention scope */
		if (curthread->attr.flags & PTHREAD_SCOPE_SYSTEM)
			new_thread->attr.flags |= PTHREAD_SCOPE_SYSTEM;
		else
			new_thread->attr.flags &= ~PTHREAD_SCOPE_SYSTEM;

		new_thread->attr.prio = curthread->attr.prio;
		new_thread->attr.sched_policy = curthread->attr.sched_policy;
	}

	new_thread->tid = TID_TERMINATED;

	old_stack_prot = _rtld_get_stack_prot();
	if (create_stack(&new_thread->attr) != 0) {
		/* Insufficient memory to create a stack: */
		_thr_free(curthread, new_thread);
		return (EAGAIN);
	}
	/*
	 * Write a magic value to the thread structure
	 * to help identify valid ones:
	 */
	new_thread->magic = THR_MAGIC;
	new_thread->start_routine = start_routine;
	new_thread->arg = arg;
	new_thread->cancel_enable = 1;
	new_thread->cancel_async = 0;
	/* Initialize the mutex queue: */
	for (i = 0; i < TMQ_NITEMS; i++)
		TAILQ_INIT(&new_thread->mq[i]);

	/* Initialise hooks in the thread structure: */
	if (new_thread->attr.suspend == THR_CREATE_SUSPENDED) {
		new_thread->flags = THR_FLAGS_NEED_SUSPEND;
		create_suspended = 1;
	} else {
		create_suspended = 0;
	}

	new_thread->state = PS_RUNNING;

	if (new_thread->attr.flags & PTHREAD_CREATE_DETACHED)
		new_thread->flags |= THR_FLAGS_DETACHED;

	/* Add the new thread. */
	new_thread->refcount = 1;
	_thr_link(curthread, new_thread);

	/*
	 * Handle the race between __pthread_map_stacks_exec and
	 * thread linkage.
	 */
	if (old_stack_prot != _rtld_get_stack_prot())
		_thr_stack_fix_protection(new_thread);

	/* Return the thread pointer earlier so that the new thread can use it. */
	(*thread) = new_thread;
	if (SHOULD_REPORT_EVENT(curthread, TD_CREATE) || cpusetp != NULL) {
		THR_THREAD_LOCK(curthread, new_thread);
		locked = 1;
	} else
		locked = 0;
	param.start_func = (void (*)(void *)) thread_start;
	param.arg = new_thread;
	param.stack_base = new_thread->attr.stackaddr_attr;
	param.stack_size = new_thread->attr.stacksize_attr;
	param.tls_base = (char *)new_thread->tcb;
	param.tls_size = sizeof(struct tcb);
	param.child_tid = &new_thread->tid;
	param.parent_tid = &new_thread->tid;
	param.flags = 0;
	if (new_thread->attr.flags & PTHREAD_SCOPE_SYSTEM)
		param.flags |= THR_SYSTEM_SCOPE;
	if (new_thread->attr.sched_inherit == PTHREAD_INHERIT_SCHED)
		param.rtp = NULL;
	else {
		sched_param.sched_priority = new_thread->attr.prio;
		_schedparam_to_rtp(new_thread->attr.sched_policy,
			&sched_param, &rtp);
		param.rtp = &rtp;
	}

	/* Schedule the new thread. */
	if (create_suspended) {
		SIGFILLSET(set);
		SIGDELSET(set, SIGTRAP);
		__sys_sigprocmask(SIG_SETMASK, &set, &oset);
		new_thread->sigmask = oset;
		SIGDELSET(new_thread->sigmask, SIGCANCEL);
	}

	ret = thr_new(&param, sizeof(param));

	if (ret != 0) {
		ret = errno;
		/*
		 * Translate EPROCLIM into well-known POSIX code EAGAIN.
		 */
		if (ret == EPROCLIM)
			ret = EAGAIN;
	}

	if (create_suspended)
		__sys_sigprocmask(SIG_SETMASK, &oset, NULL);

	if (ret != 0) {
		if (!locked)
			THR_THREAD_LOCK(curthread, new_thread);
		new_thread->state = PS_DEAD;
		new_thread->tid = TID_TERMINATED;
		new_thread->flags |= THR_FLAGS_DETACHED;
		new_thread->refcount--;
		if (new_thread->flags & THR_FLAGS_NEED_SUSPEND) {
			new_thread->cycle++;
			_thr_umtx_wake(&new_thread->cycle, INT_MAX, 0);
		}
		_thr_try_gc(curthread, new_thread); /* thread lock released */
		atomic_add_int(&_thread_active_threads, -1);
	} else if (locked) {
		if (cpusetp != NULL) {
			if (cpuset_setaffinity(CPU_LEVEL_WHICH, CPU_WHICH_TID,
				TID(new_thread), cpusetsize, cpusetp)) {
				ret = errno;
				/* kill the new thread */
				new_thread->force_exit = 1;
				new_thread->flags |= THR_FLAGS_DETACHED;
				_thr_try_gc(curthread, new_thread);
				 /* thread lock released */
				goto out;
			}
		}

		_thr_report_creation(curthread, new_thread);
		THR_THREAD_UNLOCK(curthread, new_thread);
	}
out:
	if (ret)
		(*thread) = 0;
	return (ret);
}
Example No. 16
bool cProviderSeca::MatchEMM(const unsigned char *data)
{
  return TID(data)==0x84 &&
         !memcmp(SID(data),provId,sizeof(provId)) && !memcmp(SA(data),sa,sizeof(sa));
}
Example No. 17
bool cCardSeca::MatchEMM(const unsigned char *data)
{
  return TID(data)==0x82 &&
         !memcmp(UA(data),ua,sizeof(ua));
}
Example No. 18
void Sav6::trade(std::shared_ptr<PKX> pk)
{
    PK6 *pk6 = (PK6*)pk.get();
    if (pk6->egg())
    {
        if (otName() != pk6->otName() || TID() != pk6->TID() || SID() != pk6->SID() || gender() != pk6->otGender())
        {
            pk6->metDay(Configuration::getInstance().day());
            pk6->metMonth(Configuration::getInstance().month());
            pk6->metYear(Configuration::getInstance().year() - 2000);
            pk6->metLocation(30002);
        }
        return;
    }
    else if (otName() == pk6->otName() && TID() == pk6->TID() && SID() == pk6->SID() && gender() == pk6->otGender())
    {
        pk6->currentHandler(0);

        if (!pk6->untraded() && (country() != pk6->geoCountry(0) || subRegion() != pk6->geoRegion(0)))
        {
            for (int i = 4; i > 0; i--)
            {
                pk6->geoCountry(pk6->geoCountry(i - 1), i);
                pk6->geoRegion(pk6->geoRegion(i - 1), i);
            }
            pk6->geoCountry(country());
            pk6->geoRegion(subRegion());
        }
    }
    else
    {
        if (otName() != pk6->htName() || gender() != pk6->htGender() || (pk6->geoCountry(0) == 0 && pk6->geoRegion(0) == 0 && !pk6->untradedEvent()))
        {
            for (int i = 4; i > 0; i--)
            {
                pk6->geoCountry(pk6->geoCountry(i - 1), i);
                pk6->geoRegion(pk6->geoRegion(i - 1), i);
            }
            pk6->geoCountry(country());
            pk6->geoRegion(subRegion());
        }

        if (pk6->htName() != otName())
        {
            pk6->htFriendship(pk6->baseFriendship());
            pk6->htAffection(0);
            pk6->htName(otName());
        }
        pk6->currentHandler(1);
        pk6->htGender(gender());

        if (pk6->htMemory() == 0)
        {
            pk6->htMemory(4);
            pk6->htTextVar(9);
            pk6->htIntensity(1);

            /*static constexpr u32 memoryBits[70] = { 
                0x000000, 0x04CBFD, 0x004BFD, 0x04CBFD, 0x04CBFD, 0xFFFBFB, 0x84FFF9, 0x47FFFF, 0xBF7FFA, 0x7660B0,
                0x80BDF9, 0x88FB7A, 0x083F79, 0x0001FE, 0xCFEFFF, 0x84EBAF, 0xB368B0, 0x091F7E, 0x0320A0, 0x080DDD,
                0x081A7B, 0x404030, 0x0FFFFF, 0x9A08BC, 0x089A7B, 0x0032AA, 0x80FF7A, 0x0FFFFF, 0x0805FD, 0x098278,
                0x0B3FFF, 0x8BBFFA, 0x8BBFFE, 0x81A97C, 0x8BB97C, 0x8BBF7F, 0x8BBF7F, 0x8BBF7F, 0x8BBF7F, 0xAC3ABE,
                0xBFFFFF, 0x8B837C, 0x848AFA, 0x88FFFE, 0x8B0B7C, 0xB76AB2, 0x8B1FFF, 0xBE7AB8, 0xB77EB8, 0x8C9FFD,
                0xBF9BFF, 0xF408B0, 0xBCFE7A, 0x8F3F72, 0x90DB7A, 0xBCEBFF, 0xBC5838, 0x9C3FFE, 0x9CFFFF, 0x96D83A,
                0xB770B0, 0x881F7A, 0x839F7A, 0x839F7A, 0x839F7A, 0x53897F, 0x41BB6F, 0x0C35FF, 0x8BBF7F, 0x8BBF7F 
            };*/

            u32 bits = 0x04CBFD; //memoryBits[pk6->htMemory()];
            while (true)
            {
                u32 feel = randomNumbers() % 20;
                if ((bits & (1 << feel)) != 0)
                {
                    pk6->htFeeling(feel);
                    break;
                }
            }
        }
    }
}
Example No. 19
/**
 * The function passed to created threads.
 *
 * NOTE: is_empty(queue) is called a lot; we must make sure it is
 * only called while we hold the queue lock!
 *
 * 1. Lock the task queue. We're using a condition lock, so we'll
 *    give up the lock until there is a task to run OR the tpDestroy
 *    function sent a broadcast to all threads that they should clean
 *    up.
 * 2. Wait for the signal (task inserted, or tp being destroyed).
 * 3. Now that the queue is locked, check the destruction state. This
 *    state should be valid because a. the change from ALIVE to
 *    something else is a one-way change, b. even if the following
 *    happened:
 *    - Task added
 *    - Thread got out of the WHILE loop
 *    - CONTEXT SWITCH
 *    - Main thread (pool creator) called tp_destroy, state changed
 *    - CONTEXT SWITCH
 *    - Back to our thread, got to the switch() statement and found
 *      out we're dying
 *    This is the desired behaviour (Piazza @281) - we do not need to
 *    make sure tasks added before calls to tpDestroy will be executed
 *    if tpDestroy is called in DO_RUN mode, even if all threads were
 *    available when the task was added.
 * 4. If we're ALIVE, that means pool->queue IS NOT EMPTY (otherwise we
 *    would still be in the while loop, because you can't change DO_RUN
 *    or DO_ALL back to ALIVE so there's no danger we left the while()
 *    loop because of state!=ALIVE but got to state==ALIVE in the
 *    switch), so we can just dequeue a task and run it (remember to
 *    unlock before running!).
 * 5. If we're DO_ALL, it's like ALIVE but first check if there's
 *    something to run (unlike the ALIVE state, we don't know for sure).
 *    If there is, run it; otherwise, exit (no more tasks will come).
 * 6. If we're DO_RUN, exit. Don't take another task, leave them to rot.
 * 7. Rinse and repeat
 */
void* thread_func(void* void_tp) {
	
	int pid = TID();
	
	// Some useful variables
	State state;
	Task* t;
	ThreadPool* tp = (ThreadPool*)void_tp;
	
#if HW3_DEBUG
	// Initialize tp->tids
	pthread_t self = pthread_self();
	int thread_i;
	for (thread_i=0; thread_i<tp->N; ++thread_i)
		if (pthread_equal(tp->threads[thread_i],self)) {
			tp->tids[thread_i]=pid;
			break;
		}
#endif
	PRINT("Thread %d started it's function\n",pid);
	
	// Main thread task
	while(1) {
		
		// Get the initial state and the task lock, when we need it (task to do or we're dying)
		// IMPORTANT: LOCK THE TASK LOCK BEFORE READING THE STATE!
		// Otherwise, we can see this situation:
		// - T1 reads the state, it's ALIVE
		// - CS-->main thread
		// - Main thread calls tpDestroy
		// - Main thread broadcasts, starts waiting for all threads
		// - CS-->T1
		// - T1 locks the task lock (remember: state==ALIVE)
		// - The task queue is empty and state==ALIVE so T1 will wait for a signal that will never come.
		// Hence, DO NOT do this:
		// 1. state = read_state(tp);
		// 2. pthread_mutex_lock(&tp->task_lock);
		// But do it the other way round:
		pthread_mutex_lock(&tp->task_lock);										// This is OK because during INIT, we don't lock the task queue (after its creation)
		state = read_state(tp);
		PRINT("Thread %d locked the task queue\n",pid);
		while (osIsQueueEmpty(tp->tasks) && state == ALIVE) {					// Wait for a task OR the destruction of the pool
			PRINT("Thread %d started waiting for a signal\n",pid);
			pthread_cond_wait(&tp->queue_not_empty_or_dying,&tp->task_lock);	// Either one gives a signal
			state = read_state(tp);
			PRINT("Thread %d got the signal and locked the lock\n",pid);
		}
		PRINT("Thread %d got out of the while() loop, state==%s\n",pid,state_string(read_state(tp)));
		switch(state) {
			case ALIVE:											// If we're not dying, take a task and do it.
				t = (Task*)osDequeue(tp->tasks);
				pthread_mutex_unlock(&tp->task_lock);
				PRINT("Thread %d doing it's task\n",pid);
				t->func(t->param);
				free(t);
				break;
			case DO_ALL:										// If we're dying, but we should clean up the queue:
				if (!osIsQueueEmpty(tp->tasks)) {				// THIS TEST IS NOT USELESS! We may have got here
					t = (Task*)osDequeue(tp->tasks);			// via a broadcast() call from tp_destroy and the
					pthread_mutex_unlock(&tp->task_lock);		// state may be DO_ALL but is_empty() may be true...
					PRINT("Thread %d doing it's task\n",pid);	// Thus, the while() loop terminated and we got here.
					t->func(t->param);
					free(t);
				}
				else {											// If we're here, there are no more tasks to dequeue!
					pthread_mutex_unlock(&tp->task_lock);		// As we're being destroyed anyway, exit.
					PRINT("Thread %d unlocked the lock and returning\n",pid);
					return NULL;
				}
				break;
			case DO_RUN:										// If we're dying and no more tasks should be done,
				pthread_mutex_unlock(&tp->task_lock);			// just exit before dequeuing anything...
				PRINT("Thread %d unlocked the lock and returning\n",pid);
				return NULL;
				break;
		}
	}
}
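The comment above covers only the consumer side of the protocol. For contrast, a producer-side sketch follows; the wrapper name tpInsertTask and the queue helper osEnqueue are assumptions, and only the ThreadPool fields (task_lock, tasks, queue_not_empty_or_dying) and the Task usage (t->func(t->param)) are taken from the code above. It would live alongside thread_func(), sharing its includes and type definitions.

/* Hypothetical producer-side sketch (not part of the original pool code).
 * It takes the same task_lock the workers take before reading the state,
 * enqueues the task, and signals the shared condition variable so that one
 * waiting worker wakes up.  osEnqueue() and the exact Task field types are
 * assumptions mirroring osDequeue() and t->func(t->param) above. */
int tpInsertTask(ThreadPool* tp, void (*func)(void*), void* param) {
	Task* t = (Task*)malloc(sizeof(Task));
	if (t == NULL)
		return -1;
	t->func = func;
	t->param = param;

	pthread_mutex_lock(&tp->task_lock);
	osEnqueue(tp->tasks, t);                              // assumed counterpart of osDequeue()
	pthread_cond_signal(&tp->queue_not_empty_or_dying);   // wake one worker waiting in thread_func()
	pthread_mutex_unlock(&tp->task_lock);
	return 0;
}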
Example No. 20
jint cpuManager_dump(ObjectDesc * self, ObjectDesc * msg, ObjectDesc * ref)
{
	char c[128];
	int i = 0;
	ClassDesc *cl;
	c[0] = '\0';
	if (msg != NULL)
		stringToChar(msg, c, sizeof(c));
	//printStackTraceNew("DUMP");
	printf("DUMP %s 0x%lx ", c, ref);
	if (ref == NULL)
		return 0;
	if ((getObjFlags(ref) & FLAGS_MASK) == OBJFLAGS_MEMORY) {
#if 0
		MemoryProxy *m = (MemoryProxy *) ref;
		printf("   MEMORY: dz=%p mem=%p size=%d valid=%d refcount=%d\n", m->dz, m->mem, m->size, m->dz->valid,
		       m->dz->refcount);
#ifdef DEBUG_MEMORY_CREATION
		if (m->dz->createdBy) {
			printf("     created at : ");
			print_eip_info(m->dz->createdAt);
			printf(", by: %s ", m->dz->createdBy->domainName);
			if (m->dz->createdUsing)
				printf(" using %s", m->dz->createdUsing);
#endif
		} else {
			printf(", unknown creator");
		}
		printf("\n");
		{
#if 0
			DZMemoryProxy *dz;
			printf("--\n");
			dz = m->dz;
			while (dz->prev) {
				printf("  PREV %p owner=%d size=%d valid=%d\n", dz->prev, dz->prevOwner, dz->prev->size,
				       dz->prev->valid);
				dz = dz->prev;
				if (i++ > 20)
					sys_panic("POSSIBLE CIRCULARITY IN MEMORY CHAIN");
			}
			dz = m->dz;
			while (dz->next) {
				printf("  NEXT %p owner=%d size=%d valid=%d\n", dz->next, dz->nextOwner, dz->next->size,
				       dz->next->valid);
				dz = dz->next;
			}
		}
#endif

#endif				/* DEBUG_MEMORY_CREATION */
		{
			int i;
			ClassDesc *cd = obj2ClassDesc(ref);
			ASSERTCLASSDESC(cd);
			/*
			   printf("vtable: \n");
			   for(i=0; i<cd->vtableSize; i++) {
			   if (cd->vtable[i] != NULL) printf("%d %p\n", i, cd->vtable[i]);
			   }
			 */
		}
	} else if ((getObjFlags(ref) & FLAGS_MASK) == OBJFLAGS_PORTAL) {
		printf("   PORTAL: index=%d\n", ((Proxy *) ref)->index);
		//dumpVTable(ref, cl->vtableSize);
	} else if ((getObjFlags(ref) & FLAGS_MASK) == OBJFLAGS_OBJECT) {
		cl = obj2ClassDesc(ref);
		printf("     INSTANCE of class: %s\n", cl->name);
	} else if ((getObjFlags(ref) & FLAGS_MASK) == OBJFLAGS_CAS) {
		printf("     CAS\n");
	} else if ((getObjFlags(ref) & FLAGS_MASK) == OBJFLAGS_SERVICE) {
		DEPDesc *s = (DEPDesc *) ref;
		printf("     Service: interface=%s\n", s->interface->name);
	} else if ((getObjFlags(ref) & FLAGS_MASK) == OBJFLAGS_SERVICE_POOL) {
		printf("     Servicepool\n");
#if 0
	} else if (getObjFlags(ref) == OBJFLAGS_EXTERNAL_CPUSTATE) {
		printf("     CPUSTATE thread %d.%d\n", TID(cpuState2thread(ref)));
		//printStackTraceNew("CPUSTATE");
#endif
	} else {
		printf("     unknown object type. flags=(%p)\n", getObjFlags(ref));
	}
	return 0;
}