bool	nonrecursive_lock::try_lock() noexcept
	{
		thread_id_t thread_id = get_thread_id();
		// a single attempt with no spinning: succeed only if the lock is
		// currently unowned (NO_LOCKED_TAG) and we can claim it for this thread
		return spin_loop(NO_LOCKED_TAG, thread_id, 0);
	}
Example #2
/* Just calls dynamo_thread_under_dynamo.  We used to initialize dcontext here,
 * but that would end up initializing it twice.
 */
static void
thread_starting(dcontext_t *dcontext)
{
    ASSERT(dcontext->initialized);
    dynamo_thread_under_dynamo(dcontext);
#ifdef WINDOWS
    LOG(THREAD, LOG_INTERP, 2, "thread_starting: interpreting thread "TIDFMT"\n",
        get_thread_id());
#endif
}
Example #3
static inline void
own_recursive_lock(recursive_lock_t *lock)
{
    ASSERT(lock->owner == INVALID_THREAD_ID);
    ASSERT(lock->count == 0);
    lock->owner = get_thread_id();
    ASSERT(lock->owner != INVALID_THREAD_ID);
    lock->count = 1;
}
Example #4
static void
_init_cfg_with_persist_file(const gchar *persist_file)
{
  main_thread_handle = get_thread_id();
  cfg = cfg_new_snippet();
  cfg->threaded = FALSE;
  cfg->state = persist_state_new(persist_file);
  cfg->keep_hostname = TRUE;
  persist_state_start(cfg->state);
}
Example #5
void resume_thread(const char *thread_name)
{
	int ret;

	ret = get_thread_id(thread_name);

	if(ret < 0)
		return;

	sceKernelResumeThread(ret);
}
Example #6
void do_on_thread(int thread, const callable_t &callable) {
    assert_good_thread_id(thread);

    if (thread == get_thread_id()) {
      // Run the function directly since we are already in the requested thread
      callable();
    } else {
      thread_doer_t<callable_t> *fsm = new thread_doer_t<callable_t>(callable, thread);
      fsm->run();
    }
}
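The pattern above — run the callable inline when we are already on the target thread, otherwise hand it off to a dispatcher — is easy to reproduce with the standard library. A minimal sketch of the same run-or-dispatch decision; run_on and its spawn-a-thread fallback are illustrative stand-ins, not part of the code above, which would enqueue onto a real event loop instead:

#include <functional>
#include <iostream>
#include <thread>

// Hypothetical helper: runs `fn` inline if `target` is the current thread,
// otherwise runs it on another thread (a real dispatcher would enqueue
// the callable rather than spawn).
void run_on(std::thread::id target, const std::function<void()> &fn) {
    if (std::this_thread::get_id() == target) {
        fn();                    // already on the requested thread
    } else {
        std::thread(fn).join();  // stand-in for a proper task queue
    }
}

int main() {
    run_on(std::this_thread::get_id(), [] { std::cout << "inline\n"; });
    run_on(std::thread::id(), [] { std::cout << "dispatched\n"; });
}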
Example #7
    void thread_pool_implementation::
    thread (
    )
    {
        {
            // save the id of this worker thread into worker_thread_ids
            auto_mutex M(m);
            thread_id_type id = get_thread_id();
            worker_thread_ids.push_back(id);
        }

        task_state_type task;
        while (we_are_destructing == false)
        {
            long idx = 0;

            // wait for a task to do 
            { auto_mutex M(m);
                while ( (idx = find_ready_task()) == -1 && we_are_destructing == false)
                    task_ready_signaler.wait();

                if (we_are_destructing)
                    break;

                tasks[idx].is_being_processed = true;
                task = tasks[idx];
            }

            // now do the task
            if (task.bfp)
                task.bfp();
            else if (task.mfp0)
                task.mfp0();
            else if (task.mfp1)
                task.mfp1(task.arg1);
            else if (task.mfp2)
                task.mfp2(task.arg1, task.arg2);

            // Now let others know that we finished the task.  We do this
            // by clearing out the state of this task
            { auto_mutex M(m);
                tasks[idx].is_being_processed = false;
                tasks[idx].task_id = 0;
                tasks[idx].bfp.clear();
                tasks[idx].mfp0.clear();
                tasks[idx].mfp1.clear();
                tasks[idx].mfp2.clear();
                tasks[idx].arg1 = 0;
                tasks[idx].arg2 = 0;
                task_done_signaler.broadcast();
            }

        }
    }
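The structure of this worker loop — lock, wait until a task is ready, mark it in-progress, run it outside the lock, then clear it and signal — maps directly onto std::condition_variable. A minimal sketch of the same shape under that assumption; the names here are illustrative, not dlib's API:

#include <condition_variable>
#include <functional>
#include <mutex>
#include <queue>
#include <thread>

std::mutex m;
std::condition_variable task_ready;
std::queue<std::function<void()>> tasks;
bool destructing = false;

void worker() {
    for (;;) {
        std::function<void()> task;
        {
            std::unique_lock<std::mutex> lock(m);
            // wait until a task is queued or we are shutting down
            task_ready.wait(lock, [] { return !tasks.empty() || destructing; });
            if (destructing)
                return;
            task = std::move(tasks.front());
            tasks.pop();
        }
        task();  // run outside the lock, as the dlib loop does
    }
}

int main() {
    std::thread t(worker);
    {
        std::lock_guard<std::mutex> lock(m);
        tasks.push([] { /* a task */ });
    }
    task_ready.notify_one();
    {
        std::lock_guard<std::mutex> lock(m);
        destructing = true;
    }
    task_ready.notify_all();
    t.join();
}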
Example #8
void
release_recursive_lock(recursive_lock_t *lock)
{
    ASSERT(lock->owner == get_thread_id());
    ASSERT(lock->count > 0);
    lock->count--;
    if (lock->count == 0) {
        lock->owner = INVALID_THREAD_ID;
        mutex_unlock(&lock->lock);
    }
}
Example #9
/* Return the time used by the program so far (user time + system time).  */
clock_t
clock ()
{
  thread_id thread;
  thread_info info;

  thread = get_thread_id(NULL);
  if( get_thread_info(thread,&info) != 0 )
    return (clock_t) -1;

  return (clock_t) info.ti_real_time;
}
Example #10
static int kbd_read( void* pNode, void* pCookie, off_t nPos, void* pBuf, size_t nLen )
{
    KbdVolume_s* psVolume = &g_sVolume;
    int          nError;

    if ( 0 == nLen ) {
        return( 0 );
    }

    for ( ;; )
    {
        if ( atomic_read( &psVolume->nBytesReceived ) > 0 )
        {
            int   nSize = min( nLen, atomic_read( &psVolume->nBytesReceived ) );
            int   i;
            char* pzBuf = pBuf;

            for ( i = 0 ; i < nSize ; ++i ) {
                pzBuf[ i ] = psVolume->zBuffer[ atomic_inc_and_read( &psVolume->nOutPos ) & 0xff ];
            }
            atomic_sub( &g_sVolume.nBytesReceived, nSize );

            nError = nSize;
        }
        else
        {
            int nEFlg = cli();

            if ( -1 != psVolume->hWaitThread )
            {
                nError = -EBUSY;
                printk( "ERROR : two threads attempted to read from keyboard device!\n" );
            }
            else
            {
                /* NOTE: the constant condition below disables the blocking
                 * path, so a read with no pending bytes always returns
                 * -EWOULDBLOCK instead of suspending. */
                if ( 1 )
                {
                    nError = -EWOULDBLOCK;
                }
                else
                {
                    psVolume->hWaitThread = get_thread_id(NULL);
                    nError = suspend();
                    psVolume->hWaitThread = -1;
                }
            }
            put_cpu_flags( nEFlg );
        }
        if ( 0 != nError ) {
            break;
        }
    }
    return( nError );
}
Example #11
void chan_post::print() const {
    std::cout 
        << "Board = " << get_board() << "\n"
        << "Thread_id = " << get_thread_id() << "\n"
        << "Post_id = " << get_id() << "\n"
        << "Content = " << get_com().substr(0, 160) << "\n"
        << "Filenames = [" << std::endl;
    for (const auto &a : get_filenames()) {
        std::cout << a << "\n";
    }
    std::cout << "]" << std::endl;
}
Example #12
	bool	nonrecursive_lock::lock() noexcept
	{
		thread_id_t thread_id = get_thread_id();
		std::uint32_t spin_count = spin_count_;

		while (spin_loop(NO_LOCKED_TAG, thread_id, spin_count) == false)
		{
			std::this_thread::yield();
		}

		return true;
	}
Example #13
// Get the team number of the calling thread's team
int get_team_num ()
{
	int i;
	int team_thread_id = get_thread_id (get_level () - 1);
	for (i = 0; i < MAX_TEAM_NUM && i < Team_num; ++i)
	{
		if (Team[i].team_flag == 1 && Team [i].task.thread_id == team_thread_id)
			return Team[i].team_num;
	}
	// If the thread team does not exist, return 0
	return 0;
}
Example #14
bool                          
try_recursive_lock(recursive_lock_t *lock)
{                             
    /* ASSUMPTION: reading owner field is atomic */
    if (lock->owner == get_thread_id()) {
        lock->count++;
    } else {
        if (!mutex_trylock(&lock->lock))
            return false;
        own_recursive_lock(lock);
    }
    return true;
}
Example #15
	virtual void run()
	{
		// Implement whatever the pool thread needs to do in this function
		if (_number_printed++ < 3)
		{
			// Print only three times, so the efficiency is easy to observe
			printf("thread %u say hello.\n", get_thread_id());
		}

		// do_millisleep is provided by CPoolThread for subclasses to sleep;
		// the sleep can be interrupted by calling wakeup
		do_millisleep(1000);
	}
Example #16
	bool	nonrecursive_lock::unlock() noexcept
	{
		// unlock must not fail, so it keeps retrying in an endless loop.
		thread_id_t thread_id = get_thread_id();
		std::uint32_t spin_count = spin_count_;

		while (spin_loop(thread_id, NO_LOCKED_TAG, spin_count) == false)
		{
			std::this_thread::yield();
		}

		return true;
	}
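Taken together, the three nonrecursive_lock methods suggest that spin_loop(expected, desired, spin_count) attempts a compare-and-swap of the lock word from expected to desired, retrying up to spin_count times: try_lock makes one attempt, lock spins, and unlock swaps the arguments to release. That signature is an inference from the call sites above, not a documented contract; a minimal sketch of what such a helper could look like:

#include <atomic>
#include <cstdint>

using thread_id_t = std::uint64_t;
constexpr thread_id_t NO_LOCKED_TAG = 0;

std::atomic<thread_id_t> owner_{NO_LOCKED_TAG};

// Hypothetical reconstruction: try to CAS owner_ from `expected` to
// `desired`, allowing up to `spin_count` extra retries before giving up.
bool spin_loop(thread_id_t expected, thread_id_t desired,
               std::uint32_t spin_count) {
    for (std::uint32_t i = 0; i <= spin_count; ++i) {
        thread_id_t e = expected;  // CAS overwrites its expected argument
        if (owner_.compare_exchange_strong(e, desired))
            return true;
    }
    return false;
}

int main() {
    thread_id_t me = 42;
    bool locked   = spin_loop(NO_LOCKED_TAG, me, 0);           // try_lock
    bool unlocked = locked && spin_loop(me, NO_LOCKED_TAG, 0); // unlock
    return (locked && unlocked) ? 0 : 1;
}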
Example #17
// Add a new explicit task to the team, waiting to be scheduled.
// This function is only called from the Gomp_task() function.
int add_etask (struct eTask *task)
{
	int i;
	int team_thread_id = get_thread_id (get_level () - 1);
	int num = get_thread_num ();
	struct eTask *parent_task;

	for (i = 0; i < MAX_TEAM_NUM && i < Team_num; ++i)
	{
		if (Team [i].team_flag == 1 && Team [i].task.thread_id == team_thread_id)
		{
			parent_task = Team [i].etask[num];

			task->parent = parent_task;
			task->children = NULL;
			task->kind = WAITTING_TASK;

			if (parent_task->children)
			{
				task->next_child = parent_task->children;
				task->prev_child = parent_task->children->prev_child;
				task->next_child->prev_child = task;
				task->prev_child->next_child = task;
			}
			else
			{
				task->next_child = task;
				task->prev_child = task;
			}

			parent_task->children = task;

			if (Team [i].task_queue)
			{
				task->next_queue = Team [i].task_queue;
				task->prev_queue = Team [i].task_queue->prev_queue;
				task->next_queue->prev_queue = task;
				task->prev_queue->next_queue = task;
			}
			else
			{
				task->next_queue = task;
				task->prev_queue = task;
				Team [i].task_queue = task;
			}
			return 0;
		}
	}

	return -1;
}
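add_etask threads the task onto two circular doubly linked lists (the parent's children and the team's task queue). The empty-list case points the node at itself, which keeps every later insertion branch-free. A self-contained sketch of that idiom, with illustrative names rather than the OpenMP runtime's types:

#include <cassert>

struct node {
    node *next, *prev;
};

// Insert n before *head, i.e. at the tail of the circular list.
// If the list is empty, n becomes a one-element ring pointing at itself.
void ring_insert(node **head, node *n) {
    if (*head) {
        n->next = *head;
        n->prev = (*head)->prev;
        n->next->prev = n;
        n->prev->next = n;
    } else {
        n->next = n;
        n->prev = n;
        *head = n;
    }
}

int main() {
    node *head = nullptr;
    node a{}, b{};
    ring_insert(&head, &a);
    ring_insert(&head, &b);
    assert(head == &a && a.next == &b && b.next == &a);
}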
Example #18
    bool threaded_object::
    is_alive (
    ) const
    {
        auto_mutex M(m_);

        DLIB_ASSERT(id1 != get_thread_id() || id_valid == false,
               "\tbool threaded_object::is_alive()"
               << "\n\tYou can NOT call this function from the thread that executes threaded_object::thread"
               << "\n\tthis: " << this
        );

        return is_alive_;
    }
Example #19
    void threaded_object::
    set_respawn (
    )
    {
        auto_mutex M(m_);

        DLIB_ASSERT(id1 != get_thread_id() || id_valid == false,
               "\tvoid threaded_object::set_respawn()"
               << "\n\tYou can NOT call this function from the thread that executes threaded_object::thread"
               << "\n\tthis: " << this
        );

        should_respawn_ = true;
    }
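The DLIB_ASSERT in these accessors (and in the pause, wait, and should_stop variants further down) enforces which thread may call the method by comparing a stored worker id against get_thread_id(). The same guard is easy to express with the standard library; a minimal sketch of the idea, not dlib's implementation:

#include <cassert>
#include <thread>

class worker {
public:
    void start() { t_ = std::thread(&worker::body, this); }
    void join()  {
        // must not be called from the worker thread itself, or we'd deadlock
        assert(std::this_thread::get_id() != t_.get_id());
        t_.join();
    }
private:
    void body() { /* thread work goes here */ }
    std::thread t_;
};

int main() {
    worker w;
    w.start();
    w.join();  // fine from main; calling join() inside body() would trip the assert
}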
Example #20
/*
 * This is called to attempt to lock the inode associated with this
 * inode log item, in preparation for the push routine which does the actual
 * iflush.  Don't sleep on the inode lock or the flush lock.
 *
 * If the flush lock is already held, indicating that the inode has
 * been or is in the process of being flushed, then (ideally) we'd like to
 * see if the inode's buffer is still incore, and if so give it a nudge.
 * We delay doing so until the pushbuf routine, though, to avoid holding
 * the AIL lock across a call to the blackhole which is the buffercache. 
 * Also we don't want to sleep in any device strategy routines, which can happen
 * if we do the subsequent bawrite in here.
 */
STATIC uint
xfs_inode_item_trylock(
	xfs_inode_log_item_t	*iip)
{
	register xfs_inode_t	*ip;

	ip = iip->ili_inode;

	if (ip->i_pincount > 0) {
		return XFS_ITEM_PINNED;
	}

	if (!xfs_ilock_nowait(ip, XFS_ILOCK_SHARED)) {
		return XFS_ITEM_LOCKED;
	}

	if (!xfs_iflock_nowait(ip)) {
		/*
		 * If someone else isn't already trying to push the inode
		 * buffer, we get to do it.
		 */
		if (iip->ili_pushbuf_flag == 0) {
			iip->ili_pushbuf_flag = 1;
#ifdef DEBUG
			iip->ili_push_owner = get_thread_id();
#endif
			/*
			 * Inode is left locked in shared mode.
			 * Pushbuf routine gets to unlock it.
			 */
			return XFS_ITEM_PUSHBUF;
		} else {
			/*
			 * We hold the AIL_LOCK, so we must specify the
			 * NONOTIFY flag so that we won't double trip.
			 */
			xfs_iunlock(ip, XFS_ILOCK_SHARED|XFS_IUNLOCK_NONOTIFY);
			return XFS_ITEM_FLUSHING;
		}
		/* NOTREACHED */
	}
#ifdef DEBUG
	if (!XFS_FORCED_SHUTDOWN(ip->i_mount)) {
		ASSERT(iip->ili_format.ilf_fields != 0);
		ASSERT(iip->ili_logged == 0);
		ASSERT(iip->ili_item.li_flags & XFS_LI_IN_AIL);
	}
#endif
	return XFS_ITEM_SUCCESS;
}
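The comment at the top of xfs_inode_item_trylock describes a non-blocking lock protocol with four outcomes: pinned, locked, elected to push the buffer, or already being flushed by someone else. Stripped of the XFS specifics, the shape is two trylocks plus a flag that elects exactly one pusher. A toy sketch of that shape (illustrative only, not XFS code; the real pushbuf_flag is manipulated under the AIL lock rather than with an atomic):

#include <atomic>
#include <mutex>

enum trylock_result { SUCCESS, PINNED, LOCKED, PUSHBUF, FLUSHING };

struct item {
    std::atomic<int>  pincount{0};
    std::mutex        ilock, flush_lock;
    std::atomic<bool> pushbuf_flag{false};
};

trylock_result item_trylock(item &it) {
    if (it.pincount.load() > 0)
        return PINNED;                 // can't touch a pinned item
    if (!it.ilock.try_lock())
        return LOCKED;                 // someone else holds the item lock
    if (!it.flush_lock.try_lock()) {
        // already being flushed: elect exactly one pusher via the flag
        bool expected = false;
        if (it.pushbuf_flag.compare_exchange_strong(expected, true))
            return PUSHBUF;            // we keep ilock; the push routine unlocks it
        it.ilock.unlock();
        return FLUSHING;               // someone else is pushing already
    }
    return SUCCESS;                    // caller now holds both locks
}

int main() {
    item it;
    if (item_trylock(it) == SUCCESS) { // uncontended: we got both locks
        it.flush_lock.unlock();
        it.ilock.unlock();
    }
}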
Example #21
iThread* iThreadMgr::this_thread()
{
	long tid = get_thread_id();
	for(std::vector< std::list<iThread*> >::iterator iter=s_threads.begin(); iter!=s_threads.end(); ++iter)
	{
		for(std::list<iThread*>::iterator ithr=iter->begin(); ithr!=iter->end(); ++ithr)
		{
			if((*ithr)->get_id() == tid)
				return *ithr;
		}
	}

	return NULL;
}
Example #22
    void threaded_object::
    pause (
    )
    {
        auto_mutex M(m_);

        DLIB_ASSERT(id1 != get_thread_id() || id_valid == false,
               "\tvoid threaded_object::pause()"
               << "\n\tYou can NOT call this function from the thread that executes threaded_object::thread"
               << "\n\tthis: " << this
        );

        is_running_ = false;
    }
Example #23
/*
 * This is called to attempt to lock the dquot associated with this
 * dquot log item.  Don't sleep on the dquot lock or the flush lock.
 * If the flush lock is already held, indicating that the dquot has
 * been or is in the process of being flushed, then see if we can
 * find the dquot's buffer in the buffer cache without sleeping.  If
 * we can and it is marked delayed write, then we want to send it out.
 * We delay doing so until the push routine, though, to avoid sleeping
 * in any device strategy routines.
 */
STATIC uint
xfs_qm_dquot_logitem_trylock(
        xfs_dq_logitem_t	*qip)
{
	xfs_dquot_t	        *dqp;
	uint			retval;

	dqp = qip->qli_dquot;
	if (dqp->q_pincount > 0)
		return (XFS_ITEM_PINNED);
	
	if (! xfs_qm_dqlock_nowait(dqp))
		return (XFS_ITEM_LOCKED);
	
	retval = XFS_ITEM_SUCCESS;
	if (! xfs_qm_dqflock_nowait(dqp)) {
		/*
		 * The dquot is already being flushed.  It may have been
		 * flushed delayed write, however, and we don't want to
		 * get stuck waiting for that to complete.  So, we want to check
		 * to see if we can lock the dquot's buffer without sleeping.
		 * If we can and it is marked for delayed write, then we
		 * hold it and send it out from the push routine.  We don't
		 * want to do that now since we might sleep in the device
		 * strategy routine.  We also don't want to grab the buffer lock
		 * here because we'd like not to call into the buffer cache
		 * while holding the AIL_LOCK.
		 * Make sure to only return PUSHBUF if we set pushbuf_flag
		 * ourselves.  If someone else is doing it then we don't
		 * want to go to the push routine and duplicate their efforts.
		 */
		if (qip->qli_pushbuf_flag == 0) {
			qip->qli_pushbuf_flag = 1;
			ASSERT(qip->qli_format.qlf_blkno == dqp->q_blkno);
#ifdef DEBUG
			qip->qli_push_owner = get_thread_id();
#endif
			/*
			 * The dquot is left locked.
			 */
			retval = XFS_ITEM_PUSHBUF;
		} else {
			retval = XFS_ITEM_FLUSHING;
			xfs_dqunlock_nonotify(dqp);
		}
	}
	
	ASSERT(qip->qli_item.li_flags & XFS_LI_IN_AIL);
	return (retval);
}
Example #24
 bool multithreaded_object::
 should_stop (
 ) const
 {
     auto_mutex M(m);
     DLIB_ASSERT(thread_ids.is_in_domain(get_thread_id()),
            "\tbool multithreaded_object::should_stop()"
            << "\n\tYou can only call this function from one of the registered threads in this object"
            << "\n\tthis: " << this
     );
     while (is_running_ == false && should_stop_ == false)
         s.wait();
     return should_stop_;
 }
Example #25
 bool threaded_object::
 should_stop (
 ) const
 {
     auto_mutex M(m_);
     DLIB_ASSERT(is_alive_ && id1 == get_thread_id() && id_valid == true,
            "\tbool threaded_object::should_stop()"
            << "\n\tYou can only call this function from the thread that executes threaded_object::thread"
            << "\n\tthis: " << this
     );
     while (is_running_ == false && should_stop_ == false)
         s.wait();
     return should_stop_;
 }
Example #26
    void multithreaded_object::
    wait (
    ) const
    {
        auto_mutex M(m);

        DLIB_ASSERT(thread_ids.is_in_domain(get_thread_id()) == false,
               "\tvoid multithreaded_object::wait()"
               << "\n\tYou can NOT call this function from one of the threads registered in this object"
               << "\n\tthis: " << this
        );

        while (threads_started > 0)
            s.wait();
    }
Example #27
void
main_loop_init(void)
{
  service_management_publish_status("Starting up...");

  main_thread_handle = get_thread_id();
  main_loop_worker_init();
  main_loop_io_worker_init();
  main_loop_call_init();

  main_loop_init_events();
  if (!syntax_only)
    control_init(resolvedConfigurablePaths.ctlfilename);
  setup_signals();
}
Example #28
    void threaded_object::
    wait (
    ) const
    {
        auto_mutex M(m_);

        DLIB_ASSERT(id1 != get_thread_id() || id_valid == false,
               "\tvoid threaded_object::wait()"
               << "\n\tYou can NOT call this function from the thread that executes threaded_object::thread"
               << "\n\tthis: " << this
        );

        while (is_alive_)
            s.wait();
    }
Example #29
//Remove an explicit task from the thread team
int remove_etask (struct eTask *task)
{
	int i;
	int team_thread_id = get_thread_id (get_level () - 1);

	for (i = 0; i < Team_num; ++i)
	{
		if (Team [i].team_flag == 1 && Team [i].task.thread_id == team_thread_id)
		{
			/* TODO: the actual removal is not yet implemented */
			return 0;
		}
	}
	return -1;
}
Example #30
/* FIXME: rename recursive routines to parallel mutex_ routines */
void
acquire_recursive_lock(recursive_lock_t *lock)
{
    /* We no longer use the pattern of implementing acquire_lock as a
     * busy try_lock.
     */

    /* ASSUMPTION: reading owner field is atomic */
    if (lock->owner == get_thread_id()) {
        lock->count++;
    } else {
        mutex_lock(&lock->lock);
        own_recursive_lock(lock);
    }   
}
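The four recursive-lock routines on this page (own_recursive_lock, try_recursive_lock, release_recursive_lock, acquire_recursive_lock) form one API: the owner field makes re-entry by the owning thread a plain counter bump, and the underlying mutex is only touched on the first acquire and the last release. A sketch of how a caller composes them, with minimal stand-ins so it compiles on its own; the real recursive_lock_t, get_thread_id, and mutex routines come from the codebase the examples above are drawn from:

#include <cassert>

typedef unsigned long thread_id_t;
const thread_id_t INVALID_THREAD_ID = 0;
thread_id_t get_thread_id() { return 1; }  // stub: single-threaded demo

struct recursive_lock_t { thread_id_t owner; int count; };
// mutex_lock/mutex_unlock are elided; a single-threaded demo never blocks.

void acquire(recursive_lock_t *lock) {
    if (lock->owner == get_thread_id()) {
        lock->count++;                  // re-entry: just bump the count
    } else {
        /* mutex_lock(&lock->lock); */
        lock->owner = get_thread_id();
        lock->count = 1;
    }
}

void release(recursive_lock_t *lock) {
    assert(lock->owner == get_thread_id() && lock->count > 0);
    if (--lock->count == 0) {
        lock->owner = INVALID_THREAD_ID;
        /* mutex_unlock(&lock->lock); */
    }
}

int main() {
    recursive_lock_t lock = { INVALID_THREAD_ID, 0 };
    acquire(&lock);
    acquire(&lock);  // nested acquire only increments count
    release(&lock);
    release(&lock);  // last release clears the owner and frees the mutex
    assert(lock.owner == INVALID_THREAD_ID && lock.count == 0);
}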