Example #1
File: task.cpp Project: YellPika/cppchan
    void yield() {
        initialize();

        if (current_task) {
            current_task->yield();
        } else {
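            // Main context: push a null sentinel so control eventually
            // returns here, then run queued tasks until that sentinel is
            // dequeued again.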
            queue.push_back(nullptr);

            while (!main_requested) {
                bool result;
                auto data = dequeue_task(0, result);
                if (!result)
                    continue;

                if (!data)
                    main_requested = true;
                else {
                    current_task = data.get();
                    if (*(data->update) && (*(data->update))())
                        queue.push_back(std::move(data));
                }
            }

            main_requested = false;
            current_task = nullptr;
        }
    }
Example #2
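/* Called from the timer interrupt with a pointer to the saved stack
 * pointer: the outgoing task's esp is captured from *stack, the task is
 * rotated to the back of the queue, and the incoming task's esp is
 * written back through *stack to complete the switch. */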
void schedule(unsigned int *stack){
	if(active == TRUE){
	  task_t* cur_task = dequeue_task();
	  if(cur_task != NULL){
	    cur_pid = cur_task->pid;
	    dbg_bochs_print("@@@@@@@");
	    dbg_bochs_print(cur_task->name);
	    if(cur_task->status!=NEW){
	      cur_task->esp=*stack;
	    } else {
	      cur_task->status=READY;	      
	      ((task_register_t *)(cur_task->esp))->eip = cur_task->eip;	      	      
	    }
	    enqueue_task(cur_task->pid, cur_task);
	    cur_task=get_task();
	    if(cur_task->status==NEW){
	      cur_task->status=READY;
	    }
	    dbg_bochs_print(" -- ");
	    dbg_bochs_print(cur_task->name);
	    dbg_bochs_print("\n");      
	    //load_pdbr(cur_task->pdir);
	    *stack = cur_task->esp;
	  } else {
	    /* No task was dequeued; nothing to re-enqueue, and cur_task is
	     * NULL here, so it must not be dereferenced. */
	  }
	}
	active = FALSE;
	return;
}
Example #3
File: pool.c Project: davidajulio/hx
static void* thread_loop(void *arg) {
  (void) arg;
  while (1) {
    struct task *task = dequeue_task();
    if (task == NULL)   /* guard in case dequeue_task() can come back empty */
      continue;
    task->func();
    free(task);
  }
  return NULL;  /* not reached; satisfies the pthread start-routine signature */
}
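For this worker loop not to spin, dequeue_task() must block until work arrives. A minimal sketch of such a blocking dequeue, assuming a hypothetical intrusive linked queue guarded by a pthread mutex and condition variable (queue_head, queue_lock, and queue_cond are illustrative names, not part of the hx project):

#include <pthread.h>
#include <stddef.h>

struct task {
    void (*func)(void);
    struct task *next;        /* illustrative intrusive link */
};

static struct task *queue_head;
static pthread_mutex_t queue_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t  queue_cond = PTHREAD_COND_INITIALIZER;

struct task *dequeue_task(void) {
    pthread_mutex_lock(&queue_lock);
    while (queue_head == NULL)                 /* re-check after every wakeup */
        pthread_cond_wait(&queue_cond, &queue_lock);
    struct task *t = queue_head;
    queue_head = t->next;
    pthread_mutex_unlock(&queue_lock);
    return t;
}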
Example #4
void schedule() {
  struct task_struct *next_task = NULL;
  struct task_struct *prev_task = current_task;

  register unsigned long sp asm ("sp");
  printk(PR_SS_PROC, PR_LVL_DBG5, "%s, sp = %x\n", __func__, sp);

  need_reschedule = false;

  if (NULL == current_task) while(1);  /* invariant: a current task must always exist; halt otherwise */

  if (PROCESS_STATE_DEAD == current_task->sched_en.state) {
	int waiting_pid = current_task->sched_en.blocked_pid;
	if (-1 != waiting_pid) {
	  struct task_struct *waiting_task = find_task_by_pid(waiting_pid);
	  if (waiting_task) {
		dequeue_task(waiting_task);
		waiting_task->sched_en.state = PROCESS_STATE_READY;
		waiting_task->sched_en.blocking_pid = -1;
		enqueue_task(waiting_task, sched_enqueue_flag_timeout);
	  }
	}
	/* NOTE: prev_task still refers to this task and is passed to
	 * context_switch() and printk() below after it is destroyed here. */
	destroy_user_thread(current_task);
	current_task = NULL;
  } else
	scheduler->enqueue_task(current_task, sched_enqueue_flag_timeout);

  next_task = scheduler->pick_next_task();

  if (current_task)
	printk(PR_SS_PROC, PR_LVL_DBG5, "%s, current_task->pid = %d\n", __func__, current_task->pid);
  if (next_task)
	printk(PR_SS_PROC, PR_LVL_DBG5, "%s, next_task->pid = %d\n", __func__, next_task->pid);

  if (current_task == next_task) {
	printk(PR_SS_PROC, PR_LVL_DBG5, "%s, current_task == next_task\n", __func__);
	return;
  } else if (NULL == next_task) {
	printk(PR_SS_PROC, PR_LVL_DBG5, "%s, NULL == next_task\n", __func__);
	return;
  } else if (NULL == current_task) {
	printk(PR_SS_PROC, PR_LVL_DBG5, "%s, NULL == current_task\n", __func__);
	current_task = next_task;
  } else {
	printk(PR_SS_PROC, PR_LVL_DBG5, "%s, current_task != next_task\n", __func__);
	current_task = next_task;
  }

  printk(PR_SS_PROC, PR_LVL_DBG5, "%s, context_switch %d <--> %d start\n", __func__, prev_task->pid, next_task->pid);  
  context_switch(prev_task, next_task);
  printk(PR_SS_PROC, PR_LVL_DBG5, "%s, context_switch %d <--> %d finish\n", __func__, prev_task->pid, next_task->pid);
}
Example #5
File: qdelay.c Project: M-o-a-T/moat
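/* AVR idiom: SREG is saved and interrupts are disabled with cli() so the
 * delay queues cannot change underneath us; restoring SREG at "out"
 * re-enables interrupts if they were on. */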
void
dequeue_task_later(task_head *task)
{
	unsigned char sreg = SREG;
	cli();

	//assert(task->delay != TASK_MAGIC, "nondelayed task");
	if(task->delay == TASK_MAGIC) {
		dequeue_task(task);
		goto out;
	}

	(void) (
	dequeue_in(&head_usec,task) ||
	dequeue_in(&head_msec,task) ||
	dequeue_in(&head_sec ,task) ||
	0);
	/* Not finding the thing is Not An Error. */

out:
	SREG = sreg;
}
Example #6
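//removes a task from the run queue recorded in task->queue, falling back
//to a search of every queue if the recorded index turns out to be stale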
File: task.c Project: codyd51/axle
void dequeue_task(task_small_t* task) {
    lock(mutex);
    if (task->queue < 0 || task->queue >= queues->size) {
        ASSERT(0, "Tried to remove %s from invalid queue %d", task->name, task->queue);
    }
    array_m* raw = array_m_lookup(queues, task->queue);

    int idx = array_m_index(raw, task);
    if (idx < 0) {
        printf_err("Tried to dequeue %s from queue %d it didn't belong to!", task->name, task->queue);
        //fall back on searching all queues for this task
        for (int i = 0; i < queues->size; i++) {
            array_m* queue = array_m_lookup(queues, i);
            for (int j = 0; j < queue->size; j++) {
                task_t* tmp = array_m_lookup(queue, j);
                if (task == tmp) {
                    //found task we were looking for
                    printf_info("Task was actually in queue %d", i);
                    array_m_remove(queue, j);
                    unlock(mutex);

                    return;
                }
            }
        }
        //never found the task!
        printf_err("Task %s did not exist in any queues!", task->name);
        unlock(mutex);
        return;
    }

    array_m_remove(raw, idx);
    unlock(mutex);

    //if for some reason this task is still in the queue (if it was added to queue twice),
    //dequeue it again
    if (array_m_index(raw, task) != ARR_NOT_FOUND) {
        dequeue_task(task);
    }
}
Example #7
File: thread_pool.c Project: tgdiriba/os
/*
 * Work loop for threads. Should be passed into the pthread_create() method.
 *
 */
static void *thread_do_work(void *void_pool)
{ 
  pool_t* pool = (pool_t*) void_pool;
  pthread_mutex_lock(&pool->lock);
  while(!pool->done) {
    // Drain the queue before sleeping: waiting first would miss any task
    // enqueued before this thread reached the condition variable, and
    // pthread_cond_wait() can also wake spuriously.
    pool_task_t* task = dequeue_task(&pool->tasks);
    if (task == NULL) {
      pthread_cond_wait(&pool->notify, &pool->lock);
      continue;  // re-check pool->done and the queue after every wakeup
    }
    pthread_mutex_unlock(&pool->lock);

    // Do work outside the lock.
    if (task->function != NULL) {
      task->function(task->argument);
    }

    free(task);

    pthread_mutex_lock(&pool->lock);
  }
  pthread_mutex_unlock(&pool->lock);

  pthread_exit(NULL);
  return(NULL);
}
Example #8
int
erts_port_task_abort(Eterm id, ErtsPortTaskHandle *pthp)
{
    ErtsRunQueue *runq;
    ErtsPortTaskQueue *ptqp;
    ErtsPortTask *ptp;
    Port *pp;
    int port_is_dequeued = 0;

    pp = &erts_port[internal_port_index(id)];
    runq = erts_port_runq(pp);
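    /* The run-queue lock acquired via erts_port_runq() is held from here
     * on; both exit paths release it with erts_smp_runq_unlock(). */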

    ptp = handle2task(pthp);

    if (!ptp) {
	erts_smp_runq_unlock(runq);
	return 1;
    }

    ASSERT(ptp->handle == pthp);
    ptqp = ptp->queue;
    ASSERT(pp == ptqp->port);

    ERTS_PT_CHK_PRES_PORTQ(runq, pp);
    ASSERT(ptqp);
    ASSERT(ptqp->first);

    dequeue_task(ptp);
    reset_handle(ptp);

    switch (ptp->type) {
    case ERTS_PORT_TASK_INPUT:
    case ERTS_PORT_TASK_OUTPUT:
    case ERTS_PORT_TASK_EVENT:
	ASSERT(erts_smp_atomic_read_nob(&erts_port_task_outstanding_io_tasks) > 0);
	erts_smp_atomic_dec_relb(&erts_port_task_outstanding_io_tasks);
	break;
    default:
	break;
    }

    ASSERT(ptqp == pp->sched.taskq || ptqp == pp->sched.exe_taskq);

    if (ptqp->first || pp->sched.taskq != ptqp)
	ptqp = NULL;
    else {
	pp->sched.taskq = NULL;
	if (!pp->sched.exe_taskq) {
	    dequeue_port(runq, pp);
	    ERTS_PORT_NOT_IN_RUNQ(pp);
	    port_is_dequeued = 1;
	}
    }

    ERTS_PT_CHK_PRES_PORTQ(runq, pp);

    erts_smp_runq_unlock(runq);
    
    if (erts_system_profile_flags.runnable_ports && port_is_dequeued) {
    	profile_runnable_port(pp, am_inactive);
    }

    port_task_free(ptp);
    if (ptqp)
	port_taskq_free(ptqp);

    return 0;
}
Example #9
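//
// Runs each task on the given list in the calling thread: a task is
// dequeued under threading_lock, executed with task-table and callback
// bookkeeping around the call, and freed afterwards.
//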
void chpl_task_executeTasksInList(void** p_task_list_void) {
  task_pool_p* p_task_list_head = (task_pool_p*) p_task_list_void;
  task_pool_p curr_ptask;
  task_pool_p child_ptask;

  //
  // If we're serial, all the tasks have already been executed.
  //
  if (chpl_task_getSerial())
    return;

  curr_ptask = get_current_ptask();

  while (*p_task_list_head != NULL) {
    chpl_fn_p task_to_run_fun = NULL;

    // begin critical section
    chpl_thread_mutexLock(&threading_lock);

    if ((child_ptask = *p_task_list_head) != NULL) {
      task_to_run_fun = child_ptask->fun;
      dequeue_task(child_ptask);
    }

    // end critical section
    chpl_thread_mutexUnlock(&threading_lock);

    if (task_to_run_fun == NULL)
      continue;

    set_current_ptask(child_ptask);

    // begin critical section
    chpl_thread_mutexLock(&extra_task_lock);

    extra_task_cnt++;

    // end critical section
    chpl_thread_mutexUnlock(&extra_task_lock);

    if (do_taskReport) {
      chpl_thread_mutexLock(&taskTable_lock);
      chpldev_taskTable_set_suspended(curr_ptask->id);
      chpldev_taskTable_set_active(child_ptask->id);
      chpl_thread_mutexUnlock(&taskTable_lock);
    }

    if (blockreport)
      initializeLockReportForThread();

    chpl_task_do_callbacks(chpl_task_cb_event_kind_begin,
                           child_ptask->filename,
                           child_ptask->lineno,
                           child_ptask->id,
                           child_ptask->is_executeOn);

    (*task_to_run_fun)(child_ptask->arg);

    chpl_task_do_callbacks(chpl_task_cb_event_kind_end,
                           child_ptask->filename,
                           child_ptask->lineno,
                           child_ptask->id,
                           child_ptask->is_executeOn);

    if (do_taskReport) {
      chpl_thread_mutexLock(&taskTable_lock);
      chpldev_taskTable_set_active(curr_ptask->id);
      chpldev_taskTable_remove(child_ptask->id);
      chpl_thread_mutexUnlock(&taskTable_lock);
    }

    // begin critical section
    chpl_thread_mutexLock(&extra_task_lock);

    extra_task_cnt--;

    // end critical section
    chpl_thread_mutexUnlock(&extra_task_lock);

    set_current_ptask(curr_ptask);
    chpl_mem_free(child_ptask, 0, 0);

  }
}
Example #10
//
// When we create a thread it runs this wrapper function, which just
// executes tasks out of the pool as they become available.
//
static void
thread_begin(void* ptask_void) {
  task_pool_p ptask;
  thread_private_data_t *tp;

  tp = (thread_private_data_t*) chpl_mem_alloc(sizeof(thread_private_data_t),
                                               CHPL_RT_MD_THREAD_PRV_DATA,
                                               0, 0);
  chpl_thread_setPrivateData(tp);

  tp->lockRprt = NULL;
  if (blockreport)
    initializeLockReportForThread();

  while (true) {
    //
    // wait for a task to be present in the task pool
    //

    // In revision 22137, we investigated whether it was beneficial to
    // implement this while loop in a hybrid style, where depending on
    // the number of tasks available, idle threads would either yield or
    // wait on a condition variable to waken them.  Through analysis, we
    // realized this could potentially create a case where a thread would
    // become stranded, waiting for a condition signal that would never
    // come.  A potential solution to this was to keep a count of threads
    // that were waiting on the signal, but since there was a performance
    // impact from keeping it as a hybrid as opposed to merely yielding,
    // it was decided that we would return to the simple yield case.
    while (!task_pool_head) {
      if (set_block_loc(0, CHPL_FILE_IDX_IDLE_TASK)) {
        // all other tasks appear to be blocked
        struct timeval deadline, now;
        gettimeofday(&deadline, NULL);
        deadline.tv_sec += 1;
        do {
          chpl_thread_yield();
          if (!task_pool_head)
            gettimeofday(&now, NULL);
        } while (!task_pool_head
                 && (now.tv_sec < deadline.tv_sec
                     || (now.tv_sec == deadline.tv_sec
                         && now.tv_usec < deadline.tv_usec)));
        if (!task_pool_head) {
          check_for_deadlock();
        }
      }
      else {
        do {
          chpl_thread_yield();
        } while (!task_pool_head);
      }

      unset_block_loc();
    }
 
    //
    // Just now the pool had at least one task in it.  Lock and see if
    // there's something still there.
    //
    chpl_thread_mutexLock(&threading_lock);
    if (!task_pool_head) {
      chpl_thread_mutexUnlock(&threading_lock);
      continue;
    }

    //
    // We've found a task to run.
    //

    if (blockreport)
      progress_cnt++;

    //
    // start new task; increment running count and remove task from pool
    // also add to task to task-table (structure in ChapelRuntime that keeps
    // track of currently running tasks for task-reports on deadlock or
    // Ctrl+C).
    //
    ptask = task_pool_head;
    idle_thread_cnt--;
    running_task_cnt++;

    dequeue_task(ptask);

    // end critical section
    chpl_thread_mutexUnlock(&threading_lock);

    tp->ptask = ptask;

    if (do_taskReport) {
      chpl_thread_mutexLock(&taskTable_lock);
      chpldev_taskTable_set_active(ptask->id);
      chpl_thread_mutexUnlock(&taskTable_lock);
    }

    chpl_task_do_callbacks(chpl_task_cb_event_kind_begin,
                           ptask->filename,
                           ptask->lineno,
                           ptask->id,
                           ptask->is_executeOn);

    (*ptask->fun)(ptask->arg);

    chpl_task_do_callbacks(chpl_task_cb_event_kind_end,
                           ptask->filename,
                           ptask->lineno,
                           ptask->id,
                           ptask->is_executeOn);

    if (do_taskReport) {
      chpl_thread_mutexLock(&taskTable_lock);
      chpldev_taskTable_remove(ptask->id);
      chpl_thread_mutexUnlock(&taskTable_lock);
    }

    tp->ptask = NULL;
    chpl_mem_free(ptask, 0, 0);

    // begin critical section
    chpl_thread_mutexLock(&threading_lock);

    //
    // finished task; decrement running count and increment idle count
    //
    assert(running_task_cnt > 0);
    running_task_cnt--;
    idle_thread_cnt++;

    // end critical section
    chpl_thread_mutexUnlock(&threading_lock);
  }
}
Example #11
void test_dequeue(){
	task_t* _task = dequeue_task();
	(void) _task;  /* smoke test: only checks that dequeue_task() returns */
}
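A slightly stronger round-trip check, assuming the enqueue_task(pid, task) signature and the task_t pid field seen in Example #2 (the test name and the <assert.h> usage are illustrative, not from the original project):

#include <assert.h>

void test_enqueue_dequeue(){
	task_t t;
	t.pid = 42;
	enqueue_task(t.pid, &t);
	task_t* got = dequeue_task();
	assert(got == &t);  /* the queue should hand back the task we enqueued */
}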
Example #12
File: tasks-fifo.c Project: bollu/chapel
void chpl_task_executeTasksInList(void** p_task_list_void) {
  task_pool_p* p_task_list_head = (task_pool_p*) p_task_list_void;
  task_pool_p curr_ptask;
  task_pool_p child_ptask;

  // Note: this function needs to tolerate an empty task
  // list. That will happen for coforalls inside a serial block, say.

  curr_ptask = get_current_ptask();

  while (*p_task_list_head != NULL) {
    chpl_fn_p task_to_run_fun = NULL;

    // begin critical section
    chpl_thread_mutexLock(&threading_lock);

    if ((child_ptask = *p_task_list_head) != NULL) {
      task_to_run_fun = child_ptask->bundle.requested_fn;
      dequeue_task(child_ptask);
    }

    // end critical section
    chpl_thread_mutexUnlock(&threading_lock);

    if (task_to_run_fun == NULL)
      continue;

    set_current_ptask(child_ptask);

    // begin critical section
    chpl_thread_mutexLock(&extra_task_lock);

    extra_task_cnt++;

    // end critical section
    chpl_thread_mutexUnlock(&extra_task_lock);

    if (do_taskReport) {
      chpl_thread_mutexLock(&taskTable_lock);
      chpldev_taskTable_set_suspended(curr_ptask->bundle.id);
      chpldev_taskTable_set_active(child_ptask->bundle.id);
      chpl_thread_mutexUnlock(&taskTable_lock);
    }

    if (blockreport)
      initializeLockReportForThread();

    chpl_task_do_callbacks(chpl_task_cb_event_kind_begin,
                           child_ptask->bundle.requested_fid,
                           child_ptask->bundle.filename,
                           child_ptask->bundle.lineno,
                           child_ptask->bundle.id,
                           child_ptask->bundle.is_executeOn);

    (*task_to_run_fun)(&child_ptask->bundle);

    chpl_task_do_callbacks(chpl_task_cb_event_kind_end,
                           child_ptask->bundle.requested_fid,
                           child_ptask->bundle.filename,
                           child_ptask->bundle.lineno,
                           child_ptask->bundle.id,
                           child_ptask->bundle.is_executeOn);

    if (do_taskReport) {
      chpl_thread_mutexLock(&taskTable_lock);
      chpldev_taskTable_set_active(curr_ptask->bundle.id);
      chpldev_taskTable_remove(child_ptask->bundle.id);
      chpl_thread_mutexUnlock(&taskTable_lock);
    }

    // begin critical section
    chpl_thread_mutexLock(&extra_task_lock);

    extra_task_cnt--;

    // end critical section
    chpl_thread_mutexUnlock(&extra_task_lock);

    set_current_ptask(curr_ptask);
    chpl_mem_free(child_ptask, 0, 0);

  }
}