Example #1
void move_task_to_kill_queue(task_t *t, int locked)
{
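	/* when the caller already holds the queue lock, use the no-lock variant */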
	if(locked)
		tqueue_remove_nolock(primary_queue, t->listnode);
	else
		tqueue_remove(primary_queue, t->listnode);
	raise_task_flag(t, TF_KILLREADY);
}
Example #2
void task_almost_block(struct llist *list, task_t *task)
{
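	/* disable interrupts while the task is moved onto the block list */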
	int old = set_int(0);
	task->blocklist = list;
	ll_do_insert(list, task->blocknode, (void *)task);
	tqueue_remove(((cpu_t *)task->cpu)->active_queue, task->activenode);
	task->state = TASK_ISLEEP;
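	/* restore interrupts and assert they stayed disabled in between */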
	assert(!set_int(old));
}
Example #3
/* we disable interrupts here so that we may safely use rwlocks from
 * (potentially) an interrupt handler */
void task_block(struct llist *list, task_t *task)
{
	int old = set_int(0);
	task->blocklist = list;
	ll_do_insert(list, task->blocknode, (void *)task);
	tqueue_remove(((cpu_t *)task->cpu)->active_queue, task->activenode);
	task_pause(task);
	assert(!set_int(old));
}
Example #4
__attribute__((always_inline)) inline void set_as_dead(task_t *t)
{
	assert(t);
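	/* drop the global process count and this CPU's task count */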
	sub_atomic(&running_processes, 1);
	sub_atomic(&(((cpu_t *)t->cpu)->numtasks), 1);
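	/* disable interrupts; the previous state is not restored here */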
	set_int(0);
	raise_flag(TF_DYING);
	tqueue_remove(((cpu_t *)t->cpu)->active_queue, t->activenode);
	t->state = TASK_DEAD;
}
Example #5
int
mon_start_user (int argc, char **argv, struct Trapframe *tf)
{
    unsigned int idle_pid;
    idle_pid = proc_create (_binary___obj_user_idle_idle_start, 10000);
    KERN_DEBUG("process idle %d is created.\n", idle_pid);

    KERN_INFO("Start user-space ... \n");

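    /* dequeue the idle process, mark it running, and context-switch into it */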
    tqueue_remove (NUM_IDS, idle_pid);
    tcb_set_state (idle_pid, TSTATE_RUN);
    set_curid (idle_pid);
    kctx_switch (0, idle_pid);

    KERN_PANIC("mon_startuser() should never reach here.\n");
}
Example #6
static void
kern_main (void)
{
    KERN_INFO("[BSP KERN] In kernel main.\n\n");
    
    KERN_INFO("[BSP KERN] Number of CPUs in this system: %d. \n", pcpu_ncpu());

    int cpu_idx = get_pcpu_idx();
    unsigned int pid;

    /*

    int i;
    all_ready = FALSE;
    for (i = 1; i < pcpu_ncpu(); i++){
        KERN_INFO("[BSP KERN] Boot CPU %d .... \n", i);

        bsp_kstack[i].cpu_idx = i;
        pcpu_boot_ap(i, kern_main_ap, (uintptr_t) &(bsp_kstack[i]));

        while (get_pcpu_boot_info(i) == FALSE);

        KERN_INFO("[BSP KERN] done.\n");

    }

    all_ready = TRUE;
    */
    
    pid = proc_create (_binary___obj_user_idle_idle_start, 1000);
    KERN_INFO("CPU%d: process idle %d is created.\n", cpu_idx, pid);
    tqueue_remove (NUM_IDS, pid);
    tcb_set_state (pid, TSTATE_RUN);
    set_curid (pid);
    kctx_switch (0, pid); 

    KERN_PANIC("kern_main_ap() should never reach here.\n");
}
Example #7
void tm_thread_do_exit(void)
{
	assert(current_thread->held_locks == 0);
	assert(current_thread->blocklist == 0);

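	/* set up the final teardown call; it is queued on the CPU's work queue below */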
	struct async_call *thread_cleanup_call = async_call_create(&current_thread->cleanup_call, 0, 
							tm_thread_destroy, (unsigned long)current_thread, 0);

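	/* cancel any pending alarm; if we removed it, drop its reference to this thread */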
	struct ticker *ticker = (void *)atomic_exchange(&current_thread->alarm_ticker, NULL);
	if(ticker) {
		if(ticker_delete(ticker, &current_thread->alarm_timeout) != -ENOENT)
			tm_thread_put(current_thread);
	}

	linkedlist_remove(&current_process->threadlist, &current_thread->pnode);

	tm_thread_remove_kerfs_entries(current_thread);
	atomic_fetch_sub_explicit(&running_threads, 1, memory_order_relaxed);
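	/* the thread that drops thread_count to zero tears down the whole process */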
	if(atomic_fetch_sub(&current_process->thread_count, 1) == 1) {
		atomic_fetch_sub_explicit(&running_processes, 1, memory_order_relaxed);
		tm_process_remove_kerfs_entries(current_process);
		tm_process_exit(current_thread->exit_code);
	}

	cpu_disable_preemption();

	assert(!current_thread->blocklist);
	tqueue_remove(current_thread->cpu->active_queue, &current_thread->activenode);
	atomic_fetch_sub_explicit(&current_thread->cpu->numtasks, 1, memory_order_relaxed);
	tm_thread_raise_flag(current_thread, THREAD_SCHEDULE);
	current_thread->state = THREADSTATE_DEAD;
	
	workqueue_insert(&__current_cpu->work, thread_cleanup_call);
	cpu_interrupt_set(0); /* don't schedule away until we get back
							 to the syscall handler! */
	cpu_enable_preemption();
}
Example #8
// public function definitions
void *arpd(void *threadarg) {
  assert(threadarg);

  struct thread_context *context;
  struct thread_context *contexts;
  struct arpd_data *data;
  struct in_addr *my_addr;
  int rv;

  struct transaction *transaction = NULL;
  struct ethernet_pkt *etherpkt;
  struct arp_pkt *arp;
  struct netmap_ring *rxring;
  void *ring_idx;
  uint32_t dispatcher_idx;
  struct msg_hdr *msg_hdr;

  context = (struct thread_context *)threadarg;
  contexts = context->shared->contexts;
  data = context->data;
  rxring = data->rxring;
  dispatcher_idx = context->shared->dispatcher_idx;
  my_addr = &context->shared->inet_info->addr;

  rv = arpd_init(context);
  if (!rv) {
    pthread_exit(NULL);
  }

  printf("arpd[%d]: initialized\n", context->thread_id);
  // signal to main() that we are initialized
  atomic_store_explicit(&context->initialized, 1, memory_order_release);

  // main event loop
  for (;;) {
    // read all the incoming packets
    while (tqueue_remove(context->pkt_recv_q, &transaction, &ring_idx) > 0) {
      etherpkt = (struct ethernet_pkt *) NETMAP_BUF(rxring, 
                                    rxring->slot[(uint32_t)ring_idx].buf_idx);
      arp = (struct arp_pkt*) etherpkt->data;

      if (!arp_is_valid(arp)) {
        send_msg_transaction_update_single(&contexts[dispatcher_idx],
                                            (uint32_t)ring_idx);
        continue;
      }

      if (arp->arp_h.ar_op == ARP_OP_REQUEST) {
        if (arp->tpa.s_addr != my_addr->s_addr) {
          send_msg_transaction_update_single(&contexts[dispatcher_idx],
                                              (uint32_t) ring_idx);
          continue;
        }

        printf("R)");
        arp_print_line(arp);

        // send_pkt_arp_reply could fail when xmit queue is full,
        // however, the sender should just resend a request
        send_pkt_arp_reply(context->pkt_xmit_q, &arp->spa, &arp->sha);
      } else {  // ARP_OP_REPLY
        if (!arp_reply_filter(arp, my_addr)) {
          send_msg_transaction_update_single(&contexts[dispatcher_idx],
                                              (uint32_t) ring_idx);
          continue;
        }

        printf("R)");
        arp_print_line(arp);

        // TODO: also check against a list of my outstanding arp requests
        // prior to insertion in the arp cache
        recv_pkt_arp_reply(arp, data->arp_cache, contexts);
      }

      send_msg_transaction_update_single(&contexts[dispatcher_idx],
                                          (uint32_t) ring_idx);
    } // while (packets)

    // resend outstanding requests and refresh expiring entries
    update_arp_cache(data->arp_cache, contexts, context->pkt_xmit_q);

    // TODO: read all the messages
    rv = squeue_enter(context->msg_q, 1);
    if (!rv)
      continue;

    while ((msg_hdr = squeue_get_next_pop_slot(context->msg_q)) != NULL) {
      switch (msg_hdr->msg_type) {
        case MSG_ARPD_GET_MAC:
          recv_msg_get_mac((void *)msg_hdr, data->arp_cache,
                            contexts, context->pkt_xmit_q);
          break;
        default:
          printf("arpd: unknown message %hu\n", msg_hdr->msg_type);
      }
    }
    squeue_exit(context->msg_q);

    usleep(ARP_CACHE_RETRY_INTERVAL);
  } // for (;;)

  pthread_exit(NULL);
}
Example #9
// public function definitions
void *worker(void *threadarg) {
  assert(threadarg);

  struct thread_context *context;
  struct thread_context *contexts;
  struct thread_context *dispatcher;
  struct worker_data *data;
  int rv;

  struct transaction *transaction = NULL;
  struct netmap_ring *rxring;
  void *ring_idx;
  uint32_t *slots_read;
  pktbuff *pktbuff_in, *pktbuff_out;
  int pktbuff_used = 1;
  struct pcb *pcb;
  struct msg_hdr *msg_hdr;

  context = (struct thread_context *)threadarg;
  contexts = context->shared->contexts;
  data = context->data;
  dispatcher = &contexts[context->shared->dispatcher_idx];

  rxring = data->rxring;
  slots_read = bitmap_new(rxring->num_slots);
  if (!slots_read)
    pthread_exit(NULL);

  rv = worker_init(context);
  if (!rv) {
    pthread_exit(NULL);
  }

  printf("worker[%d]: initialized\n", context->thread_id);
  // signal to main() that we are initialized
  atomic_store_explicit(&context->initialized, 1, memory_order_release);

  for (;;) {
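    // borrow a fresh output pktbuff only after the previous one has been consumed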
    if (pktbuff_used)
      pktbuff_out = pktbuff_allocator_borrow(&data->pktbuff_allocator);

    if (pktbuff_out) {
      pktbuff_out->thread_id = context->thread_id;
      pktbuff_used = 0;
      // read all the incoming packets
      while ((rv = tqueue_remove(context->pkt_recv_q, &transaction, &ring_idx))
                  != TQUEUE_EMPTY) {
        pktbuff_in = (void *)NETMAP_BUF(rxring,
                                     rxring->slot[(uint32_t)ring_idx].buf_idx);

        recv_pktbuff(pktbuff_in, pktbuff_out, &pcb, &pktbuff_used, context);
        if (pktbuff_used) {
          if (send_pktbuff(pktbuff_out, pcb, context, data) < 0)
            pktbuff_used = 0;
        }

        bitmap_set(slots_read, (uint32_t)ring_idx);
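        // at a transaction boundary, report the slots we consumed back to the dispatcher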
        if (rv == TQUEUE_TRANSACTION_EMPTY) {
          send_msg_transaction_update(dispatcher, slots_read,
                                      rxring->num_slots);
          bitmap_clearall(slots_read, rxring->num_slots);
        }
      } // while (packets)
    }   // pktbuff_out

    // read all the messages
    // TODO: handle MSG_ARPD_GET_MAC_REPLY, MSG_PACKET_SENT
    rv = squeue_enter(context->msg_q, 1);
    if (!rv)
      continue;
    while ((msg_hdr = squeue_get_next_pop_slot(context->msg_q)) != NULL) {
      switch (msg_hdr->msg_type) {
        case MSG_PACKET_SENT:
          pktbuff_allocator_return(&data->pktbuff_allocator,
                                ((struct msg_packet_sent *)msg_hdr)->pktbuff);
          break;
        case MSG_ARPD_GET_MAC_REPLY:  // for now, just ignore
          break;
        default:
          printf("worker[%d]: unknown message %hu\n", context->thread_id,
                  msg_hdr->msg_type);
      }
    }
    squeue_exit(context->msg_q);

    usleep(1000);
  } // for (;;)

  pthread_exit(NULL);
}