Example #1
int do_fork(unsigned flags)
{
	assert(current_task && kernel_task);
	assert(running_processes < (unsigned)MAX_TASKS || MAX_TASKS == -1);
	addr_t eip;
	task_t *task = task_create();
	page_dir_t *newspace;
	if(flags & FORK_SHAREDIR)
		newspace = vm_copy(current_task->pd);
	else
		newspace = vm_clone(current_task->pd, 0);
	if(!newspace)
	{
		kfree((void *)task);
		return -ENOMEM;
	}
	/* set the address space's entry for the current task.
	 * this is a fast and easy way to store the "what task am I" data
	 * that gets automatically updated when the scheduler switches
	 * into a new address space */
	arch_specific_set_current_task(newspace, (addr_t)task);
	/* Create the new task structure */
	task->pd = newspace;
	copy_task_struct(task, current_task, flags & FORK_SHAREDAT);
	add_atomic(&running_processes, 1);
	/* Set the state to USLEEP temporarily so that the task cannot run
	 * prematurely, then add it to the queue */
	task->state = TASK_USLEEP;
	tqueue_insert(primary_queue, (void *)task, task->listnode);
	cpu_t *cpu = (cpu_t *)current_task->cpu;
#if CONFIG_SMP
	cpu = fork_choose_cpu(current_task);
#endif
	/* Copy the stack */
	set_int(0);
	engage_new_stack(task, current_task);
	/* Read the EIP of this exact location. The parent then sets the
	 * child's eip to this value, so when the child is next scheduled
	 * it starts executing here as well. */
	volatile task_t *parent = current_task;
	store_context_fork(task);
	eip = read_eip();
	if(current_task == parent)
	{
		/* These last things allow full execution of the task */
		task->eip = eip;
		task->state = TASK_RUNNING;
		task->cpu = cpu;
		add_atomic(&cpu->numtasks, 1);
		tqueue_insert(cpu->active_queue, (void *)task, task->activenode);
		__engage_idle();
		return task->pid;
	}
	return 0;
}
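
Note: the current_task == parent comparison plays the same role as the return value of a classic fork(): both tasks resume from the stored EIP, and only that comparison tells them apart. A rough user-space analogue, for intuition only (plain POSIX fork(), not part of this kernel):

#include <stdio.h>
#include <unistd.h>

int main(void)
{
	/* parent and child both resume here after fork(); the return
	 * value distinguishes them, like current_task == parent above */
	pid_t pid = fork();
	if(pid > 0)
		printf("parent: created child %d\n", (int)pid);
	else if(pid == 0)
		printf("child: starts here too\n");
	return 0;
}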
Example #2
void init_multitasking()
{
	printk(KERN_DEBUG, "[sched]: Starting multitasking system...\n");
	/* make the kernel task */
	task_t *task = task_create();
	task->pid = next_pid++;
	task->pd = (page_dir_t *)kernel_dir;
	task->stack_end = STACK_LOCATION;
	task->priority = 1;
	task->cpu = primary_cpu;
	task->thread = thread_data_create();
	/* alarm_mutex is acquired inside a kernel tick, so we may not schedule. */
	alarm_mutex = mutex_create(0, MT_NOSCHED);
	
	kill_queue = ll_create(0);
	primary_queue = tqueue_create(0, 0);
	primary_cpu->active_queue = tqueue_create(0, 0);

	tqueue_insert(primary_queue, (void *)task, task->listnode);
	tqueue_insert(primary_cpu->active_queue, (void *)task, task->activenode);
	
	primary_cpu->cur = task;
	primary_cpu->ktask = task;
	primary_cpu->numtasks = 1;
	/* make this the "current_task" by assigning a specific location
	 * in the page directory as the pointer to the task. */
	arch_specific_set_current_task((addr_t *)kernel_dir, (addr_t)task);
	kernel_task = task;
	/* this is the final thing to allow the system to begin scheduling
	 * once interrupts are enabled */
	primary_cpu->flags |= CPU_TASK;
	
	add_atomic(&running_processes, 1);
#if CONFIG_MODULES
	add_kernel_symbol(delay);
	add_kernel_symbol(delay_sleep);
	add_kernel_symbol(schedule);
	add_kernel_symbol(run_scheduler);
	add_kernel_symbol(exit);
	add_kernel_symbol(sys_setsid);
	add_kernel_symbol(do_fork);
	add_kernel_symbol(kill_task);
	add_kernel_symbol(do_send_signal);
	add_kernel_symbol(dosyscall);
	add_kernel_symbol(task_pause);
	add_kernel_symbol(task_resume);
	add_kernel_symbol(got_signal);
 #if CONFIG_SMP
	add_kernel_symbol(get_cpu);
 #endif
	_add_kernel_symbol((addr_t)(task_t **)&kernel_task, "kernel_task");
#endif
}
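
Note: judging from the explicit _add_kernel_symbol((addr_t)..., "kernel_task") call at the end, add_kernel_symbol is presumably a stringifying convenience macro along these lines (a guess inferred from that call, not taken from the codebase):

/* hypothetical definition, inferred from the _add_kernel_symbol call above */
#define add_kernel_symbol(x) _add_kernel_symbol((addr_t)(x), #x)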
Example #3
void task_unblock(struct llist *list, task_t *t)
{
	/* disable interrupts while the task moves between queues */
	int old = set_int(0);
	tqueue_insert(((cpu_t *)t->cpu)->active_queue, (void *)t, t->activenode);
	struct llistnode *bn = t->blocknode;
	t->blocklist = 0;
	ll_do_remove(list, bn, 0);
	task_resume(t);
	/* restore the previous interrupt state; set_int returns the state
	 * it replaced, which must still be "interrupts off" here */
	assert(!set_int(old));
}
Example #4
void task_unblock_all(struct llist *list)
{
	int old = set_int(0);
	rwlock_acquire(&list->rwl, RWL_WRITER);
	struct llistnode *cur, *next;
	task_t *entry;
	ll_for_each_entry_safe(list, cur, next, task_t *, entry)
	{
		entry->blocklist = 0;
		assert(entry->blocknode == cur);
		ll_do_remove(list, cur, 1);
		tqueue_insert(((cpu_t *)entry->cpu)->active_queue, (void *)entry, entry->activenode);
		task_resume(entry);
	}
	rwlock_release(&list->rwl, RWL_WRITER);
	assert(!set_int(old));
}
Example #5
int send_pktbuff(pktbuff *pktbuff_out, struct pcb *pcb,
                  struct thread_context *context, struct worker_data *data) {
  int rv;
  uint16_t len;

  // TODO: build layers based on pcb or pktbuff layer data
  // complete the lower layers and send pktbuff
  len = ethernet_build_header(&context->shared->if_info->mac,
                              &pcb->remote_mac,
                              IP4_ETHERTYPE, 0xFFFF, pktbuff_out->data);
  len += ip4_build_header(context->shared->inet_info->addr.s_addr,
                          FLOWID_REMOTE_IP(pcb->flowid), pktbuff_out->len,
                          pcb->ipproto, NULL,
                          FCL_PTR_PAST(pktbuff_out->data, len));
  pktbuff_out->len += len;

  // fix up the transport-layer (TCP/UDP) checksums
  struct ip4_pkt *send_ip = FCL_PTR_PAST(pktbuff_out->data,
                                      pcb_layer_offset(pcb, PCB_LAYER_IP4));
  struct tcp_pkt *tcp;
  struct udp_pkt *udp;
  switch (pcb->ipproto) {
    case IPPROTO_TCP:
      tcp = ip4_get_next(send_ip);
      tcp->tcp_h.th_sum = tcp_checksum(send_ip);
      break;
    case IPPROTO_UDP:
      udp = ip4_get_next(send_ip);
      udp->udp_h.uh_sum = udp_checksum(send_ip);
      break;
  }

  // and finally queue the pktbuff for sending
  rv = tqueue_insert(context->pkt_xmit_q, &data->xmit_transaction,
                      pktbuff_out);

  // xmit packet without waiting to fill a transaction
  if (rv == TQUEUE_SUCCESS) {
    tqueue_publish_transaction(context->pkt_xmit_q, &data->xmit_transaction);
    return 1;
  }

  return -1;
}
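
Note: examples 5 and 6 share a two-step pattern: tqueue_insert places an item into a per-caller transaction, and tqueue_publish_transaction hands the whole batch to the consumer at once. A minimal single-threaded sketch of that insert/publish split (hypothetical names and sizes, not the real tqueue API):

#include <stddef.h>
#include <stdio.h>

#define BATCH_MAX 4

struct batch { void *items[BATCH_MAX]; size_t n; };

/* accumulate an item in the private batch; fails when full */
static int batch_insert(struct batch *b, void *item) {
  if (b->n == BATCH_MAX)
    return -1;  /* caller must publish before inserting more */
  b->items[b->n++] = item;
  return 0;
}

/* make the batch visible to the consumer; here we just drain it */
static void batch_publish(struct batch *b) {
  for (size_t i = 0; i < b->n; i++)
    printf("consume %p\n", b->items[i]);
  b->n = 0;
}

int main(void) {
  struct batch b = { .n = 0 };
  int x = 42;
  batch_insert(&b, &x);
  batch_publish(&b);  /* flush early, as send_pktbuff does on success */
  return 0;
}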
Example #6
void *dispatcher(void *threadarg) {
  assert(threadarg);

  struct thread_context *context;
  struct thread_context *contexts;
  int rv;
  struct netmap_ring *rxring;
  struct ethernet_pkt *etherpkt;
  struct pollfd pfd;
  struct dispatcher_data *data;
  uint32_t *slots_used, *open_transactions;
  uint32_t i, arpd_idx, num_threads;
  char *buf;
  struct msg_hdr *msg;
  struct ether_addr *mac;

  context = (struct thread_context *)threadarg;
  contexts = context->shared->contexts;
  data = context->data;
  arpd_idx = context->shared->arpd_idx;
  mac = &context->shared->if_info->mac;
  num_threads = context->shared->num_threads;

  struct transaction *transactions[num_threads];
  uint64_t dropped[num_threads];
  for (i=0; i < num_threads; i++) {
    transactions[i] = NULL;
    dropped[i] = 0;
  }

  rv = dispatcher_init(context);
  if (!rv) {
    pthread_exit(NULL);
  }

  rxring = NETMAP_RXRING(data->nifp, 0);
  slots_used = bitmap_new(rxring->num_slots);
  if (!slots_used)
    pthread_exit(NULL);

  open_transactions = bitmap_new(num_threads);
  if (!open_transactions)
    pthread_exit(NULL);

  pfd.fd = data->fd;
  pfd.events = (POLLIN);

  printf("dispatcher[%d]: initialized\n", context->thread_id);
  // signal to main() that we are initialized
  atomic_store_explicit(&context->initialized, 1, memory_order_release);

  for (;;) {
    rv = poll(&pfd, 1, POLL_TIMEOUT);

    // read all packets from the ring
    if (rv > 0) {
      for (; rxring->avail > 0; rxring->avail--) {
        i = rxring->cur;
        rxring->cur = NETMAP_RING_NEXT(rxring, i);
        rxring->reserved++;
        buf = NETMAP_BUF(rxring, rxring->slot[i].buf_idx);
        etherpkt = (struct ethernet_pkt *)(void *)buf;

        // TODO: consider pushing this check to the workers
        if (!ethernet_is_valid(etherpkt, mac)) {
          if (rxring->reserved == 1)
            rxring->reserved = 0;
          continue;
        }

        // TODO: dispatch to n workers instead of just 0
        switch (etherpkt->h.ether_type) {
          case IP4_ETHERTYPE:
            rv = tqueue_insert(contexts[0].pkt_recv_q,
                               &transactions[0], (char *) NULL + i);
            switch (rv) {
              case TQUEUE_SUCCESS:
                bitmap_set(slots_used, i);
                bitmap_set(open_transactions, 0);
                break;
              case TQUEUE_TRANSACTION_FULL:
                bitmap_set(slots_used, i);
                bitmap_clear(open_transactions, 0);
                break;
              case TQUEUE_FULL:
                // just drop packet and do accounting
                dropped[0]++;
                if (rxring->reserved == 1)
                  rxring->reserved = 0;
                break;
            }
            break;
          case ARP_ETHERTYPE:
            rv = tqueue_insert(contexts[arpd_idx].pkt_recv_q,
                               &transactions[arpd_idx], (char *) NULL + i);
            switch (rv) {
              case TQUEUE_SUCCESS:
                tqueue_publish_transaction(contexts[arpd_idx].pkt_recv_q,
                                            &transactions[arpd_idx]);
                bitmap_set(slots_used, i);
                break;
              case TQUEUE_TRANSACTION_FULL:
                bitmap_set(slots_used, i);
                break;
              case TQUEUE_FULL:
                // just drop packet and do accounting
                dropped[arpd_idx]++;
                if (rxring->reserved == 1)
                  rxring->reserved = 0;
                break;
            }
            break;
          default:
            printf("dispatcher[%d]: unknown/unsupported ethertype %hu\n",
                    context->thread_id, etherpkt->h.ether_type);
            if (rxring->reserved == 1)
              rxring->reserved = 0;
        } // switch (ethertype)
      } // for (rxring)

      // publish any open transactions so that the workers can start on them
      for (i=0; i < num_threads; i++) {
        if (bitmap_get(open_transactions, i))
          tqueue_publish_transaction(contexts[i].pkt_recv_q, &transactions[i]);
      }
      bitmap_clearall(open_transactions, num_threads);
    } // if (packets)

    // read the message queue
    rv = squeue_enter(context->msg_q, 1);
    if (!rv)
      continue;
    while ((msg = squeue_get_next_pop_slot(context->msg_q)) != NULL) {
      switch (msg->msg_type) {
        case MSG_TRANSACTION_UPDATE:
          update_slots_used(context->msg_q, slots_used, rxring);
          break;
        case MSG_TRANSACTION_UPDATE_SINGLE:
          update_slots_used_single((void *)msg, slots_used, rxring);
          break;
        default:
          printf("dispatcher: unknown message %hu\n", msg->msg_type);
      }
    }
    squeue_exit(context->msg_q);

  } // for(;;)

  pthread_exit(NULL);
}
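
Note: the (char *) NULL + i expressions above smuggle an integer ring-slot index through the queue's pointer-sized payload; arithmetic on a null pointer is technically undefined behavior in C. A more conventional spelling of the same round trip (hypothetical helper names):

#include <stdint.h>

/* hypothetical helpers: carry a slot index inside a void * payload */
static inline void *index_to_ptr(uint32_t i) {
  return (void *)(uintptr_t)i;
}

static inline uint32_t ptr_to_index(void *p) {
  return (uint32_t)(uintptr_t)p;
}

The worker side would then recover the slot index with ptr_to_index() before touching the corresponding netmap buffer.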