Example #1
/* schedules a thread for deletion */
void destroy_thread(struct Thread *thread) {
	/* before entering - make sure it's not awake */
	lock_interrupts();

	struct Process *process = thread->process;

	/* remove this thread from its process (or the kernel thread list) */
	if(thread->next != 0)
		thread->next->previous = thread->previous;

	if(thread->previous != 0)
		thread->previous->next = thread->next;
	else if(process == 0)
		kernel_threads = thread->next;
	else
		process->threads = thread->next;

	/* decrement the count no matter where the thread sat in the list */
	if(process != 0)
		process->threads_count--;

	/* schedule this thread for deletion */
	thread->next = next_thread_to_clean;
	next_thread_to_clean = thread;

	/* wake up the thread cleaner */
	schedule_thread(thread_cleaner_thread);

	unlock_interrupts();
}
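For orientation, here is a rough sketch of the declarations the thread examples appear to assume. The field types, and the exact layout of struct Thread, are inferred purely from how the snippets use them, so treat this as an assumption rather than the project's real headers.

#include <stdbool.h>
#include <stddef.h>

/* Inferred sketch only - every type below is a guess based on usage in the examples. */
struct Process;   /* owning process, defined elsewhere by the kernel */
struct isr_regs;  /* saved interrupt register frame, defined elsewhere */

struct Thread {
	struct Process *process;          /* 0 for kernel threads */
	struct Thread *next, *previous;   /* per-process (or kernel) thread list */
	struct Thread *next_awake;        /* scheduler queue link */
	size_t pml4;                      /* page table root (type assumed) */
	size_t stack;                     /* virtual address of the stack page */
	struct isr_regs *registers;       /* register frame at the top of the stack */
	size_t id;
	bool awake, awake_in_process;
	size_t time_slices;
	char fpu_registers[512];          /* FXSAVE area */
};

/* Globals the examples reference. */
extern struct Thread *kernel_threads;        /* head of the kernel thread list */
extern struct Thread *next_thread_to_clean;  /* singly linked "limbo" list */
extern struct Thread *thread_cleaner_thread; /* woken up by destroy_thread() */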
Example #2
void 
main(void)
{
  struct netif netif;

  lwip_init();

  netif_add(&netif, IP4_ADDR_ANY, IP4_ADDR_ANY, IP4_ADDR_ANY, NULL, netif_init, netif_input);
  netif.name[0] = 'e';
  netif.name[1] = '0';
  netif_create_ip6_linklocal_address(&netif, 1);
  netif.ip6_autoconfig_enabled = 1;
  netif_set_status_callback(&netif, netif_status_callback);
  netif_set_default(&netif);
  netif_set_up(&netif);
  
  /* Start DHCP and HTTPD */
  dhcp_start(&netif);
  httpd_init();

  while(1) {
    /* Check link state, e.g. via MDIO communication with PHY */
    if(link_state_changed()) {
      if(link_is_up()) {
        netif_set_link_up(&netif);
      } else {
        netif_set_link_down(&netif);
      }
    }

    /* Check for received frames, feed them to lwIP */
    lock_interrupts();
    struct pbuf* p = queue_try_get(&queue);
    unlock_interrupts();

    if(p != NULL) {
      LINK_STATS_INC(link.recv);
 
      /* Update SNMP stats (only if you use SNMP) */
      MIB2_STATS_NETIF_ADD(netif, ifinoctets, p->tot_len);
      int unicast = ((((u8_t *)p->payload)[0] & 0x01) == 0);
      if (unicast) {
        MIB2_STATS_NETIF_INC(netif, ifinucastpkts);
      } else {
        MIB2_STATS_NETIF_INC(netif, ifinnucastpkts);
      }

      if(netif.input(p, &netif) != ERR_OK) {
        pbuf_free(p);
      }
    }
     
    /* Cyclic lwIP timers check */
    sys_check_timeouts();
     
    /* your application goes here */
  }
}
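The netif_init and netif_status_callback functions handed to lwIP above are not shown in this example. Following the lwIP NO_SYS sample this loop mirrors, the init callback would look roughly like the sketch below; ETHERNET_MTU and mac_address stand in for driver-specific values, and netif_output is the link-output function shown in a later example.

#include "lwip/etharp.h"
#include "lwip/ethip6.h"
#include "lwip/snmp.h"

extern const u8_t mac_address[ETH_HWADDR_LEN]; /* placeholder: the MAC's hardware address */

static err_t netif_init(struct netif *netif)
{
  netif->linkoutput = netif_output;  /* raw frame transmit, see the later example */
  netif->output     = etharp_output; /* IPv4 output goes through ARP */
  netif->output_ip6 = ethip6_output; /* IPv6 output over ethernet */
  netif->mtu        = ETHERNET_MTU;  /* placeholder, typically 1500 */
  netif->flags      = NETIF_FLAG_BROADCAST | NETIF_FLAG_ETHARP | NETIF_FLAG_ETHERNET |
                      NETIF_FLAG_IGMP | NETIF_FLAG_MLD6;
  MIB2_INIT_NETIF(netif, snmp_ifType_ethernet_csmacd, 100000000);

  SMEMCPY(netif->hwaddr, mac_address, ETH_HWADDR_LEN);
  netif->hwaddr_len = ETH_HWADDR_LEN;
  return ERR_OK;
}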
Example #3
/* This thread cleans up threads in limbo. It has to run as its own thread because we
   can't deallocate a thread's stack from inside that thread's interrupt handler. */
void thread_cleaner() {
	while(sleep_if_not_set((size_t *)&next_thread_to_clean)) {
		lock_interrupts();
		struct Thread *thread = next_thread_to_clean;
		if(thread) {
			next_thread_to_clean = thread->next;

			struct Process *process = thread->process;

			/* release used memory */
			unmap_physical_page(process ? process->pml4 : kernel_pml4, thread->stack, true);
			free(thread);
		}

		unlock_interrupts();
	}
}
Example #4
static err_t netif_output(struct netif *netif, struct pbuf *p)
{
  LINK_STATS_INC(link.xmit);

  /* Update SNMP stats (only if you use SNMP) */
  MIB2_STATS_NETIF_ADD(netif, ifoutoctets, p->tot_len);
  int unicast = ((((u8_t *)p->payload)[0] & 0x01) == 0);
  if (unicast) {
    MIB2_STATS_NETIF_INC(netif, ifoutucastpkts);
  } else {
    MIB2_STATS_NETIF_INC(netif, ifoutnucastpkts);
  }

  lock_interrupts();
  pbuf_copy_partial(p, mac_send_buffer, p->tot_len, 0);
  /* Start MAC transmit here */
  unlock_interrupts();

  return ERR_OK;
}
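netif_output above is the transmit half; the receive half, which produces the pbufs that the main loop in Example #2 drains with queue_try_get(), is not part of these examples. A sketch of what the MAC's receive interrupt could do, where eth_mac_irq, eth_data, eth_data_count, and queue_try_put() are all hypothetical driver-side names assumed to match the queue used earlier:

#include "lwip/pbuf.h"

extern u8_t *eth_data;        /* placeholder: pointer to the received frame */
extern u16_t eth_data_count;  /* placeholder: length of the received frame */

void eth_mac_irq(void)        /* hypothetical MAC receive interrupt handler */
{
  /* Allocate a pbuf chain big enough for the whole frame */
  struct pbuf *p = pbuf_alloc(PBUF_RAW, eth_data_count, PBUF_POOL);
  if(p != NULL) {
    /* Copy the frame out of the MAC buffer into the pbuf chain */
    pbuf_take(p, eth_data, eth_data_count);
    /* Hand it over to the main loop; queue_try_put() is the assumed
       counterpart of the queue_try_get() used there */
    if(!queue_try_put(&queue, p)) {
      pbuf_free(p); /* queue full - drop the frame */
    }
  }
}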
Example #5
struct Thread *create_thread(struct Process *process, size_t entry_point, size_t params) {
	lock_interrupts();

	struct Thread *thread = malloc(sizeof(struct Thread));
	if(thread == 0) {
		unlock_interrupts();
		return 0; /* out of memory */
	}

	/* set up the stack - grab a virtual page */	
	thread->pml4 = process ? process->pml4 : kernel_pml4;
	size_t virt_page = find_free_page_range(thread->pml4, 1);
	if(virt_page == 0) {
		free(thread); /* out of memory */
		unlock_interrupts();
		return 0;
	}

	/* grab a physical page */
	size_t phys = get_physical_page();
	if(phys == 0) {
		free(thread); /* out of memory */
		unlock_interrupts();
		return 0;
	}


	/* map the new stack */
	map_physical_page(thread->pml4, virt_page, phys);

	/* now map this page for us to play with */
	size_t temp_addr = (size_t)map_physical_memory(phys, 0);

	/* set up our initial registers */
	struct isr_regs *regs = (struct isr_regs *)(temp_addr + page_size - sizeof(struct isr_regs));
	regs->r15 = 0; regs->r14 = 0; regs->r13 = 0; regs->r12 = 0; regs->r11 = 0; regs->r10 = 0; regs->r9 = 0; regs->r8 = 0;
	regs->rbp = virt_page + page_size; regs->rdi = params; regs->rsi = 0; regs->rdx = 0; regs->rcx = 0; regs->rbx = 0; regs->rax = 0;
	regs->int_no = 0; regs->err_code = 0;
	regs->rip = entry_point; regs->cs = 0x08;
	regs->eflags = 
		((!process) ? ((1 << 12) | (1 << 13)) : 0) | /* set iopl bits for kernel threads */
		(1 << 9) | /* interrupts enabled */
		(1 << 21) /* can use CPUID */; 
	regs->usersp = virt_page + page_size; regs->ss = 0x10;

	/* set up the thread object */
	thread->process = process;
	thread->stack = virt_page;
	thread->registers = (struct isr_regs *)(virt_page + page_size - sizeof(struct isr_regs));
	thread->id = next_thread_id;
	next_thread_id++;
	thread->awake = false;
	thread->awake_in_process = false;
	thread->time_slices = 0;
	/*print_string("Virtual page: ");
	print_hex(virt_page);
	asm("hlt");*/

	/* add it to the linked list of threads */
	thread->previous = 0;
	if(!process) {
		if(kernel_threads)
			kernel_threads->previous = thread;
		thread->next = kernel_threads;
		kernel_threads = thread;
	} else {
		if(process->threads)
			process->threads->previous = thread;
		thread->next = process->threads;
		process->threads = thread;
		process->threads_count++;
	}

	/* populate the fpu registers with something */
	memset(thread->fpu_registers, 0, 512);

	/* initially asleep */
	thread->next_awake = 0;
	thread->previous = 0;

	unlock_interrupts();

	return thread;
}
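A hypothetical caller, to show how create_thread() fits together with the scheduler: passing 0 for the process produces a kernel thread on the kernel_threads list, and the new thread comes back asleep, so presumably something like schedule_thread() (seen in the destroy_thread example) has to wake it. worker_entry and the parameter value are made up for illustration.

/* Hypothetical usage sketch. */
void worker_entry(size_t param);  /* thread entry point, defined elsewhere */

void spawn_worker(void) {
	/* process == 0 -> kernel thread */
	struct Thread *thread = create_thread(0, (size_t)worker_entry, /* params = */ 42);
	if(thread == 0)
		return; /* out of memory */

	/* create_thread() leaves the new thread asleep; wake it so it gets scheduled */
	schedule_thread(thread);
}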