int main( void ) { // putc('a'); vidmem[1] = 0x7; // putc('1'); vidmem[3] = 0x7; // putc('2'); sched_add_task(task0); sched_add_task(task1); sched_run(); }
int main() { char name[20]; //name of the user cmd_welcome(name); //welcome screen getchar(); //wait for the user input new_line(); //go to new line sched_add_task(task0); sched_add_task(task1); sched_add_task(task2); sched_run(); return 0; }
/**
 * Create a new execution context (task) that will run fct_ptr(arguments),
 * enqueue it on the scheduler, and return a pointer to it.
 *
 * @param fct_ptr    entry function for the new task (cast to function_t)
 * @param arguments  opaque argument pointer handed to the task
 * @param return_val where the task's result is stored; initialized to -1 here.
 *                   NOTE(review): dereferenced unconditionally as an int* —
 *                   callers must not pass NULL; confirm this contract.
 * @return the newly created task.
 */
task_t* task_spawn(void* fct_ptr, void* arguments, void *return_val) {
    task_t* new_task;
    task_t* current_task = task_current();

    /* Allocate the task control block (macro presumably also allocates
     * new_task->context — TODO confirm) and init its mutexes. */
    ALLOCATE_TASK(new_task);
    pthread_mutex_init(&new_task->lock, NULL);
    pthread_mutex_init(&new_task->running_lock, NULL);

    if (inside_main()) {
        debug("Spawn called from main.");
    } else {
        /* Spawned from a worker: log which worker/parent asked for it.
         * A negative parent id is treated as a fatal inconsistency. */
        int parent_id = (current_task->parent) ? (current_task->parent->id) : 0;
        if (parent_id < 0 )
            exit(1);
        debug("Spawn called from a worker %d, parent %d", current_task->id, parent_id);
    }

    /* Pre-set the result slot to -1 so a reader can tell "not finished yet". */
    *((int*)return_val) = -1;
    new_task->arguments = (void*) arguments;
    new_task->function = (function_t) fct_ptr;
    new_task->result = return_val;
    new_task->status = STARTED;
    /* When the task's function returns, control resumes in go_home. */
    new_task->context->uc_link = &go_home;
    new_task->parent = current_task;
    if (!inside_main()) {
        /* Workers track how many children they have spawned. */
        task_inc_children_count(current_task);
    }

    /* Capture the current CPU state into the new task's context, then
     * retarget it at the scheduler wrapper.  The status check guards the
     * makecontext/enqueue so it runs only on the initial getcontext return
     * (presumably status is changed elsewhere before any later resume —
     * TODO confirm against the scheduler). */
    getcontext(new_task->context);
    if (new_task->status == STARTED) {
        makecontext(new_task->context, (void (*) (void)) sched_wrapper_function, 0);
        sched_add_task(new_task);
    }
    return new_task;
}
struct task *create_task(const char *name, int pid) { struct task *tsk = kmalloc(sizeof(*tsk)); tsk->state = TASK_RUNNABLE; tsk->pid = pid; tsk->name = name; tsk->thread.cpu.pc = (unsigned long) ret_from_fork; unsigned *stack = page_alloc(); stack += (PAGE_SIZE >> 2) - 1; *stack = (unsigned long) test_task; tsk->thread.cpu.sp = (unsigned) stack; sched_add_task(tsk); return tsk; }
/*
 * Bring up the muksh shell: create its task with do_muksh as the entry
 * point, mark it ready with the standard timeslice, and queue it on the
 * scheduler.
 *
 * Returns ERROR_SUCCESS on success, ERROR_FAILURE if the task could not
 * be created.
 */
error_t muksh_initialize(void)
{
    task_t *shell = task_create_with_entry(do_muksh, NULL);

    if (shell == NULL) {
        return ERROR_FAILURE;
    }

    task_set_state(shell, TASK_STATE_READY);
    task_set_timeslice(shell, TASK_SCHED_TIMESLICE);
    sched_add_task(shell);

    return ERROR_SUCCESS;
}
/**
 * This is the architecture-independent kernel entry point. Before it is
 * called, architecture-specific code has done the bare minimum initialization
 * necessary. This function initializes the kernel and its various subsystems.
 * It calls back to architecture-specific code at several well defined points,
 * which all architectures must implement (e.g., setup_arch()).
 *
 * \callgraph
 */
void start_kernel()
{
    unsigned int cpu;
    unsigned int timeout;
    int status;

    /*
     * Parse the kernel boot command line.
     * This is where boot-time configurable variables get set,
     * e.g., the ones with param() and DRIVER_PARAM() specifiers.
     */
    parse_params(lwk_command_line);

    /*
     * Initialize the console subsystem.
     * printk()'s will be visible after this.
     */
    console_init();

    /*
     * Hello, Dave.
     */
    printk("%s", lwk_banner);
    printk(KERN_DEBUG "%s\n", lwk_command_line);

    /* Sort the exception table so fixups can be binary-searched later. */
    sort_exception_table();

    /*
     * Do architecture specific initialization.
     * This detects memory, CPUs, architecture dependent irqs, etc.
     */
    setup_arch();

    /*
     * Setup the architecture independent interrupt handling.
     */
    irq_init();

    /*
     * Initialize the kernel memory subsystem. Up until now, the simple
     * boot-time memory allocator (bootmem) has been used for all dynamic
     * memory allocation. Here, the bootmem allocator is destroyed and all
     * of the free pages it was managing are added to the kernel memory
     * pool (kmem) or the user memory pool (umem).
     *
     * After this point, any use of the bootmem allocator will cause a
     * kernel panic. The normal kernel memory subsystem API should be used
     * instead (e.g., kmem_alloc() and kmem_free()).
     */
    mem_subsys_init();

    /*
     * Initialize the address space management subsystem.
     */
    aspace_subsys_init();

    /*
     * Initialize the task scheduling subsystem for this CPU.
     */
    sched_init_runqueue(0);   /* This CPUs scheduler state + idle task */
    sched_add_task(current);  /* now safe to call schedule() */

    /* Start the per-CPU scheduler tick timer. */
    core_timer_init(0);

    /* Start the kernel filesystems */
    kfs_init();

    /*
     * Initialize the random number generator.
     */
    rand_init();

    /* Initialize the kernel work queues. */
    workq_init();

    /*
     * Boot all of the other CPUs in the system, one at a time.
     */
    printk(KERN_INFO "Number of CPUs detected: %d\n", num_cpus());
    for_each_cpu_mask(cpu, cpu_present_map) {
        /* The bootstrap CPU (that's us) is already booted. */
        if (cpu == 0) {
            cpu_set(cpu, cpu_online_map);
            continue;
        }

        printk(KERN_DEBUG "Booting CPU %u.\n", cpu);
        arch_boot_cpu(cpu);

        /* Wait for ACK that CPU has booted (5 seconds max). */
        for (timeout = 0; timeout < 50000; timeout++) {
            if (cpu_isset(cpu, cpu_online_map))
                break;
            udelay(100);
        }

        if (!cpu_isset(cpu, cpu_online_map))
            panic("Failed to boot CPU %d.\n", cpu);
    }

    /*
     * Initialize the PCI subsystem.
     */
    init_pci();

    /*
     * Enable external interrupts.
     */
    local_irq_enable();

#ifdef CONFIG_NETWORK
    /*
     * Bring up any network devices.
     */
    netdev_init();
#endif

#ifdef CONFIG_CRAY_GEMINI
    driver_init_list("net", "gemini");
#endif

#ifdef CONFIG_BLOCK_DEVICE
    /**
     * Initialize the block devices
     */
    blkdev_init();
#endif

    /* Late machine-check initialization. */
    mcheck_init_late();

    /*
     * And any modules that need to be started.
     */
    driver_init_by_name( "module", "*" );

#ifdef CONFIG_KGDB
    /*
     * Stop early (before "late" devices) in KGDB if requested
     */
    kgdb_initial_breakpoint();
#endif

    /*
     * Bring up any late init devices.
     */
    driver_init_by_name( "late", "*" );

    /*
     * Bring up the Linux compatibility layer, if enabled.
     */
    linux_init();

#ifdef CONFIG_DEBUG_HW_NOISE
    /* Measure noise/interference in the underlying hardware/VMM */
    extern void measure_noise(int, uint64_t);
    measure_noise(0, 0);
#endif

    /*
     * Start up user-space...
     */
    printk(KERN_INFO "Loading initial user-level task (init_task)...\n");
    if ((status = create_init_task()) != 0)
        panic("Failed to create init_task (status=%d).", status);

    /* Retire the bootstrap context and switch to the init task. */
    current->state = TASK_EXITED;
    schedule();  /* This should not return */
    BUG();
}
/*
 * clone() system call: create a new task (thread) in the caller's
 * address space and add it to the scheduler.
 *
 * Only thread-style clones are supported: the caller must pass every
 * flag in required_flags (share VM, fs, files, signal handlers, thread
 * group, and SysV semaphores); anything else gets -ENOSYS.
 *
 * Returns the new task's id, or a negative errno on failure.
 */
long
sys_clone(
    unsigned long   flags,
    unsigned long   new_stack_ptr,
    int __user *    parent_tid_ptr,
    int __user *    child_tid_ptr,
    struct pt_regs *parent_regs
)
{
    /* Only allow creating tasks that share everything with their parent */
    unsigned long required_flags =
        ( CLONE_VM | CLONE_FS | CLONE_FILES | CLONE_SIGHAND | CLONE_THREAD | CLONE_SYSVSEM );
    if ((flags & required_flags) != required_flags) {
        printk(KERN_WARNING "Unsupported clone() flags 0x%lx.\n", flags);
        return -ENOSYS;
    }

    /* The child inherits the parent's credentials and address space,
     * starts at the parent's saved instruction pointer, and runs on the
     * caller-supplied stack. */
    start_state_t start_state = {
        .task_id     = ANY_ID,
        .user_id     = current->uid,
        .group_id    = current->gid,
        .aspace_id   = current->aspace->id,
        .cpu_id      = ANY_ID,
        .stack_ptr   = new_stack_ptr,
        .entry_point = USE_PARENT_IP,
        .use_args    = 0,
    };

    struct task_struct *tsk = __task_create(&start_state, parent_regs);
    if (!tsk)
        return -EINVAL;

    /* Name the new task something semi-sensible */
    snprintf(tsk->name, sizeof(tsk->name), "%s.thread_%02u",
             strlen(current->name) ? current->name : "noname",
             tsk->id - tsk->aspace->id);

    /* Optionally initialize the task's set_child_tid and clear_child_tid */
    if ((flags & CLONE_CHILD_SETTID))
        tsk->set_child_tid = child_tid_ptr;
    if ((flags & CLONE_CHILD_CLEARTID))
        tsk->clear_child_tid = child_tid_ptr;

    /* Optionally write the new task's ID to user-space memory.
     * It doesn't really matter if these fail. */
    int tid = tsk->id;
    if ((flags & CLONE_PARENT_SETTID))
        put_user(tid, parent_tid_ptr);
    if ((flags & CLONE_CHILD_SETTID))
        put_user(tid, child_tid_ptr);

    /* Add the new task to the target CPU's run queue */
    sched_add_task(tsk);

    return tsk->id;
}
// Spawn the base-station task: configure its control block and hand it
// to the scheduler.
void basestation_init() {
    // Low-priority task running basestation_func with no argument, using
    // the statically allocated basestation_stack.
    basestation_task.setup("basestation", Task::LOW, basestation_func, nullptr,
                           basestation_stack, sizeof(basestation_stack));

    // NOTE(review): KernelCriticalSection is presumably an RAII guard that
    // keeps the scheduler state protected from its construction to the end
    // of this function, covering sched_add_task() — confirm its semantics.
    KernelCriticalSection crit;
    sched_add_task(basestation_task);
}