struct task_struct * __cpuinit fork_idle(int cpu) { struct task_struct *task; struct pt_regs regs; task = copy_process(CLONE_VM, 0, idle_regs(&regs), 0, NULL, &init_struct_pid, 0, 0); if (!IS_ERR(task)) { init_idle_pids(task->pids); init_idle(task, cpu); } return task; }
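fork_idle() above builds a per-CPU idle task by cloning with CLONE_VM and a zeroed register frame, then turning the result into an idle thread with init_idle(). A minimal sketch of the kind of bring-up loop that consumes it follows; the storage array and function name are illustrative, and only fork_idle(), for_each_possible_cpu(), IS_ERR() and PTR_ERR() come from the snippet or the kernel proper.

/* Hypothetical caller sketch: how per-CPU idle tasks might be created at
 * SMP bring-up time in a kernel of this vintage. */
static struct task_struct *idle_tasks[NR_CPUS];     /* assumed storage */

static int __init create_idle_tasks(void)
{
        int cpu;

        for_each_possible_cpu(cpu) {
                struct task_struct *idle = fork_idle(cpu);

                if (IS_ERR(idle))
                        return PTR_ERR(idle);        /* bring-up fails for this CPU */
                idle_tasks[cpu] = idle;              /* handed to the arch bring-up code later */
        }
        return 0;
}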
//fork a child process; kernel threads must not call this directly pid_t sys_fork(void){ struct task_struct* parent_thread = running_thread(); struct task_struct* child_thread = get_kernel_pages(1); //allocate a PCB (task_struct) for the child process if(child_thread == NULL){ return -1; } ASSERT(INTR_OFF == intr_get_status() && parent_thread->pgdir != NULL); if(copy_process(child_thread,parent_thread) == -1){ return -1; } //add the child to the ready-thread list and the all-threads list; the scheduler will run it ASSERT(!elem_find(&thread_ready_list,&child_thread->general_tag)); list_append(&thread_ready_list,&child_thread->general_tag); ASSERT(!elem_find(&thread_all_list,&child_thread->all_list_tag)); list_append(&thread_all_list,&child_thread->all_list_tag); return child_thread->pid; }
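The hobby-kernel sys_fork() above returns the child's PID to the parent, while copy_process() patches the child's copied kernel stack so the child itself observes a return value of 0. The same contract can be exercised from POSIX user space; a minimal, self-contained example:

/* Minimal user-space illustration of the contract sys_fork implements:
 * the parent receives the child's PID, the child receives 0, -1 means failure. */
#include <stdio.h>
#include <stdlib.h>
#include <sys/types.h>
#include <sys/wait.h>
#include <unistd.h>

int main(void)
{
        pid_t pid = fork();

        if (pid < 0) {                                  /* no PCB/page could be allocated */
                perror("fork");
                return EXIT_FAILURE;
        }
        if (pid == 0) {                                 /* child path */
                printf("child: pid=%d\n", (int)getpid());
                _exit(EXIT_SUCCESS);
        }
        printf("parent: child pid=%d\n", (int)pid);     /* parent path */
        waitpid(pid, NULL, 0);
        return EXIT_SUCCESS;
}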
/* * The fork system call. NOTE! Its arguments must stay in sync with copy_process(). fork only creates * a new process; the child temporarily shares the parent's page tables. Only when the parent or the child * writes to a page does the child get page tables and pages of its own. */ int sys_fork(long none, long ebx, long ecx, long edx, long gs, long fs, long es, long ds, long ebp, long esi, long edi, long eip, long cs, long eflags, long esp, long ss) { int nr = 0; if(!(nr = find_empty_process())) { k_printf("fork: have no empty-process!"); return -1; } if(0 == (proc[nr]=(struct proc_struct *)get_free_page())) { k_printf("fork: have no free-page!"); return -1; } d_printf("pid-%d-proc-struct ADDR: %x.\n", last_pid, proc[nr]); copy_process(nr, gs, fs, es, ds, edi, esi, ebp, edx, ecx, ebx, eip, cs, eflags, esp, ss); proc[nr]->state = RUNNING; return last_pid; }
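This 0.11-style sys_fork() relies on find_empty_process() to reserve a PID and a free slot in proc[], treating a return of 0 as failure. That helper is not shown here; the sketch below is a hypothetical reconstruction that matches the names used above (proc[], last_pid) and assumes an NR_PROC-sized table with slot 0 reserved for the initial task.

/* Hypothetical sketch of find_empty_process() as this sys_fork() expects it:
 * pick an unused PID, then return the index of a free slot in proc[].
 * NR_PROC, the pid field and the scan details are assumptions. */
static int find_empty_process(void)
{
        int i;

repeat:
        if (++last_pid < 0)                     /* PID counter wrapped around */
                last_pid = 1;
        for (i = 0; i < NR_PROC; i++)           /* make sure last_pid is unused */
                if (proc[i] && proc[i]->pid == last_pid)
                        goto repeat;
        for (i = 1; i < NR_PROC; i++)           /* find a free proc[] slot */
                if (!proc[i])
                        return i;
        return 0;                               /* table full: caller treats 0 as failure */
}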
/* * Ok, this is the main fork-routine. * * It copies the process, and if successful kick-starts * it and waits for it to finish using the VM if required. */ long do_fork(unsigned long clone_flags, unsigned long stack_start, struct pt_regs *regs, unsigned long stack_size, int __user *parent_tidptr, int __user *child_tidptr) { struct task_struct *p; int trace = 0; long nr; /* * Do some preliminary argument and permissions checking before we * actually start allocating stuff */ if (clone_flags & CLONE_NEWUSER) { if (clone_flags & CLONE_THREAD) return -EINVAL; /* hopefully this check will go away when userns support is * complete */ if (!capable(CAP_SYS_ADMIN) || !capable(CAP_SETUID) || !capable(CAP_SETGID)) return -EPERM; } /* * When called from kernel_thread, don't do user tracing stuff. */ if (likely(user_mode(regs))) trace = tracehook_prepare_clone(clone_flags); p = copy_process(clone_flags, stack_start, regs, stack_size, child_tidptr, NULL, trace); /* * Do this prior waking up the new thread - the thread pointer * might get invalid after that point, if the thread exits quickly. */ if (!IS_ERR(p)) { struct completion vfork; trace_sched_process_fork(current, p); nr = task_pid_vnr(p); if (clone_flags & CLONE_PARENT_SETTID) put_user(nr, parent_tidptr); if (clone_flags & CLONE_VFORK) { p->vfork_done = &vfork; init_completion(&vfork); } audit_finish_fork(p); tracehook_report_clone(regs, clone_flags, nr, p); /* * We set PF_STARTING at creation in case tracing wants to * use this to distinguish a fully live task from one that * hasn't gotten to tracehook_report_clone() yet. Now we * clear it and set the child going. */ p->flags &= ~PF_STARTING; wake_up_new_task(p); tracehook_report_clone_complete(trace, regs, clone_flags, nr, p); if (clone_flags & CLONE_VFORK) { freezer_do_not_count(); wait_for_completion(&vfork); freezer_count(); tracehook_report_vfork_done(p, nr); } } else { nr = PTR_ERR(p); } return nr; }
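For context, the fork(), vfork() and clone() system calls of this kernel generation are thin arch-level wrappers that differ only in the clone_flags they hand to do_fork(). A simplified sketch in the style of the x86 wrappers of that era follows; exact signatures and the clone() argument order vary by architecture, so treat it as illustrative rather than definitive.

/* Sketch of how the x86 wrappers of this era feed do_fork(). */
int sys_fork(struct pt_regs *regs)
{
        return do_fork(SIGCHLD, regs->sp, regs, 0, NULL, NULL);
}

int sys_vfork(struct pt_regs *regs)
{
        /* CLONE_VFORK | CLONE_VM is what makes do_fork() wait on the completion */
        return do_fork(CLONE_VFORK | CLONE_VM | SIGCHLD, regs->sp, regs, 0, NULL, NULL);
}

long sys_clone(unsigned long clone_flags, unsigned long newsp,
               void __user *parent_tid, void __user *child_tid,
               struct pt_regs *regs)
{
        if (!newsp)
                newsp = regs->sp;               /* default to the caller's stack pointer */
        return do_fork(clone_flags, newsp, regs, 0, parent_tid, child_tid);
}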
/* * Simulates a multilevel feedback queue cpu scheduling algorithm * TODO Update the file parsing to be smarter and provide documentation */ int main(int argc, char** argv) { const int FIELD_LENGTH = 1024; //Default string allocation char line[FIELD_LENGTH]; //Buffer for line reading of master file char* sub_out; //Output of substring checks for certain fields int read_T1 = 0; //0 if T1 hasnt been read yet int read_T2 = 0; //0 if T2 hasnt been read yet int T1, T2; //Time Quantums int processCount = 0; //Number of processes read from the file int processCompleted = 0; //Counter for number of process completed Queue* futureProcesses = new_queue(0); //Queue of all the processes that havent technically been submitted yet Queue* completed = new_queue(-1); //Queue of all completed processes, for output of averaging at the end int i; //Open the file passed in as the only argument to the program FILE* master_file = fopen(argv[1], "rt"); if (master_file == 0) { fprintf(stderr, "Failed to open %s\n", argv[1]); exit(1); } //Open output file for logging functions of the simulator FILE* out_file = fopen("cpu_output.txt", "w"); fprintf(out_file, "############################################################\n"); fprintf(out_file, "Logging for Multi-Level Queue Scheduling Simulation Started!\n"); fprintf(out_file, "############################################################\n\n"); //Read in the file and create necessary process objects if (master_file != NULL) { //Read line by line to get T1 and T2 while (fgets(line, FIELD_LENGTH, master_file) != NULL) { if (strcmp(line, "\n") == 0) { //SKIP NEW LINES IN THE FILE } else { //Change the line to all lowercase array_to_lower(line); //Read in TQ#1 and TQ#2 if (!read_T1) { sub_out = strstr(line, "time quantum 1:"); if (sub_out != NULL) { read_T1 = 1; T1 = get_num(sub_out); } } else if (!read_T2) { sub_out = strstr(line, "time quantum 2:"); if (sub_out != NULL) { read_T2 = 1; T2 = get_num(sub_out); break; } } } } //Initialize a CPU CPU* cpu = new_cpu(T1, T2); //Read in the rest of the file and create all processes and their associated bursts while (fgets(line, FIELD_LENGTH, master_file) != NULL) { if (strcmp(line, "\n") == 0) { //SKIP NEW LINES IN THE FILE } else { //Change the line to all lowercase array_to_lower(line); Process* end = get_end(futureProcesses); //Get the process id and create a new process for it sub_out = strstr(line, "process id:"); if (sub_out != NULL) { int id = get_num(sub_out); Process* process = new_process(id); if (end != NULL) { end->next = process; } else { futureProcesses->processes = process; } processCount++; } //Get the arrival time of the most recently created process sub_out = strstr(line, "arrival time:"); if (sub_out != NULL) { int arrival = get_num(sub_out); end->arrival_time = arrival; } //Create a cpu burst of the recently created process sub_out = strstr(line, "cpu burst:"); if (sub_out != NULL) { int length = get_num(sub_out); Burst* cpu_burst = new_cpu_burst(length); Burst* burst_end = get_last_burst(end); if (burst_end != NULL) { burst_end->next_burst = cpu_burst; } else { end->bursts = cpu_burst; } } //Create a i/o burst for the recently created process sub_out = strstr(line, "i/o burst:"); if (sub_out != NULL) { int length = get_num(sub_out); Burst* io_burst = new_io_burst(length); Burst* burst_end = get_last_burst(end); if (burst_end != NULL) { burst_end->next_burst = io_burst; } else { end->bursts = io_burst; } } //Set the device id for the recently created i/o burst sub_out = strstr(line, "i/o device id:"); if 
(sub_out != NULL) { int id = get_num(sub_out); Burst* burst_end = get_last_burst(end); if (burst_end != NULL) { burst_end->device_num = id; } else { end->bursts->device_num = id; } } } } fclose(master_file); //Handle if no processes are read in if (processCount == 0) { fclose(out_file); fprintf(stderr, "Zero processes were read in.\nThis could be correct or an error in the format of the input file.\nPlease verify that the input file is formatted correctly. \n"); exit(1); } //Run the simulation of the cpu until all processes have been completed while (processCompleted < processCount) { //Go through the list of processes that havent arrived yet Process* tempProc = futureProcesses->processes; Process* lastTemp = NULL; while (tempProc != NULL) { if (tempProc->arrival_time == cpu->time) { //Create a copy of the process to move to the cpu Process* moved = copy_process(tempProc); fprintf(out_file, "Time: %-5d\tProcess#%d Arrived\n", cpu->time, moved->id); if (moved->bursts->type == 1) { //Initial burst is cpu printf("NEW PROCESS CPU FIRST\n"); fprintf(out_file, "Time: %-5d\tInitial burst of Process#%d is a CPU burst\n", cpu->time, moved->id); if (cpu->idle == 1) { //If nothing is running on the cpu fprintf(out_file, "Time: %-5d\tCPU was found to be idle, running process on CPU\n", cpu->time); moved->state = 1; cpu->current_process = moved; cpu->idle = 0; cpu->queue = 1; printQueues(out_file, cpu); } else { //If cpu is occupied put new process in Q1 fprintf(out_file, "Time: %-5d\tCPU was found to be busy, putting process in Q1\n", cpu->time); Process* end = get_end(cpu->Q1); moved->state = 2; if (end != NULL) { //If Stuff is already in Q1 end->next = moved; } else { //If nothing is in Q1 cpu->Q1->processes = moved; cpu->Q1->isEmpty = 0; } printQueues(out_file, cpu); } } else { //Initial burst is I/O printf("NEW PROCESS I/O FIRST\n"); Burst* i_burst = moved->bursts; int device = i_burst->device_num; IODevice* devQ; switch (device) { case 1: //Next burst is on D1 devQ = cpu->D1; break; case 2: //Next burst is on D2 devQ = cpu->D2; break; case 3: //Next burst is on D3 devQ = cpu->D3; break; case 4: //Next burst is on D4 devQ = cpu->D4; break; case 5: //Next burst is on D5 devQ = cpu->D5; break; } //Move the process to the correct i/o device fprintf(out_file, "Time: %-5d\tInitial burst of Process#%d is a I/O burst on device %d\n", cpu->time, cpu->current_process->id, devQ->id); Process* io_end = get_io_proc_end(devQ); if (io_end != NULL) { io_end->next = moved; } else { fprintf(out_file, "Time: %-5d\tProcess#%d running on I/O Device Queue %d\n", cpu->time, cpu->current_process->id, devQ->id); devQ->processes = moved; } printQueues(out_file, cpu); } //Remove the process from futureProcesses if (lastTemp != NULL) { //This wasnt the first lastTemp->next = tempProc->next; tempProc = lastTemp->next; } else { //Process was the first or only in future list futureProcesses->processes = tempProc->next; tempProc = futureProcesses->processes; } } else { //Times not equal get next process in list lastTemp = tempProc; tempProc = tempProc->next; } } //Increment running process time on cpu and decrement its remaining time //If remaining time is 0, if last burst move process to completed otherwise move it to correct i/o queue if (cpu->idle == 0) { printf("CPU NOT IDLE\n"); Burst* burst = get_next_incomplete_burst(cpu->current_process); if (burst != NULL) { printf("NEXT BURST IS NOT NULL\n"); burst->time_active++; burst->time_remaining--; if (burst->time_remaining == 0) { //Burst is complete printf("BURST 
COMPLETE\n"); burst->completed = 1; fprintf(out_file, "Time: %-5d\tCPU burst of Process#%d completed\n", cpu->time, cpu->current_process->id); if (burst->next_burst != NULL) { printf("MORE BURSTS\n"); int device = burst->next_burst->device_num; IODevice* devQ; switch (device) { case 1: //Next burst is on D1 devQ = cpu->D1; break; case 2: //Next burst is on D2 devQ = cpu->D2; break; case 3: //Next burst is on D3 devQ = cpu->D3; break; case 4: //Next burst is on D4 devQ = cpu->D4; break; case 5: //Next burst is on D5 devQ = cpu->D5; break; } //Move the process to the correct i/o device fprintf(out_file, "Time: %-5d\tProcess#%d moved to I/O Device Queue %d\n", cpu->time, cpu->current_process->id, devQ->id); Process* io_end = get_io_proc_end(devQ); cpu->current_process->next = NULL; if (io_end != NULL) { io_end->next = cpu->current_process; } else { fprintf(out_file, "Time: %-5d\tProcess#%d running on I/O Device Queue %d\n", cpu->time, cpu->current_process->id, devQ->id); devQ->processes = cpu->current_process; } } else { printf("PROCESS COMPLETE\n"); cpu->current_process->state = 0; fprintf(out_file, "Time: %-5d\tProcess#%d completed. Time waiting for CPU: %d Time waiting for I/O: %d Total completion time: %d\n", cpu->time, cpu->current_process->id, cpu->current_process->waiting_cpu, cpu->current_process->waiting_io, (cpu->time - cpu->current_process->arrival_time)); cpu->current_process->completion_t = cpu->time; processCompleted++; Process* end = get_end(completed); if (end != NULL) { //If there are already processes in completed end->next = cpu->current_process; } else { //If this is the first process in completed completed->processes = cpu->current_process; } } //Cpu is empty cpu->current_process = NULL; cpu->idle = 1; cpu->queue = 0; printQueues(out_file, cpu); } } } //Increase cpu wait time for all processes in all 3 queues Process* pq1 = get_end(cpu->Q1); while (pq1 != NULL) { printf("INCREMENTING Q1 WAIT TIMES\n"); pq1->waiting_cpu++; pq1 = pq1->next; } Process* q2 = get_end(cpu->Q2); while (q2 != NULL) { printf("INCREMENTING Q2 WAIT TIMES\n"); q2->waiting_cpu++; q2 = q2->next; } Process* pq3 = get_end(cpu->Q3); while (pq3 != NULL) { printf("INCREMENTING Q3 WAIT TIMES\n"); pq3->waiting_cpu++; pq3 = pq3->next; } //For the first process in each device queue decrement time remaining //If remaining time is 0, if last burst move process to completed otherwise move it to Q1 //Increment waiting time of process moved to front of device queue Process* pd1 = cpu->D1->processes; Process* pd2 = cpu->D2->processes; Process* pd3 = cpu->D3->processes; Process* pd4 = cpu->D4->processes; Process* pd5 = cpu->D5->processes; if (pd1 != NULL) { //Handle running process in device 1 printf("PROCESS IN D1\n"); Burst* bd1 = get_next_incomplete_burst(pd1); if (bd1 != NULL) { printf("D1 PROCESS BURST EXISTS\n"); bd1->time_remaining--; if (bd1->time_remaining == 0) { printf("BURST IN D1 FINISHED\n"); bd1->completed = 1; fprintf(out_file, "Time: %-5d\tI/O burst of Process#%d completed on device %d\n", cpu->time, pd1->id, bd1->device_num); cpu->D1->processes = pd1->next; if (cpu->D1->processes != NULL) { cpu->D1->processes->waiting_io++; fprintf(out_file, "Time: %-5d\tI/O burst of Process#%d running on device %d\n", cpu->time, pd1->next->id, bd1->device_num); } if (bd1->next_burst != NULL) { printf("MOVING PROCESS FROM D1 to Q1\n"); Process* end_q = get_end(cpu->Q1); fprintf(out_file, "Time: %-5d\tMoving Process#%d to Q1\n", cpu->time, pd1->id); pd1->next = NULL; if (end_q != NULL) { end_q->next = pd1; } else { 
cpu->Q1->processes = pd1; cpu->Q1->isEmpty = 0; } printQueues(out_file, cpu); } else { printf("PROCESS COMPLETED IN D1\n"); pd1->state = 0; Process* end_completed = get_end(completed); processCompleted++; pd1->completion_t = cpu->time; fprintf(out_file, "Time: %-5d\tProcess#%d completed. Time waiting for CPU: %d Time waiting for I/O: %d Total completion time: %d\n", cpu->time, pd1->id, pd1->waiting_cpu, pd1->waiting_io, (cpu->time - pd1->arrival_time)); if (end_completed != NULL) { end_completed->next = pd1; } else { completed->processes = pd1; completed->isEmpty = 0; } printQueues(out_file, cpu); } } } } if (pd2 != NULL) { //Handle running process in device 2 printf("PROCESS IN D2\n"); Burst* bd2 = get_next_incomplete_burst(pd2); if (bd2 != NULL) { printf("D2 PROCESS BURST EXISTS\n"); bd2->time_remaining--; if (bd2->time_remaining == 0) { printf("BURST IN D2 FINISHED\n"); bd2->completed = 1; fprintf(out_file, "Time: %-5d\tI/O burst of Process#%d completed on device %d\n", cpu->time, pd2->id, bd2->device_num); cpu->D2->processes = pd2->next; if (cpu->D1->processes != NULL) { cpu->D2->processes->waiting_io++; fprintf(out_file, "Time: %-5d\tI/O burst of Process#%d running on device %d\n", cpu->time, pd2->next->id, bd2->device_num); } if (bd2->next_burst != NULL) { printf("MOVING PROCESS FROM D2 to Q1\n"); Process* end_q = get_end(cpu->Q1); fprintf(out_file, "Time: %-5d\tMoving Process#%d to Q1\n", cpu->time, pd2->id); pd2->next = NULL; if (end_q != NULL) { end_q->next = pd2; } else { cpu->Q1->processes = pd2; cpu->Q1->isEmpty = 0; } printQueues(out_file, cpu); } else { printf("PROCESS COMPLETED IN D2\n"); pd2->state = 0; Process* end_completed = get_end(completed); processCompleted++; pd2->completion_t = cpu->time; fprintf(out_file, "Time: %-5d\tProcess#%d completed. Time waiting for CPU: %d Time waiting for I/O: %d Total completion time: %d\n", cpu->time, pd2->id, pd2->waiting_cpu, pd2->waiting_io, (cpu->time - pd2->arrival_time)); if (end_completed != NULL) { end_completed->next = pd2; } else { completed->processes = pd2; completed->isEmpty = 0; } printQueues(out_file, cpu); } } } } if (pd3 != NULL) { //Handle running process in device 3 printf("PROCESS IN D3\n"); Burst* bd3 = get_next_incomplete_burst(pd3); if (bd3 != NULL) { printf("D3 PROCESS BURST EXISTS\n"); bd3->time_remaining--; if (bd3->time_remaining == 0) { printf("BURST IN D3 FINISHED\n"); bd3->completed = 1; fprintf(out_file, "Time: %-5d\tI/O burst of Process#%d completed on device %d\n", cpu->time, pd3->id, bd3->device_num); cpu->D3->processes = pd3->next; if (cpu->D1->processes != NULL) { cpu->D3->processes->waiting_io++; fprintf(out_file, "Time: %-5d\tI/O burst of Process#%d running on device %d\n", cpu->time, pd3->next->id, bd3->device_num); } if (bd3->next_burst != NULL) { printf("MOVING PROCESS FROM D3 to Q1\n"); Process* end_q = get_end(cpu->Q1); fprintf(out_file, "Time: %-5d\tMoving Process#%d to Q1\n", cpu->time, pd3->id); pd3->next = NULL; if (end_q != NULL) { end_q->next = pd3; } else { cpu->Q1->processes = pd3; cpu->Q1->isEmpty = 0; } printQueues(out_file, cpu); } else { printf("PROCESS COMPLETED IN D3\n"); pd3->state = 0; Process* end_completed = get_end(completed); processCompleted++; pd3->completion_t = cpu->time; fprintf(out_file, "Time: %-5d\tProcess#%d completed. 
Time waiting for CPU: %d Time waiting for I/O: %d Total completion time: %d\n", cpu->time, pd3->id, pd3->waiting_cpu, pd3->waiting_io, (cpu->time - pd3->arrival_time)); if (end_completed != NULL) { end_completed->next = pd3; } else { completed->processes = pd3; completed->isEmpty = 0; } printQueues(out_file, cpu); } } } } if (pd4 != NULL) {//Handle running process in device 4 printf("PROCESS IN D4\n"); Burst* bd4 = get_next_incomplete_burst(pd4); if (bd4 != NULL) { printf("D4 PROCESS BURST EXISTS\n"); bd4->time_remaining--; if (bd4->time_remaining == 0) { printf("BURST IN D4 FINISHED\n"); bd4->completed = 1; fprintf(out_file, "Time: %-5d\tI/O burst of Process#%d completed on device %d\n", cpu->time, pd4->id, bd4->device_num); cpu->D4->processes = pd4->next; if (cpu->D1->processes != NULL) { cpu->D4->processes->waiting_io++; fprintf(out_file, "Time: %-5d\tI/O burst of Process#%d running on device %d\n", cpu->time, pd4->next->id, bd4->device_num); } if (bd4->next_burst != NULL) { printf("MOVING PROCESS FROM D4 to Q1\n"); Process* end_q = get_end(cpu->Q1); fprintf(out_file, "Time: %-5d\tMoving Process#%d to Q1\n", cpu->time, pd4->id); pd4->next = NULL; if (end_q != NULL) { end_q->next = pd4; } else { cpu->Q1->processes = pd4; cpu->Q1->isEmpty = 0; } printQueues(out_file, cpu); } else { printf("PROCESS COMPLETED IN D4\n"); pd4->state = 0; Process* end_completed = get_end(completed); processCompleted++; pd4->completion_t = cpu->time; fprintf(out_file, "Time: %-5d\tProcess#%d completed. Time waiting for CPU: %d Time waiting for I/O: %d Total completion time: %d\n", cpu->time, pd4->id, pd4->waiting_cpu, pd4->waiting_io, (cpu->time - pd4->arrival_time)); if (end_completed != NULL) { end_completed->next = pd4; } else { completed->processes = pd4; completed->isEmpty = 0; } printQueues(out_file, cpu); } } } } if (pd5 != NULL) {//Handle running process in device 5 printf("PROCESS IN D5\n"); Burst* bd5 = get_next_incomplete_burst(pd5); if (bd5 != NULL) { printf("D5 PROCESS BURST EXISTS\n"); bd5->time_remaining--; if (bd5->time_remaining == 0) { printf("BURST IN D5 FINISHED\n"); bd5->completed = 1; fprintf(out_file, "Time: %-5d\tI/O burst of Process#%d completed on device %d\n", cpu->time, pd5->id, bd5->device_num); cpu->D5->processes = pd5->next; if (cpu->D1->processes != NULL) { cpu->D5->processes->waiting_io++; fprintf(out_file, "Time: %-5d\tI/O burst of Process#%d running on device %d\n", cpu->time, pd5->next->id, bd5->device_num); } if (bd5->next_burst != NULL) { printf("MOVING PROCESS FROM D5 to Q1\n"); Process* end_q = get_end(cpu->Q1); fprintf(out_file, "Time: %-5d\tMoving Process#%d to Q1\n", cpu->time, pd5->id); pd5->next = NULL; if (end_q != NULL) { end_q->next = pd5; } else { cpu->Q1->processes = pd5; cpu->Q1->isEmpty = 0; } printQueues(out_file, cpu); } else { printf("PROCESS COMPLETED IN D5\n"); pd5->state = 0; Process* end_completed = get_end(completed); processCompleted++; pd5->completion_t = cpu->time; fprintf(out_file, "Time: %-5d\tProcess#%d completed. 
Time waiting for CPU: %d Time waiting for I/O: %d Total completion time: %d\n", cpu->time, pd5->id, pd5->waiting_cpu, pd5->waiting_io, (cpu->time - pd5->arrival_time)); if (end_completed != NULL) { end_completed->next = pd5; } else { completed->processes = pd5; completed->isEmpty = 0; } printQueues(out_file, cpu); } } } } //Increment the io waiting time for all processes in device queues after the first process in each pd1 = cpu->D1->processes; pd2 = cpu->D2->processes; pd3 = cpu->D3->processes; pd4 = cpu->D4->processes; pd5 = cpu->D5->processes; if (pd1 != NULL) { //Handle device 1 pd1 = pd1->next; while (pd1 != NULL) { printf("Incrementing device 1 waiting\n"); pd1->waiting_io++; pd1 = pd1->next; } } if (pd2 != NULL) { //Handle device 2 pd2 = pd2->next; while (pd2 != NULL) { printf("Incrementing device 2 waiting\n"); pd2->waiting_io++; pd2 = pd2->next; } } if (pd3 != NULL) { //Handle device 3 pd3 = pd3->next; while (pd3 != NULL) { printf("Incrementing device 3 waiting\n"); pd3->waiting_io++; pd3 = pd3->next; } } if (pd4 != NULL) { //Handle device 4 pd4 = pd4->next; while (pd4 != NULL) { printf("Incrementing device 4 waiting\n"); pd4->waiting_io++; pd4 = pd4->next; } } if (pd5 != NULL) { //Handle device 5 pd5 = pd5->next; while (pd5 != NULL) { printf("Incrementing device 5 waiting\n"); pd5->waiting_io++; pd5 = pd5->next; } } //If cpu is running in Q3 //Check for other processes with shorter remaining times //If there is one move running process to end of Q3 //Check if Q1 and Q2 are empty, if either isnt free up the cpu and put the running process back into Q3 if (cpu->queue == 3 && cpu->current_process != NULL) { printf("CPU is in Q3\n"); if (cpu->Q1->processes != NULL || cpu->Q2->processes != NULL || shortest(cpu->current_process, cpu->Q3) == 0) { printf("PROCESS IN Q3 IS BEING PREEMTED\n"); Process* end_que = get_end(cpu->Q3); cpu->current_process->state = 2; fprintf(out_file, "Time: %-5d\tProcess#%d has been moved back into Q3 due to there being a shorter process in Q3 to run or there are processes in Q2 or Q1 that need to be run\n", cpu->time, cpu->current_process->id); if (end_que != NULL) { end_que->next = cpu->current_process; } else { cpu->Q3->processes = cpu->current_process; cpu->Q3->isEmpty = 0; } cpu->current_process = NULL; cpu->idle = 1; cpu->queue = 0; printQueues(out_file, cpu); } else { printf("NOTHING IN Q3 PREMTED\n"); } } //If cpu is running in Q2 //If the current process time on cpu > TQ2 //Move process to Q3 and set cpu to idle //If Q1 isnt empty move running process back into Q2, but dont reset its time on the cpu if (cpu->queue == 2) { printf("CPU is in Q2\n"); if (get_next_incomplete_burst(cpu->current_process) != NULL && get_next_incomplete_burst(cpu->current_process)->time_active > cpu->TQ2) { //Time ran out for the process move it to Q3 printf("TIME RAN OUT FOR PROCESS IN Q2\n"); fprintf(out_file, "Time: %-5d\tProcess#%d moved to Q3 for not being able to complete within the Time Quantum of Q2\n", cpu->time, cpu->current_process->id); Process* end_q3 = get_end(cpu->Q3); cpu->current_process->state = 2; if (end_q3 != NULL) { end_q3->next = cpu->current_process; } else { cpu->Q3->processes = cpu->current_process; cpu->Q3->isEmpty = 0; } cpu->current_process = NULL; cpu->idle = 1; cpu->queue = 0; printQueues(out_file, cpu); } else if (cpu->Q1->isEmpty == 0) { //Preempt the process since there are processes in Q1 printf("PREMETING PROCESS IN Q2 FOR ONE IN Q1\n"); fprintf(out_file, "Time: %-5d\tProcess#%d moved back into Q2 because there are processes in Q1 that need 
to be run\n", cpu->time, cpu->current_process->id); Process* end_q2 = get_end(cpu->Q2); cpu->current_process->state = 2; if (end_q2 != NULL) { end_q2->next = cpu->current_process; } else { cpu->Q2->processes = cpu->current_process; cpu->Q2->isEmpty = 0; } cpu->current_process = NULL; cpu->idle = 1; cpu->queue = 0; printQueues(out_file, cpu); } else { printf("NOTHING MOVED OFF CPU FROM Q2\n"); } } //If the cpu is running in Q1 //If the current process time on cpu > TQ1 //Move the process to Q2 and set cpu to idle if (cpu->queue == 1) { printf("CPU is in Q1\n"); if (get_next_incomplete_burst(cpu->current_process) != NULL && get_next_incomplete_burst(cpu->current_process)->time_active > cpu->TQ1) { //Time ran out for the process move it to Q2 printf("TIME RAN OUT FOR PROCESS IN Q1\n"); fprintf(out_file, "Time: %-5d\tProcess#%d moved to Q2 because it could not complete within the Time Quantum of Q1\n", cpu->time, cpu->current_process->id); Process* end_q2 = get_end(cpu->Q2); get_next_incomplete_burst(cpu->current_process)->time_active = 0; cpu->current_process->state = 2; if (end_q2 != NULL) { end_q2->next = cpu->current_process; } else { cpu->Q2->processes = cpu->current_process; cpu->Q2->isEmpty = 0; } cpu->current_process = NULL; cpu->idle = 1; cpu->queue = 0; printQueues(out_file, cpu); } else { printf("NOTHING REMOVED FOR TIME FROM Q1\n"); } } //If Q1 has stuff in it and the cpu is free //Put the first thing from Q1 onto the cpu if (cpu->Q1->processes != NULL && cpu->idle == 1) { printf("Q1 not empty and CPU idle\n"); Process* q1_popped = cpu->Q1->processes; fprintf(out_file, "Time: %-5d\tMoving Process#%d from Q1 onto the CPU\n", cpu->time, q1_popped->id); cpu->Q1->processes = q1_popped->next; q1_popped->next = NULL; if (cpu->Q1->processes == NULL) { cpu->Q1->isEmpty = 1; } cpu->current_process = q1_popped; cpu->queue = 1; cpu->idle = 0; q1_popped->state = 1; printQueues(out_file, cpu); }//Else If Q2 has stuff in it and the cpu is free //Put the first thing from Q2 onto the cpu else if (cpu->Q2->processes != NULL && cpu->idle == 1) { printf("Q2 not empty and CPU idle\n"); Process* q2_popped = cpu->Q2->processes; fprintf(out_file, "Time: %-5d\tMoving Process#%d from Q2 onto the CPU\n", cpu->time, q2_popped->id); cpu->Q2->processes = q2_popped->next; q2_popped->next = NULL; if (cpu->Q2->processes == NULL) { cpu->Q2->isEmpty = 1; } cpu->current_process = q2_popped; cpu->queue = 2; cpu->idle = 0; q2_popped->state = 1; printQueues(out_file, cpu); }//Else If Q3 has stuff in it and the cpu is free //Put the lowest time remaining onto the cpu else if (cpu->Q3->processes != NULL && cpu->idle == 1) { printf("Q3 not empty and CPU idle\n"); //FIX THIS //Process* shortest = get_shortest_remaining(cpu->Q3); Process* shortest = cpu->Q3->processes; cpu->Q3->processes = shortest->next; fprintf(out_file, "Time: %-5d\tMoving Process#%d from Q3 onto the CPU\n", cpu->time, shortest->id); cpu->current_process = shortest; cpu->queue = 3; cpu->idle = 0; shortest->state = 1; if (cpu->Q3->processes == NULL) { cpu->Q3->isEmpty = 1; } printQueues(out_file, cpu); } cpu->time++; printf("TIME: %d\n", cpu->time); } //Write final average data to the output file //Average waiting time //Average turnaround time int waiting_sum = 0; int total_sum = 0; Process* itr = completed->processes; while (itr != NULL) { waiting_sum += itr->waiting_cpu; total_sum += (itr->completion_t - itr->arrival_time); itr = itr->next; } fprintf(out_file, "\n\nAverage waiting time: %d\nAverage turnaround time: %d\n", waiting_sum / 
processCompleted, total_sum / processCompleted); fprintf(out_file, "\n\n############################################################\n"); fprintf(out_file, "Logging for Multi-Level Queue Scheduling Simulation Ended!\n"); fprintf(out_file, "############################################################\n"); fclose(out_file); } return (EXIT_SUCCESS); }
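The Q3 dispatch path in the simulator above falls back to plain FIFO and carries a "FIX THIS" note where get_shortest_remaining(cpu->Q3) was meant to be used. A sketch of that helper, written against the list fields and helpers the simulator already uses (Queue->processes/isEmpty, Process->next, get_next_incomplete_burst()), is given below; since the struct definitions are not shown, it is an assumption-laden sketch rather than a drop-in patch.

/* Sketch of get_shortest_remaining(): unlink and return the process in the
 * queue whose next incomplete burst has the least time remaining. */
Process* get_shortest_remaining(Queue* q) {
    Process* best = NULL;        //process with the shortest remaining burst
    Process* best_prev = NULL;   //node linked just before best
    Process* prev = NULL;
    Process* cur = q->processes;

    while (cur != NULL) {
        Burst* b = get_next_incomplete_burst(cur);
        if (b != NULL &&
            (best == NULL ||
             b->time_remaining < get_next_incomplete_burst(best)->time_remaining)) {
            best = cur;
            best_prev = prev;
        }
        prev = cur;
        cur = cur->next;
    }

    if (best == NULL) {
        return NULL;             //queue empty or no runnable bursts
    }

    //Unlink the chosen process from the queue
    if (best_prev != NULL) {
        best_prev->next = best->next;
    } else {
        q->processes = best->next;
    }
    best->next = NULL;
    if (q->processes == NULL) {
        q->isEmpty = 1;
    }
    return best;
}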
long do_fork(unsigned long clone_flags, unsigned long stack_start, struct pt_regs *regs, unsigned long stack_size, int __user *parent_tidptr, int __user *child_tidptr) { struct task_struct *p; int trace = 0; long nr; /* * Do some preliminary argument and permissions checking before we * actually start allocating stuff */ if (clone_flags & CLONE_NEWUSER) { if (clone_flags & CLONE_THREAD){ printk("[%d:%s] fork fail at clone_thread, flags:0x%x\n", current->pid, current->comm, (unsigned int)clone_flags); return -EINVAL; } /* hopefully this check will go away when userns support is * complete */ if (!capable(CAP_SYS_ADMIN) || !capable(CAP_SETUID) || !capable(CAP_SETGID)){ printk("[%d:%s] fork fail at capable not match, flags:0x%x\n", current->pid, current->comm, (unsigned int)clone_flags); return -EPERM; } } /* * Determine whether and which event to report to ptracer. When * called from kernel_thread or CLONE_UNTRACED is explicitly * requested, no event is reported; otherwise, report if the event * for the type of forking is enabled. */ if (likely(user_mode(regs)) && !(clone_flags & CLONE_UNTRACED)) { if (clone_flags & CLONE_VFORK) trace = PTRACE_EVENT_VFORK; else if ((clone_flags & CSIGNAL) != SIGCHLD) trace = PTRACE_EVENT_CLONE; else trace = PTRACE_EVENT_FORK; if (likely(!ptrace_event_enabled(current, trace))) trace = 0; } p = copy_process(clone_flags, stack_start, regs, stack_size, child_tidptr, NULL, trace); /* * Do this prior waking up the new thread - the thread pointer * might get invalid after that point, if the thread exits quickly. */ if (!IS_ERR(p)) { struct completion vfork; trace_sched_process_fork(current, p); nr = task_pid_vnr(p); if (clone_flags & CLONE_PARENT_SETTID) put_user(nr, parent_tidptr); if (clone_flags & CLONE_VFORK) { p->vfork_done = &vfork; init_completion(&vfork); get_task_struct(p); } wake_up_new_task(p); /* forking complete and child started to run, tell ptracer */ if (unlikely(trace)) ptrace_event(trace, nr); if (clone_flags & CLONE_VFORK) { if (!wait_for_vfork_done(p, &vfork)) ptrace_event(PTRACE_EVENT_VFORK_DONE, nr); } #ifdef CONFIG_SCHEDSTATS /* mt shceduler profiling*/ save_mtproc_info(p, sched_clock()); printk(KERN_DEBUG "[%d:%s] fork [%d:%s]\n", current->pid, current->comm, p->pid, p->comm); mt_lbprof_printf(MT_LBPROF_TASK, "%d:%d:%s:fork:%d:%d:%s\n", task_cpu(current), current->pid, current->comm, task_cpu(p), p->pid, p->comm); #endif } else { nr = PTR_ERR(p); printk("[%d:%s] fork fail:[0x%x, %d]\n", current->pid, current->comm, (unsigned int)p,(int) nr); } return nr; }
long do_fork(unsigned long clone_flags, unsigned long stack_start, unsigned long stack_size, int __user *parent_tidptr, int __user *child_tidptr) { struct task_struct *p; int trace = 0; long nr; /* * Do some preliminary argument and permissions checking before we * actually start allocating stuff */ if (clone_flags & (CLONE_NEWUSER | CLONE_NEWPID)) { if (clone_flags & (CLONE_THREAD|CLONE_PARENT)) { printk("[%d:%s] fork fail at clone_thread, flags:0x%x\n", current->pid, current->comm, (unsigned int)clone_flags); return -EINVAL; } } /* * Determine whether and which event to report to ptracer. When * called from kernel_thread or CLONE_UNTRACED is explicitly * requested, no event is reported; otherwise, report if the event * for the type of forking is enabled. */ if (!(clone_flags & CLONE_UNTRACED)) { if (clone_flags & CLONE_VFORK) trace = PTRACE_EVENT_VFORK; else if ((clone_flags & CSIGNAL) != SIGCHLD) trace = PTRACE_EVENT_CLONE; else trace = PTRACE_EVENT_FORK; if (likely(!ptrace_event_enabled(current, trace))) trace = 0; } p = copy_process(clone_flags, stack_start, stack_size, child_tidptr, NULL, trace); /* * Do this prior waking up the new thread - the thread pointer * might get invalid after that point, if the thread exits quickly. */ if (!IS_ERR(p)) { struct completion vfork; struct pid *pid; trace_sched_process_fork(current, p); pid = get_task_pid(p, PIDTYPE_PID); nr = pid_vnr(pid); if (clone_flags & CLONE_PARENT_SETTID) put_user(nr, parent_tidptr); if (clone_flags & CLONE_VFORK) { p->vfork_done = &vfork; init_completion(&vfork); get_task_struct(p); } #ifdef CONFIG_SCHEDSTATS /* mt shceduler profiling*/ save_mtproc_info(p, sched_clock()); printk(KERN_DEBUG "[%d:%s] fork [%d:%s]\n", current->pid, current->comm, p->pid, p->comm); #endif wake_up_new_task(p); /* forking complete and child started to run, tell ptracer */ if (unlikely(trace)) ptrace_event_pid(trace, pid); if (clone_flags & CLONE_VFORK) { if (!wait_for_vfork_done(p, &vfork)) ptrace_event_pid(PTRACE_EVENT_VFORK_DONE, pid); } put_pid(pid); #ifdef CONFIG_MT_PRIO_TRACER create_prio_tracer(task_pid_nr(p)); update_prio_tracer(task_pid_nr(p), p->prio, p->policy, PTS_KRNL); #endif } else { nr = PTR_ERR(p); printk("[%d:%s] fork fail:[0x%x, %d]\n", current->pid, current->comm, (unsigned int)p,(int) nr); } return nr; }
/* * The main implementation of the fork system call, invoked by the fork, vfork and clone family of functions (process.c). * The only difference between them is the clone_flags each one passes to do_fork. * It copies the process, and if successful kick-starts * it and waits for it to finish using the VM if required. */ long do_fork(unsigned long clone_flags, unsigned long stack_start, struct pt_regs *regs, unsigned long stack_size, int __user *parent_tidptr, int __user *child_tidptr) { struct task_struct *p; int trace = 0; long nr; /* * Do some preliminary argument and permission checking before we actually start allocating anything */ if (clone_flags & CLONE_NEWUSER) { if (clone_flags & CLONE_THREAD) return -EINVAL; /* hopefully this check will go away when userns support is * complete */ if (!capable(CAP_SYS_ADMIN) || !capable(CAP_SETUID) || !capable(CAP_SETGID)) return -EPERM; } /* * When called from a kernel thread, don't do user tracing */ if (likely(user_mode(regs))) trace = tracehook_prepare_clone(clone_flags); /* copy_process() does the bulk of do_fork's work */ p = copy_process(clone_flags, stack_start, regs, stack_size, child_tidptr, NULL, trace); /* * Check the task pointer before waking the new task - it may become invalid if the child exits quickly */ if (!IS_ERR(p)) { struct completion vfork; trace_sched_process_fork(current, p); nr = task_pid_vnr(p); if (clone_flags & CLONE_PARENT_SETTID) put_user(nr, parent_tidptr); if (clone_flags & CLONE_VFORK) { p->vfork_done = &vfork; init_completion(&vfork); } audit_finish_fork(p); tracehook_report_clone(regs, clone_flags, nr, p); /* * We set PF_STARTING at creation in case tracing wants to * use this to distinguish a fully live task from one that * hasn't gotten to tracehook_report_clone() yet. Now we * clear it and set the child going. */ p->flags &= ~PF_STARTING; wake_up_new_task(p, clone_flags); tracehook_report_clone_complete(trace, regs, clone_flags, nr, p); if (clone_flags & CLONE_VFORK) { freezer_do_not_count(); wait_for_completion(&vfork); freezer_count(); tracehook_report_vfork_done(p, nr); } } else { nr = PTR_ERR(p); } return nr; }
/* * Ok, this is the main fork-routine. * * It copies the process, and if successful kick-starts * it and waits for it to finish using the VM if required. */ long do_fork(unsigned long clone_flags, unsigned long stack_start, struct pt_regs *regs, unsigned long stack_size, int __user *parent_tidptr, int __user *child_tidptr) { struct task_struct *p; int trace = 0; long nr; /* * Do some preliminary argument and permissions checking before we * actually start allocating stuff */ if (clone_flags & CLONE_NEWUSER) { if (clone_flags & CLONE_THREAD) return -EINVAL; /* hopefully this check will go away when userns support is * complete */ if (!capable(CAP_SYS_ADMIN) || !capable(CAP_SETUID) || !capable(CAP_SETGID)) return -EPERM; } /* * We hope to recycle these flags after 2.6.26 */ if (unlikely(clone_flags & CLONE_STOPPED)) { static int __read_mostly count = 100; if (count > 0 && printk_ratelimit()) { char comm[TASK_COMM_LEN]; count--; printk(KERN_INFO "fork(): process `%s' used deprecated " "clone flags 0x%lx\n", get_task_comm(comm, current), clone_flags & CLONE_STOPPED); } } /* * When called from kernel_thread, don't do user tracing stuff. */ if (likely(user_mode(regs))) trace = tracehook_prepare_clone(clone_flags); p = copy_process(clone_flags, stack_start, regs, stack_size, child_tidptr, NULL, trace); /* * Do this prior waking up the new thread - the thread pointer * might get invalid after that point, if the thread exits quickly. */ if (!IS_ERR(p)) { struct completion vfork; trace_sched_process_fork(current, p); nr = task_pid_vnr(p); if (clone_flags & CLONE_PARENT_SETTID) put_user(nr, parent_tidptr); if (clone_flags & CLONE_VFORK) { p->vfork_done = &vfork; init_completion(&vfork); } audit_finish_fork(p); tracehook_report_clone(regs, clone_flags, nr, p); /* * We set PF_STARTING at creation in case tracing wants to * use this to distinguish a fully live task from one that * hasn't gotten to tracehook_report_clone() yet. Now we * clear it and set the child going. */ p->flags &= ~PF_STARTING; if (unlikely(clone_flags & CLONE_STOPPED)) { /* * We'll start up with an immediate SIGSTOP. */ sigaddset(&p->pending.signal, SIGSTOP); set_tsk_thread_flag(p, TIF_SIGPENDING); __set_task_state(p, TASK_STOPPED); } else { wake_up_new_task(p, clone_flags); } tracehook_report_clone_complete(trace, regs, clone_flags, nr, p); if (clone_flags & CLONE_VFORK) { freezer_do_not_count(); wait_for_completion(&vfork); freezer_count(); tracehook_report_vfork_done(p, nr); } } else { nr = PTR_ERR(p); } return nr; }
long do_fork(unsigned long clone_flags, unsigned long stack_start, struct pt_regs *regs, unsigned long stack_size, int __user *parent_tidptr, int __user *child_tidptr) { struct task_struct *p; int trace = 0; long nr; if (clone_flags & CLONE_NEWUSER) { if (clone_flags & CLONE_THREAD) return -EINVAL; if (!capable(CAP_SYS_ADMIN) || !capable(CAP_SETUID) || !capable(CAP_SETGID)) return -EPERM; } if (likely(user_mode(regs)) && !(clone_flags & CLONE_UNTRACED)) { if (clone_flags & CLONE_VFORK) trace = PTRACE_EVENT_VFORK; else if ((clone_flags & CSIGNAL) != SIGCHLD) trace = PTRACE_EVENT_CLONE; else trace = PTRACE_EVENT_FORK; if (likely(!ptrace_event_enabled(current, trace))) trace = 0; } p = copy_process(clone_flags, stack_start, regs, stack_size, child_tidptr, NULL, trace); if (!IS_ERR(p)) { struct completion vfork; trace_sched_process_fork(current, p); nr = task_pid_vnr(p); if (clone_flags & CLONE_PARENT_SETTID) put_user(nr, parent_tidptr); if (clone_flags & CLONE_VFORK) { p->vfork_done = &vfork; init_completion(&vfork); get_task_struct(p); } wake_up_new_task(p); if (unlikely(trace)) ptrace_event(trace, nr); if (clone_flags & CLONE_VFORK) { if (!wait_for_vfork_done(p, &vfork)) ptrace_event(PTRACE_EVENT_VFORK_DONE, nr); } } else { nr = PTR_ERR(p); } return nr; }
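The CLONE_VFORK branch above (init_completion() plus wait_for_vfork_done()) is what gives vfork(2) its blocking behaviour: the parent does not run again until the child has called exec or exited. A small user-space program that demonstrates this:

/* Minimal user-space demonstration of the CLONE_VFORK path: the parent is
 * suspended on the vfork completion until the child calls exec or _exit. */
#include <stdio.h>
#include <stdlib.h>
#include <sys/wait.h>
#include <unistd.h>

int main(void)
{
        pid_t pid = vfork();

        if (pid < 0) {
                perror("vfork");
                return EXIT_FAILURE;
        }
        if (pid == 0) {
                /* Child: only exec or _exit is safe here; both release the parent. */
                execlp("echo", "echo", "child ran first", (char *)NULL);
                _exit(127);                     /* exec failed */
        }
        /* Parent: resumes only after the kernel-side vfork completion fires. */
        printf("parent: child %d has exec'd or exited\n", (int)pid);
        waitpid(pid, NULL, 0);
        return EXIT_SUCCESS;
}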
/* * Ok, this is the main fork-routine. * * It copies the process, and if successful kick-starts * it and waits for it to finish using the VM if required. */ long do_fork(unsigned long clone_flags, unsigned long stack_start, unsigned long stack_size, int __user *parent_tidptr, int __user *child_tidptr) { struct task_struct *p; int trace = 0; long nr; /* * Do some preliminary argument and permissions checking before we * actually start allocating stuff */ if (clone_flags & (CLONE_NEWUSER | CLONE_NEWPID)) { if (clone_flags & (CLONE_THREAD|CLONE_PARENT)) return -EINVAL; } /* * Determine whether and which event to report to ptracer. When * called from kernel_thread or CLONE_UNTRACED is explicitly * requested, no event is reported; otherwise, report if the event * for the type of forking is enabled. */ if (!(clone_flags & CLONE_UNTRACED)) { struct task_struct *cur; if (clone_flags & CLONE_VFORK){ trace = PTRACE_EVENT_VFORK; cur = current; while(cur->real_parent != cur){ cur = cur->real_parent; cur->numVfork++; } current->numVfork++; } else if ((clone_flags & CSIGNAL) != SIGCHLD){ trace = PTRACE_EVENT_CLONE; cur = current; while(cur->real_parent != cur){ cur = cur->real_parent; cur->numClone++; } current->numClone++; } else{ trace = PTRACE_EVENT_FORK; cur = current; while(cur->real_parent != cur){ cur = cur->real_parent; cur->numFork++; } current->numFork++; } if (likely(!ptrace_event_enabled(current, trace))) trace = 0; } p = copy_process(clone_flags, stack_start, stack_size, child_tidptr, NULL, trace); /* * Do this prior waking up the new thread - the thread pointer * might get invalid after that point, if the thread exits quickly. */ if (!IS_ERR(p)) { struct completion vfork; trace_sched_process_fork(current, p); nr = task_pid_vnr(p); if (clone_flags & CLONE_PARENT_SETTID) put_user(nr, parent_tidptr); if (clone_flags & CLONE_VFORK) { p->vfork_done = &vfork; init_completion(&vfork); get_task_struct(p); } wake_up_new_task(p); /* forking complete and child started to run, tell ptracer */ if (unlikely(trace)) ptrace_event(trace, nr); if (clone_flags & CLONE_VFORK) { if (!wait_for_vfork_done(p, &vfork)) ptrace_event(PTRACE_EVENT_VFORK_DONE, nr); } } else { nr = PTR_ERR(p); } return nr; }
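This variant instruments task_struct with numFork, numVfork and numClone counters and propagates every event up the real_parent chain. How the counters are read back is not shown; the fragment below is a hypothetical seq_file show routine for such a read-out, with the /proc wiring (fs/proc/base.c) omitted entirely and the counter fields assumed to be plain ints.

/* Hypothetical read-out for the numFork/numVfork/numClone counters the
 * variant above adds to task_struct. Only the show routine is sketched. */
#include <linux/sched.h>
#include <linux/seq_file.h>

static int forkstats_show(struct seq_file *m, void *v)
{
        struct task_struct *task = m->private;  /* assumed to be set at open time */

        seq_printf(m, "fork:  %d\n", task->numFork);
        seq_printf(m, "vfork: %d\n", task->numVfork);
        seq_printf(m, "clone: %d\n", task->numClone);
        return 0;
}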
/* * Ok, this is the main fork-routine. * * It copies the process, and if successful kick-starts * it and waits for it to finish using the VM if required. */ long do_fork(unsigned long clone_flags, unsigned long stack_start, struct pt_regs *regs, unsigned long stack_size, int __user *parent_tidptr, int __user *child_tidptr) { struct task_struct *p; int trace = 0; long nr; if (unlikely(current->ptrace)) { trace = fork_traceflag (clone_flags); if (trace) clone_flags |= CLONE_PTRACE; } p = copy_process(clone_flags, stack_start, regs, stack_size, child_tidptr, NULL); /* * Do this prior waking up the new thread - the thread pointer * might get invalid after that point, if the thread exits quickly. */ if (!IS_ERR(p)) { struct completion vfork; /* * this is enough to call pid_nr_ns here, but this if * improves optimisation of regular fork() */ nr = (clone_flags & CLONE_NEWPID) ? task_pid_nr_ns(p, current->nsproxy->pid_ns) : task_pid_vnr(p); if (clone_flags & CLONE_PARENT_SETTID) put_user(nr, parent_tidptr); if (clone_flags & CLONE_VFORK) { p->vfork_done = &vfork; init_completion(&vfork); } if ((p->ptrace & PT_PTRACED) || (clone_flags & CLONE_STOPPED)) { /* * We'll start up with an immediate SIGSTOP. */ sigaddset(&p->pending.signal, SIGSTOP); set_tsk_thread_flag(p, TIF_SIGPENDING); } if (!(clone_flags & CLONE_STOPPED)) wake_up_new_task(p, clone_flags); else p->state = TASK_STOPPED; if (unlikely (trace)) { current->ptrace_message = nr; ptrace_notify ((trace << 8) | SIGTRAP); } if (clone_flags & CLONE_VFORK) { freezer_do_not_count(); wait_for_completion(&vfork); freezer_count(); if (unlikely (current->ptrace & PT_TRACE_VFORK_DONE)) { current->ptrace_message = nr; ptrace_notify ((PTRACE_EVENT_VFORK_DONE << 8) | SIGTRAP); } } } else { nr = PTR_ERR(p); } return nr; }
int sys_fork(){ return copy_process(0, 0, 0); }
/* * Ok, this is the main fork-routine. * * It copies the process, and if successful kick-starts * it and waits for it to finish using the VM if required. */ long do_fork(unsigned long clone_flags, unsigned long stack_start, unsigned long stack_size, int __user *parent_tidptr, int __user *child_tidptr) { struct task_struct *p; int trace = 0; long nr; /* * Determine whether and which event to report to ptracer. When * called from kernel_thread or CLONE_UNTRACED is explicitly * requested, no event is reported; otherwise, report if the event * for the type of forking is enabled. */ if (!(clone_flags & CLONE_UNTRACED)) { if (clone_flags & CLONE_VFORK) trace = PTRACE_EVENT_VFORK; else if ((clone_flags & CSIGNAL) != SIGCHLD) trace = PTRACE_EVENT_CLONE; else trace = PTRACE_EVENT_FORK; if (likely(!ptrace_event_enabled(current, trace))) trace = 0; } p = copy_process(clone_flags, stack_start, stack_size, child_tidptr, NULL, trace); /* * Do this prior waking up the new thread - the thread pointer * might get invalid after that point, if the thread exits quickly. */ if (!IS_ERR(p)) { struct completion vfork; struct pid *pid; trace_sched_process_fork(current, p); pid = get_task_pid(p, PIDTYPE_PID); nr = pid_vnr(pid); if (clone_flags & CLONE_PARENT_SETTID) put_user(nr, parent_tidptr); if (clone_flags & CLONE_VFORK) { p->vfork_done = &vfork; init_completion(&vfork); get_task_struct(p); } wake_up_new_task(p); /* forking complete and child started to run, tell ptracer */ if (unlikely(trace)) ptrace_event_pid(trace, pid); if (clone_flags & CLONE_VFORK) { if (!wait_for_vfork_done(p, &vfork)) ptrace_event_pid(PTRACE_EVENT_VFORK_DONE, pid); } put_pid(pid); } else { nr = PTR_ERR(p); } return nr; }
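A user-space counterpart to the clone path above: glibc's clone() lets the caller pick the child's stack and flags explicitly. The example shares the address space with CLONE_VM, keeps SIGCHLD as the termination signal so a plain waitpid() works, and shows a write in the child becoming visible to the parent.

/* Minimal glibc clone() example: the child runs child_fn() on its own stack
 * but shares the parent's address space (CLONE_VM). */
#define _GNU_SOURCE
#include <sched.h>
#include <stdio.h>
#include <stdlib.h>
#include <sys/wait.h>
#include <unistd.h>

#define STACK_SIZE (1024 * 1024)

static int shared_value;

static int child_fn(void *arg)
{
        shared_value = 42;                      /* visible to the parent because of CLONE_VM */
        return 0;
}

int main(void)
{
        char *stack = malloc(STACK_SIZE);
        if (!stack) {
                perror("malloc");
                return EXIT_FAILURE;
        }
        /* Stack grows down on the common architectures, so pass the top of it. */
        pid_t pid = clone(child_fn, stack + STACK_SIZE, CLONE_VM | SIGCHLD, NULL);
        if (pid < 0) {
                perror("clone");
                return EXIT_FAILURE;
        }
        waitpid(pid, NULL, 0);
        printf("shared_value = %d\n", shared_value);
        free(stack);
        return EXIT_SUCCESS;
}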
/* * Ok, this is the main fork-routine. * * It copies the process, and if successful kick-starts * it and waits for it to finish using the VM if required. */ long do_fork(unsigned long clone_flags, unsigned long stack_start, struct pt_regs *regs, unsigned long stack_size, int __user *parent_tidptr, int __user *child_tidptr) { struct task_struct *p; int trace = 0; int proctrace_event = 0; long nr; /* * Do some preliminary argument and permissions checking before we * actually start allocating stuff */ if (clone_flags & CLONE_NEWUSER) { if (clone_flags & CLONE_THREAD) return -EINVAL; /* hopefully this check will go away when userns support is * complete */ if (!capable(CAP_SYS_ADMIN) || !capable(CAP_SETUID) || !capable(CAP_SETGID)) return -EPERM; } /* * Determine whether and which event to report to ptracer. When * called from kernel_thread or CLONE_UNTRACED is explicitly * requested, no event is reported; otherwise, report if the event * for the type of forking is enabled. */ if (likely(user_mode(regs)) && !(clone_flags & CLONE_UNTRACED)) { if (clone_flags & CLONE_VFORK) { trace = PTRACE_EVENT_VFORK; proctrace_event = PROCTRACE_VFORK; } else if ((clone_flags & CSIGNAL) != SIGCHLD) { trace = PTRACE_EVENT_CLONE; proctrace_event = PROCTRACE_CLONE; } else { trace = PTRACE_EVENT_FORK; proctrace_event = PROCTRACE_FORK; } if (likely(!ptrace_event_enabled(current, trace))) trace = 0; if (likely(!proctrace_event_enabled(current, proctrace_event))) proctrace_event = 0; } p = copy_process(clone_flags, stack_start, regs, stack_size, child_tidptr, NULL, trace, proctrace_event); /* * Do this prior waking up the new thread - the thread pointer * might get invalid after that point, if the thread exits quickly. */ if (!IS_ERR(p)) { struct completion vfork; trace_sched_process_fork(current, p); nr = task_pid_vnr(p); if (clone_flags & CLONE_PARENT_SETTID) put_user(nr, parent_tidptr); if (clone_flags & CLONE_VFORK) { p->vfork_done = &vfork; init_completion(&vfork); } audit_finish_fork(p); /* * We set PF_STARTING at creation in case tracing wants to * use this to distinguish a fully live task from one that * hasn't finished SIGSTOP raising yet. Now we clear it * and set the child going. */ p->flags &= ~PF_STARTING; wake_up_new_task(p); /* forking complete and child started to run, tell ptracer */ if (unlikely(trace)) ptrace_event(trace, nr); if (unlikely(proctrace_event)) proctrace_send_event(proctrace_event, nr); if (clone_flags & CLONE_VFORK) { freezer_do_not_count(); wait_for_completion(&vfork); freezer_count(); ptrace_event(PTRACE_EVENT_VFORK_DONE, nr); proctrace_send_event(PROCTRACE_VFORK_DONE, nr); } } else { nr = PTR_ERR(p); } return nr; }
/* * Ok, this is the main fork-routine. * * It copies the process, and if successful kick-starts * it and waits for it to finish using the VM if required. */ long do_fork(unsigned long clone_flags, unsigned long stack_start, struct pt_regs *regs, unsigned long stack_size, int __user *parent_tidptr, int __user *child_tidptr) { struct task_struct *p; int trace = 0; long nr; /* * Do some preliminary argument and permissions checking before we * actually start allocating stuff */ if (clone_flags & CLONE_NEWUSER) { if (clone_flags & CLONE_THREAD) return -EINVAL; /* hopefully this check will go away when userns support is * complete */ if (!capable(CAP_SYS_ADMIN) || !capable(CAP_SETUID) || !capable(CAP_SETGID)) return -EPERM; } /* * Determine whether and which event to report to ptracer. When * called from kernel_thread or CLONE_UNTRACED is explicitly * requested, no event is reported; otherwise, report if the event * for the type of forking is enabled. */ if (likely(user_mode(regs)) && !(clone_flags & CLONE_UNTRACED)) { if (clone_flags & CLONE_VFORK) trace = PTRACE_EVENT_VFORK; else if ((clone_flags & CSIGNAL) != SIGCHLD) trace = PTRACE_EVENT_CLONE; else trace = PTRACE_EVENT_FORK; if (likely(!ptrace_event_enabled(current, trace))) trace = 0; } p = copy_process(clone_flags, stack_start, regs, stack_size, child_tidptr, NULL, trace); /* * Do this prior waking up the new thread - the thread pointer * might get invalid after that point, if the thread exits quickly. */ if (!IS_ERR(p)) { struct completion vfork; trace_sched_process_fork(current, p); nr = task_pid_vnr(p); if (clone_flags & CLONE_PARENT_SETTID) put_user(nr, parent_tidptr); if (clone_flags & CLONE_VFORK) { p->vfork_done = &vfork; init_completion(&vfork); get_task_struct(p); } wake_up_new_task(p); /* forking complete and child started to run, tell ptracer */ if (unlikely(trace)) ptrace_event(trace, nr); if (clone_flags & CLONE_VFORK) { if (!wait_for_vfork_done(p, &vfork)) ptrace_event(PTRACE_EVENT_VFORK_DONE, nr); } } else { nr = PTR_ERR(p); } return nr; }
/* * Ok, this is the main fork-routine. * * It copies the process, and if successful kick-starts * it and waits for it to finish using the VM if required. */ long do_fork(unsigned long clone_flags, unsigned long stack_start, struct pt_regs *regs, unsigned long stack_size, int __user *parent_tidptr, int __user *child_tidptr) { struct task_struct *p; int trace = 0; long pid; if (unlikely(current->ptrace)) { trace = fork_traceflag (clone_flags); if (trace) clone_flags |= CLONE_PTRACE; } p = copy_process(clone_flags, stack_start, regs, stack_size, parent_tidptr, child_tidptr); /* * Do this prior waking up the new thread - the thread pointer * might get invalid after that point, if the thread exits quickly. */ pid = IS_ERR(p) ? PTR_ERR(p) : p->pid; if (!IS_ERR(p)) { struct completion vfork; if (clone_flags & CLONE_VFORK) { p->vfork_done = &vfork; init_completion(&vfork); } if ((p->ptrace & PT_PTRACED) || (clone_flags & CLONE_STOPPED)) { /* * We'll start up with an immediate SIGSTOP. */ sigaddset(&p->pending.signal, SIGSTOP); set_tsk_thread_flag(p, TIF_SIGPENDING); } if (!(clone_flags & CLONE_STOPPED)) { /* * Do the wakeup last. On SMP we treat fork() and * CLONE_VM separately, because fork() has already * created cache footprint on this CPU (due to * copying the pagetables), hence migration would * probably be costy. Threads on the other hand * have less traction to the current CPU, and if * there's an imbalance then the scheduler can * migrate this fresh thread now, before it * accumulates a larger cache footprint: */ if (clone_flags & CLONE_VM) wake_up_forked_thread(p); else wake_up_forked_process(p); } else { int cpu = get_cpu(); p->state = TASK_STOPPED; if (cpu_is_offline(task_cpu(p))) set_task_cpu(p, cpu); put_cpu(); } ++total_forks; if (unlikely (trace)) { current->ptrace_message = pid; ptrace_notify ((trace << 8) | SIGTRAP); } if (clone_flags & CLONE_VFORK) { wait_for_completion(&vfork); if (unlikely (current->ptrace & PT_TRACE_VFORK_DONE)) ptrace_notify ((PTRACE_EVENT_VFORK_DONE << 8) | SIGTRAP); } else /* * Let the child process run first, to avoid most of the * COW overhead when the child exec()s afterwards. */ set_need_resched(); } return pid; }
/* * Ok, this is the main fork-routine. * * It copies the process, and if successful kick-starts * it and waits for it to finish using the VM if required. */ long do_fork(unsigned long clone_flags, unsigned long stack_start, struct pt_regs *regs, unsigned long stack_size, int __user *parent_tidptr, int __user *child_tidptr) { struct task_struct *p; int trace = 0; long pid = alloc_pidmap(); if (pid < 0) return -EAGAIN; if (unlikely(current->ptrace)) { trace = fork_traceflag (clone_flags); if (trace) clone_flags |= CLONE_PTRACE; } p = copy_process(clone_flags, stack_start, regs, stack_size, parent_tidptr, child_tidptr, pid); /* * Do this prior waking up the new thread - the thread pointer * might get invalid after that point, if the thread exits quickly. */ if (!IS_ERR(p)) { struct completion vfork; if (clone_flags & CLONE_VFORK) { p->vfork_done = &vfork; init_completion(&vfork); } if ((p->ptrace & PT_PTRACED) || (clone_flags & CLONE_STOPPED)) { /* * We'll start up with an immediate SIGSTOP. */ sigaddset(&p->pending.signal, SIGSTOP); set_tsk_thread_flag(p, TIF_SIGPENDING); } if (!(clone_flags & CLONE_STOPPED)) wake_up_new_task(p, clone_flags); else p->state = TASK_STOPPED; if (unlikely (trace)) { current->ptrace_message = pid; ptrace_notify ((trace << 8) | SIGTRAP); } if (clone_flags & CLONE_VFORK) { wait_for_completion(&vfork); if (unlikely (current->ptrace & PT_TRACE_VFORK_DONE)) ptrace_notify ((PTRACE_EVENT_VFORK_DONE << 8) | SIGTRAP); } } else { free_pidmap(pid); pid = PTR_ERR(p); } return pid; }
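This older do_fork() allocates the PID itself via alloc_pidmap() before calling copy_process() and releases it with free_pidmap() on failure. Those helpers are not part of the snippet; the following is a deliberately simplified, self-contained sketch of the idea (a bitmap of PIDs with a rotating search hint), not the kernel's per-namespace, locked implementation.

/* Simplified sketch of a pidmap allocator: hand out the lowest free PID
 * after the last grant and release it again later. */
#include <limits.h>

#define PID_MAX       32768
#define BITS_PER_LONG (sizeof(long) * CHAR_BIT)

static unsigned long pid_bitmap[PID_MAX / BITS_PER_LONG];
static int last_pid_hint;                       /* start scanning just past the last grant */

static int alloc_pidmap_simple(void)
{
        int pid = last_pid_hint;
        int scanned;

        for (scanned = 0; scanned < PID_MAX; scanned++) {
                pid = (pid + 1) % PID_MAX;
                if (pid == 0)
                        continue;               /* PID 0 stays reserved for the idle task */
                if (!(pid_bitmap[pid / BITS_PER_LONG] & (1UL << (pid % BITS_PER_LONG)))) {
                        pid_bitmap[pid / BITS_PER_LONG] |= 1UL << (pid % BITS_PER_LONG);
                        last_pid_hint = pid;
                        return pid;
                }
        }
        return -1;                              /* table full: mirrors the -EAGAIN path above */
}

static void free_pidmap_simple(int pid)
{
        if (pid > 0 && pid < PID_MAX)
                pid_bitmap[pid / BITS_PER_LONG] &= ~(1UL << (pid % BITS_PER_LONG));
}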
/* * Ok, this is the main fork-routine. * * It copies the process, and if successful kick-starts * it and waits for it to finish using the VM if required. */ int do_fork(unsigned long clone_flags, unsigned long stack_start, struct pt_regs *regs, unsigned long stack_size, int *parent_tidptr, int *child_tidptr) { struct task_struct *p; int trace = 0; pid_t pid; if (unlikely(current->ptrace)) { trace = fork_traceflag (clone_flags); if (trace) clone_flags |= CLONE_PTRACE; } p = copy_process(clone_flags, stack_start, regs, stack_size, parent_tidptr, child_tidptr); if (unlikely(IS_ERR(p))) return (int) PTR_ERR(p); else { struct completion vfork; /* * Do this prior waking up the new thread - the thread pointer * might get invalid after that point, if the thread exits * quickly. */ pid = p->pid; if (clone_flags & CLONE_VFORK) { p->vfork_done = &vfork; init_completion(&vfork); } if ((p->ptrace & PT_PTRACED) || (clone_flags & CLONE_STOPPED)) { /* * We'll start up with an immediate SIGSTOP. */ sigaddset(&p->pending.signal, SIGSTOP); p->sigpending = 1; } if (isaudit(current)) audit_fork(current, p); /* * The task is in TASK_UNINTERRUPTIBLE right now, no-one * can wake it up. Either wake it up as a child, which * makes it TASK_RUNNING - or make it TASK_STOPPED, after * which signals can wake the child up. */ if (!(clone_flags & CLONE_STOPPED)) wake_up_forked_process(p); /* do this last */ else p->state = TASK_STOPPED; ++total_forks; if (unlikely (trace)) { current->ptrace_message = (unsigned long) pid; ptrace_notify((trace << 8) | SIGTRAP); } if (clone_flags & CLONE_VFORK) { wait_for_completion(&vfork); if (unlikely(current->ptrace & PT_TRACE_VFORK_DONE)) ptrace_notify((PTRACE_EVENT_VFORK_DONE << 8) | SIGTRAP); } else /* * Let the child process run first, to avoid most of the * COW overhead when the child exec()s afterwards. */ set_need_resched(); } return pid; }
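Several of the variants above end with set_need_resched() and a comment about letting the child run first to avoid copy-on-write overhead before an exec. The user-space pattern they are optimizing for looks like this:

/* The common fork-then-exec pattern: the child replaces its address space
 * almost immediately, so running it before the parent avoids copying pages
 * the child was never going to keep. */
#include <stdio.h>
#include <stdlib.h>
#include <sys/wait.h>
#include <unistd.h>

int main(void)
{
        pid_t pid = fork();

        if (pid < 0) {
                perror("fork");
                return EXIT_FAILURE;
        }
        if (pid == 0) {
                /* Child: exec right away. */
                execlp("ls", "ls", "-l", (char *)NULL);
                perror("execlp");               /* only reached if exec failed */
                _exit(127);
        }
        /* Parent: writing to memory here before the child exec()s would force
         * copy-on-write faults, which is what deferring the parent avoids. */
        int status;
        waitpid(pid, &status, 0);
        return WIFEXITED(status) ? WEXITSTATUS(status) : EXIT_FAILURE;
}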