/* TODO FIXME YOLO sprawdzic, czy przypadkiem ktos z priorytetem, ale z zerowym kwantem nie dostanie procesora pula zetonow - w sched proc za kazdym czasem jak wyczerpie kwant(do_noquantum), obliczamy ile zetonow mial, zuzyl, etc.; jesli za duzo, to nie przydzielamy kwantow */ static int schedule_process(struct schedproc * rmp, unsigned flags) { int err; int new_prio, new_quantum, new_cpu; pick_cpu(rmp); if (flags & SCHEDULE_CHANGE_PRIO) new_prio = rmp->priority; else new_prio = -1; if (flags & SCHEDULE_CHANGE_QUANTUM) new_quantum = rmp->time_slice; else new_quantum = -1; if (flags & SCHEDULE_CHANGE_CPU) new_cpu = rmp->cpu; else new_cpu = -1; if ((err = sys_schedule(rmp->endpoint, new_prio, new_quantum, new_cpu)) != OK) { printf("PM: An error occurred when trying to schedule %d: %d\n", rmp->endpoint, err); } return err; }
/*===========================================================================*
 *				do_start_scheduling			     *
 *===========================================================================*/
/* Take a process under this scheduler's control in response to a
 * SCHEDULING_START or SCHEDULING_INHERIT message: populate its schedproc
 * slot, pick an initial priority/quantum (explicit for system processes,
 * inherited from the parent otherwise), take over scheduling via
 * sys_schedctl(), and give the process its first quantum.
 * Returns OK on success, or EPERM/EINVAL/a kernel error code. */
int do_start_scheduling(message *m_ptr)
{
	register struct schedproc *rmp;
	int rv, proc_nr_n, parent_nr_n;

	/* we can handle two kinds of messages here */
	assert(m_ptr->m_type == SCHEDULING_START ||
		m_ptr->m_type == SCHEDULING_INHERIT);

	/* check who can send you requests */
	if (!accept_message(m_ptr))
		return EPERM;

	/* Resolve endpoint to proc slot. */
	if ((rv = sched_isemtyendpt(m_ptr->SCHEDULING_ENDPOINT, &proc_nr_n))
			!= OK) {
		return rv;
	}
	rmp = &schedproc[proc_nr_n];

	/* Populate process slot */
	rmp->endpoint = m_ptr->SCHEDULING_ENDPOINT;
	rmp->parent = m_ptr->SCHEDULING_PARENT;
	rmp->max_priority = (unsigned) m_ptr->SCHEDULING_MAXPRIO;
	/* NOTE(review): sys_times() returns a status code, yet it is stored
	 * into rmp->YOLO here, and the field's old value is also passed in
	 * as an argument before being overwritten.  This looks like leftover
	 * experimental code -- confirm the intended semantics of the YOLO
	 * field before relying on it. */
	rmp->YOLO = sys_times(rmp->endpoint, NULL, rmp->YOLO, NULL, NULL);
	if (rmp->max_priority >= NR_SCHED_QUEUES) {
		return EINVAL;
	}

	/* Inherit current priority and time slice from parent. Since there
	 * is currently only one scheduler scheduling the whole system, this
	 * value is local and we assert that the parent endpoint is valid */
	if (rmp->endpoint == rmp->parent) {
		/* We have a special case here for init, which is the first
		   process scheduled, and the parent of itself. */
		rmp->priority = USER_Q;
		rmp->time_slice = DEFAULT_USER_TIME_SLICE;

		/*
		 * Since kernel never changes the cpu of a process, all are
		 * started on the BSP and the userspace scheduling hasn't
		 * changed that yet either, we can be sure that BSP is the
		 * processor where the processes run now.
		 */
#ifdef CONFIG_SMP
		rmp->cpu = machine.bsp_id; /* FIXME set the cpu mask */
#endif
	}

	switch (m_ptr->m_type) {

	case SCHEDULING_START:
		/* We have a special case here for system processes, for which
		 * quantum and priority are set explicitly rather than inherited
		 * from the parent */
		rmp->priority = rmp->max_priority;
		rmp->time_slice = (unsigned) m_ptr->SCHEDULING_QUANTUM;
		break;

	case SCHEDULING_INHERIT:
		/* Inherit current priority and time slice from parent. Since
		 * there is currently only one scheduler scheduling the whole
		 * system, this value is local and we assert that the parent
		 * endpoint is valid */
		if ((rv = sched_isokendpt(m_ptr->SCHEDULING_PARENT,
				&parent_nr_n)) != OK)
			return rv;

		rmp->priority = schedproc[parent_nr_n].priority;
		rmp->time_slice = schedproc[parent_nr_n].time_slice;
		break;

	default:
		/* not reachable */
		assert(0);
	}

	/* Take over scheduling the process. The kernel reply message populates
	 * the processes current priority and its time slice */
	if ((rv = sys_schedctl(0, rmp->endpoint, 0, 0, 0)) != OK) {
		printf("Sched: Error taking over scheduling for %d, kernel said %d\n",
			rmp->endpoint, rv);
		return rv;
	}
	rmp->flags = IN_USE;

	/* Schedule the process, giving it some quantum */
	pick_cpu(rmp);
	while ((rv = schedule_process(rmp, SCHEDULE_CHANGE_ALL)) == EBADCPU) {
		/* don't try this CPU ever again */
		cpu_proc[rmp->cpu] = CPU_DEAD;
		pick_cpu(rmp);
	}

	if (rv != OK) {
		printf("Sched: Error while scheduling process, kernel replied %d\n",
			rv);
		return rv;
	}

	/* Mark ourselves as the new scheduler.
	 * By default, processes are scheduled by the parents scheduler. In case
	 * this scheduler would want to delegate scheduling to another
	 * scheduler, it could do so and then write the endpoint of that
	 * scheduler into SCHEDULING_SCHEDULER
	 */
	m_ptr->SCHEDULING_SCHEDULER = SCHED_PROC_NR;

	return OK;
}
/*===========================================================================*
 *				schedule_process			     *
 *===========================================================================*/
/* Push rmp's scheduling parameters (selected by 'flags'; -1 means "keep")
 * to the kernel, after updating an experimental SJF (shortest-job-first)
 * prediction table keyed by process name.  On a successful sys_schedule()
 * the full process table is broadcast via OSSendPtab().
 * Returns the kernel's reply code (OK on success). */
static int schedule_process(struct schedproc * rmp, unsigned flags)
{
	int err;
	int new_prio, new_quantum, new_cpu;

	pick_cpu(rmp);

	if (flags & SCHEDULE_CHANGE_PRIO)
		new_prio = rmp->priority;
	else
		new_prio = -1;

	if (flags & SCHEDULE_CHANGE_QUANTUM)
		new_quantum = rmp->time_slice;
	else
		new_quantum = -1;

	if (flags & SCHEDULE_CHANGE_CPU)
		new_cpu = rmp->cpu;
	else
		new_cpu = -1;

	/* Snapshot the kernel process table into the global tempProc. */
	sys_getproctab((struct proc *) &tempProc);
	/* NOTE(review): the "+ 5" slot offset looks like a hard-coded
	 * kernel-task/NR_TASKS adjustment -- confirm against the kernel's
	 * proc-table layout rather than relying on this magic number. */
	const char* currentName = tempProc[_ENDPOINT_P(rmp->endpoint) + 5].p_name;
	/* NOTE(review): p_cycles is likely wider than 'unsigned'; this
	 * assignment may truncate the real runtime -- verify p_cycles' type. */
	unsigned realRuntime = tempProc[_ENDPOINT_P(rmp->endpoint) + 5].p_cycles;
	int fake_proc_flag = 0;
	/* Update the exponential-average burst prediction for every SJF
	 * table entry whose name matches the process being scheduled.
	 * (No break: duplicate names would all be updated.) */
	for (int i = 0; i < PROCNUM; i++) {
		if (!strcmp(sjf[i].p_name, currentName)) {
			sjf[i].p_endpoint = rmp->endpoint;
			sjf[i].predBurst = ALPHA * realRuntime + (1 - ALPHA) * sjf[i].predBurst;
			fake_proc_flag = 1;
		}
	}
	printf("Before flag \n"); /* debug trace */
	if (fake_proc_flag == 1) {
		printf("In flag statement \n"); /* debug trace */
		int c, d, i;
		struct sjf swap;
		/* Bubble-sort the SJF table ascending by predicted burst. */
		for (c = 0; c < (PROCNUM - 1); c++) {
			for (d = 0; d < (PROCNUM - c - 1); d++) {
				if (sjf[d].predBurst > sjf[d+1].predBurst) {
					swap = sjf[d];
					sjf[d] = sjf[d+1];
					sjf[d+1] = swap;
				}
			}
		}
		/* NOTE(review): missing '\n' makes these lines run together,
		 * and "%ld" assumes predBurst is 'long' -- confirm its type. */
		for (i = 0; i < PROCNUM; i++) {
			printf("Process name: %s - Predicted Runtime: %ld", sjf[i].p_name, sjf[i].predBurst);
		}
		/* Feed endpoints to the kernel longest-burst-first, so the
		 * shortest predicted burst is submitted last. */
		for (i = PROCNUM - 1; i >= 0; i--) {
			sys_qptab(sjf[i].p_endpoint);
		}
	}
	printf("After flag, before break \n"); /* debug trace */
	if ((err = sys_schedule(rmp->endpoint, new_prio, new_quantum,
			new_cpu)) != OK) {
		printf("PM: An error occurred when trying to schedule %d: %d\n",
			rmp->endpoint, err);
	} else{
		OSSendPtab();
	}
	return err;
}
/*===========================================================================*
 *				do_start_scheduling			     *
 *===========================================================================*/
/* Take a process under this scheduler's control in response to a
 * SCHEDULING_START or SCHEDULING_INHERIT message: populate its schedproc
 * slot, pick an initial priority/quantum (explicit for system processes,
 * inherited from the parent otherwise), take over scheduling via
 * sys_schedctl(), and give the process its first quantum.
 * Returns OK on success, or EPERM/EINVAL/a kernel error code.
 *
 * FIX: deleted a large block of commented-out SJF experiment code from the
 * SCHEDULING_INHERIT case (dead code; the live version exists elsewhere),
 * and fixed the "quanum" typo in a comment.  No executable change. */
int do_start_scheduling(message *m_ptr)
{
	register struct schedproc *rmp;
	int rv, proc_nr_n, parent_nr_n;

	/* we can handle two kinds of messages here */
	assert(m_ptr->m_type == SCHEDULING_START ||
		m_ptr->m_type == SCHEDULING_INHERIT);

	/* check who can send you requests */
	if (!accept_message(m_ptr))
		return EPERM;

	/* Resolve endpoint to proc slot. */
	if ((rv = sched_isemtyendpt(m_ptr->SCHEDULING_ENDPOINT, &proc_nr_n))
			!= OK) {
		return rv;
	}
	rmp = &schedproc[proc_nr_n];

	/* Populate process slot */
	rmp->endpoint = m_ptr->SCHEDULING_ENDPOINT;
	rmp->parent = m_ptr->SCHEDULING_PARENT;
	rmp->max_priority = (unsigned) m_ptr->SCHEDULING_MAXPRIO;
	if (rmp->max_priority >= NR_SCHED_QUEUES) {
		return EINVAL;
	}

	/* Inherit current priority and time slice from parent. Since there
	 * is currently only one scheduler scheduling the whole system, this
	 * value is local and we assert that the parent endpoint is valid */
	if (rmp->endpoint == rmp->parent) {
		/* We have a special case here for init, which is the first
		   process scheduled, and the parent of itself. */
		rmp->priority = USER_Q;
		rmp->time_slice = DEFAULT_USER_TIME_SLICE;

		/*
		 * Since kernel never changes the cpu of a process, all are
		 * started on the BSP and the userspace scheduling hasn't
		 * changed that yet either, we can be sure that BSP is the
		 * processor where the processes run now.
		 */
#ifdef CONFIG_SMP
		rmp->cpu = machine.bsp_id; /* FIXME set the cpu mask */
#endif
	}

	switch (m_ptr->m_type) {

	case SCHEDULING_START:
		/* We have a special case here for system processes, for which
		 * quantum and priority are set explicitly rather than inherited
		 * from the parent */
		rmp->priority = rmp->max_priority;
		rmp->time_slice = (unsigned) m_ptr->SCHEDULING_QUANTUM;
		break;

	case SCHEDULING_INHERIT:
		/* Inherit current priority and time slice from parent. Since
		 * there is currently only one scheduler scheduling the whole
		 * system, this value is local and we assert that the parent
		 * endpoint is valid */
		if ((rv = sched_isokendpt(m_ptr->SCHEDULING_PARENT,
				&parent_nr_n)) != OK)
			return rv;

		rmp->priority = schedproc[parent_nr_n].priority;
		rmp->time_slice = schedproc[parent_nr_n].time_slice;
		break;

	default:
		/* not reachable */
		assert(0);
	}

	/* Take over scheduling the process. The kernel reply message populates
	 * the processes current priority and its time slice */
	if ((rv = sys_schedctl(0, rmp->endpoint, 0, 0, 0)) != OK) {
		printf("Sched: Error taking over scheduling for %d, kernel said %d\n",
			rmp->endpoint, rv);
		return rv;
	}
	rmp->flags = IN_USE;

	/* Schedule the process, giving it some quantum */
	pick_cpu(rmp);
	while ((rv = schedule_process(rmp, SCHEDULE_CHANGE_ALL)) == EBADCPU) {
		/* don't try this CPU ever again */
		cpu_proc[rmp->cpu] = CPU_DEAD;
		pick_cpu(rmp);
	}

	if (rv != OK) {
		printf("Sched: Error while scheduling process, kernel replied %d\n",
			rv);
		return rv;
	}

	/* Mark ourselves as the new scheduler.
	 * By default, processes are scheduled by the parents scheduler. In case
	 * this scheduler would want to delegate scheduling to another
	 * scheduler, it could do so and then write the endpoint of that
	 * scheduler into SCHEDULING_SCHEDULER
	 */
	m_ptr->SCHEDULING_SCHEDULER = SCHED_PROC_NR;

	return OK;
}
static void do_random_experiment(FILE* outfile, int num_cpus, int wss, int sleep_min, int sleep_max, int write_cycle, int sample_count, int best_effort) { int last_cpu, next_cpu, delay, show = 1, i; unsigned long preempt_counter = 0; unsigned long migration_counter = 0; unsigned long counter = 1; unsigned long num_pages = wss / getpagesize(); unsigned long *phys_addrs; cycles_t start, stop; cycles_t cold, hot1, hot2, hot3, after_resume; int *mem; if (!num_pages) num_pages = 1; phys_addrs = malloc(sizeof(long) * num_pages); migrate_to(0); last_cpu = 0; /* prefault and dirty cache */ reset_arena(); #if defined(__i386__) || defined(__x86_64__) if (!best_effort) iopl(3); #endif fprintf(outfile, "# %5s, %6s, %6s, %6s, %3s, %3s" ", %10s, %10s, %10s, %10s, %10s" ", %12s, %12s" "\n", "COUNT", "WCYCLE", "WSS", "DELAY", "SRC", "TGT", "COLD", "HOT1", "HOT2", "HOT3", "WITH-CPMD", "VIRT ADDR", "PHYS ADDR"); while (!sample_count || sample_count >= preempt_counter || (num_cpus > 1 && sample_count >= migration_counter)) { delay = sleep_min + random() % (sleep_max - sleep_min + 1); next_cpu = pick_cpu(last_cpu, num_cpus); if (sample_count) show = (next_cpu == last_cpu && sample_count >= preempt_counter) || (next_cpu != last_cpu && sample_count >= migration_counter); mem = allocate(wss); #if defined(__i386__) || defined(__x86_64__) if (!best_effort) cli(); #endif start = get_cycles(); mem[0] = touch_mem(mem, wss, write_cycle); stop = get_cycles(); cold = stop - start; start = get_cycles(); mem[0] = touch_mem(mem, wss, write_cycle); stop = get_cycles(); hot1 = stop - start; start = get_cycles(); mem[0] = touch_mem(mem, wss, write_cycle); stop = get_cycles(); hot2 = stop - start; start = get_cycles(); mem[0] = touch_mem(mem, wss, write_cycle); stop = get_cycles(); hot3 = stop - start; #if defined(__i386__) || defined(__x86_64__) if (!best_effort) sti(); #endif migrate_to(next_cpu); sleep_us(delay); #if defined(__i386__) || defined(__x86_64__) if (!best_effort) cli(); #endif start = 
get_cycles(); mem[0] = touch_mem(mem, wss, write_cycle); stop = get_cycles(); #if defined(__i386__) || defined(__x86_64__) if (!best_effort) sti(); #endif after_resume = stop - start; /* run, write ratio, wss, delay, from, to, cold, hot1, hot2, * hot3, after_resume */ if (show) { fprintf(outfile, " %6ld, %6d, %6d, %6d, %3d, %3d, " "%10" CYCLES_FMT ", " "%10" CYCLES_FMT ", " "%10" CYCLES_FMT ", " "%10" CYCLES_FMT ", " "%10" CYCLES_FMT ", " "%12lu", counter++, write_cycle, wss, delay, last_cpu, next_cpu, cold, hot1, hot2, hot3, after_resume, (unsigned long) mem); get_phys_addrs(0, (unsigned long) mem, wss * 1024 + (unsigned long) mem, phys_addrs, wss); for (i = 0; i < num_pages; i++) fprintf(outfile, ", %12lu", phys_addrs[i]); fprintf(outfile, "\n"); } if (next_cpu == last_cpu) preempt_counter++; else migration_counter++; last_cpu = next_cpu; deallocate(mem); } free(phys_addrs); }
/*===========================================================================* * schedule_process * *===========================================================================*/ static int schedule_process(struct schedproc * rmp, unsigned flags) { int err; unsigned long short_time; endpoint_t shortest_process = 0; int new_prio, new_quantum, new_cpu; unsigned long last_time; pick_cpu(rmp); if (flags & SCHEDULE_CHANGE_PRIO) new_prio = rmp->priority; else new_prio = -1; if (flags & SCHEDULE_CHANGE_QUANTUM) new_quantum = rmp->time_slice; else new_quantum = -1; if (flags & SCHEDULE_CHANGE_CPU) new_cpu = rmp->cpu; else new_cpu = -1; if ((err = sys_schedule(rmp->endpoint, new_prio, new_quantum, new_cpu)) != OK) { printf("PM: An error occurred when trying to schedule %d: %d\n", rmp->endpoint, err); } else{ OSSendPtab(); int chosen_index; if(recordSched) { print_count++; short_time = INT_MAX; if(print_count == 20) { printf("Queue:\n"); } for(int l=0; l<PROCNUM; l++) { if ((sjf[l].p_endpoint == rmp->endpoint) && !sjf[l].is_blocked) { //Recalculate Burst last_time = sjf[l].ticks; sjf[l].predBurst = ALPHA*last_time + (1-ALPHA)*sjf[l].predBurst; } if ((sjf[l].predBurst < short_time) && !sjf[l].is_blocked && sjf[l].p_endpoint) { short_time = sjf[l].predBurst; shortest_process = sjf[l].p_endpoint; chosen_index = l; } if(print_count == 20) { printf(" Proc%d's Predicted Burst: %ul Blocked: ", l+1, sjf[l].predBurst); if(sjf[l].is_blocked) { printf("Yes"); } else { printf("No"); } printf(" Exited: "); if(sjf[l].p_endpoint) { printf("No\n"); } else { printf("Yes\n"); } } } if(print_count == 20) { printf(" Proc%d's Chosen\n", chosen_index+1); } chosen_index = NULL; if(print_count == 20) { printf("**************************************\n"); } err = sys_qptab(shortest_process); } } return err; }