/*
 * do_noquantum: called on behalf of a process that has run out of quantum.
 *
 * Resolves the sender endpoint to a scheduling slot, lowers the process'
 * priority by one queue (bounded by MIN_USER_Q), samples its accumulated
 * system time via sys_times(), and reschedules it locally.
 *
 * Returns OK on success, EBADEPT for an invalid endpoint, or the error
 * code from sys_times()/schedule_process_local().
 *
 * NOTE(review): the original carried a TODO saying the consumed-time
 * values are not actually used in any computation yet; the sampled
 * system time is simply stored in rmp->YOLO for later use — confirm
 * the intended accounting policy.
 */
int do_noquantum(message *m_ptr)
{
	register struct schedproc *rmp;
	int rv, proc_nr_n;
	clock_t new_sys_time;

	if (sched_isokendpt(m_ptr->m_source, &proc_nr_n) != OK) {
		printf("SCHED: WARNING: got an invalid endpoint in OOQ msg %u.\n",
			m_ptr->m_source);
		return EBADEPT;
	}

	/* Slot of the process we are scheduling; the endpoint came from the
	 * out-of-quantum notification message. */
	rmp = &schedproc[proc_nr_n];

	/* Demote by one priority queue, but never below MIN_USER_Q
	 * (MIN_USER_Q is the lowest/last user queue). */
	if (rmp->priority < MIN_USER_Q) {
		rmp->priority += 1; /* lower priority */
	}

	/* Sample the process' accumulated system time.  BUG FIX: the return
	 * value of sys_times() was previously ignored, so on failure an
	 * uninitialized clock value would have been stored. */
	if ((rv = sys_times(rmp->endpoint, NULL, &new_sys_time, NULL, NULL)) != OK)
		return rv;
	rmp->YOLO = new_sys_time;

	if ((rv = schedule_process_local(rmp)) != OK) {
		return rv;
	}
	return OK;
}
/*
 * elapsed_time_both: report total user/system CPU time (in ms) and the
 * deltas since the previous call.  Any output pointer may be NULL to
 * skip that value.  The shared baseline t_start is read and replaced
 * under erts_timeofday_mtx.
 */
void elapsed_time_both(UWord *ms_user, UWord *ms_sys,
		       UWord *ms_user_diff, UWord *ms_sys_diff)
{
    SysTimes now;
    UWord cur_user, cur_sys;
    UWord base_user, base_sys;

    sys_times(&now);

    /* Convert clock ticks to milliseconds. */
    cur_user = (now.tms_utime * 1000) / SYS_CLK_TCK;
    cur_sys  = (now.tms_stime * 1000) / SYS_CLK_TCK;

    if (ms_user)
	*ms_user = cur_user;
    if (ms_sys)
	*ms_sys = cur_sys;

    /* Swap in the new baseline atomically with reading the old one. */
    erts_smp_mtx_lock(&erts_timeofday_mtx);

    base_user = (t_start.tms_utime * 1000) / SYS_CLK_TCK;
    base_sys  = (t_start.tms_stime * 1000) / SYS_CLK_TCK;
    t_start = now;

    erts_smp_mtx_unlock(&erts_timeofday_mtx);

    if (ms_user_diff)
	*ms_user_diff = cur_user - base_user;
    if (ms_sys_diff)
	*ms_sys_diff = cur_sys - base_sys;
}
static void sys_profile_call_back( char *func, char **previous_call_back ) { if ( previous_call_back ) *previous_call_back = (char *)timer_callback; timer_callback = (callback_func *)func; // Ensure the timer is started (void)sys_times( (unsigned long *)0 ); }
/*===========================================================================* * do_getrusage * *===========================================================================*/ int do_getrusage(void) { clock_t user_time, sys_time; struct rusage r_usage; int r, children; if (m_in.m_lc_pm_rusage.who != RUSAGE_SELF && m_in.m_lc_pm_rusage.who != RUSAGE_CHILDREN) return EINVAL; /* * TODO: first relay the call to VFS. As is, VFS does not have any * fields it can fill with meaningful values, but this may change in * the future. In that case, PM would first have to use the tell_vfs() * system to get those values from VFS, and do the rest here upon * getting the response. */ memset(&r_usage, 0, sizeof(r_usage)); children = (m_in.m_lc_pm_rusage.who == RUSAGE_CHILDREN); /* * Get system times. For RUSAGE_SELF, get the times for the calling * process from the kernel. For RUSAGE_CHILDREN, we already have the * values we should return right here. */ if (!children) { if ((r = sys_times(who_e, &user_time, &sys_time, NULL, NULL)) != OK) return r; } else { user_time = mp->mp_child_utime; sys_time = mp->mp_child_stime; } /* In both cases, convert from clock ticks to microseconds. */ set_rusage_times(&r_usage, user_time, sys_time); /* Get additional fields from VM. */ if ((r = vm_getrusage(who_e, &r_usage, children)) != OK) return r; /* Finally copy the structure to the caller. */ return sys_datacopy(SELF, (vir_bytes)&r_usage, who_e, m_in.m_lc_pm_rusage.addr, (vir_bytes)sizeof(r_usage)); }
/*===========================================================================* * do_times * *===========================================================================*/ PUBLIC int do_times() { /* Perform the times(buffer) system call. */ register struct mproc *rmp = mp; clock_t t[5]; int s; if (OK != (s=sys_times(who, t))) panic(__FILE__,"do_times couldn't get times", s); rmp->mp_reply.reply_t1 = t[0]; /* user time */ rmp->mp_reply.reply_t2 = t[1]; /* system time */ rmp->mp_reply.reply_t3 = rmp->mp_child_utime; /* child user time */ rmp->mp_reply.reply_t4 = rmp->mp_child_stime; /* child system time */ rmp->mp_reply.reply_t5 = t[4]; /* uptime since boot */ return(OK); }
/*
 * sys_profile_frequency: query or set the profiling timer frequency.
 *
 * freq encodes both an enquiry protocol and a set request (see table
 * below); the resulting frequency (possibly unchanged) is written back
 * through previous_freq when that pointer is non-NULL.
 */
static void sys_profile_frequency( int freq, int *previous_freq )
{
    // Requested HZ:
    //  0 => tell me the current value (no change, IMPLEMENTED HERE)
    // - 1 => tell me the slowest (no change)
    // - 2 => tell me the default (no change, IMPLEMENTED HERE)
    // -nnn => tell me what you would choose for nnn (no change)
    // MIN_INT => tell me the fastest (no change)
    //
    // 1 => tell me the slowest (sets the clock)
    // MAX_INT => tell me the fastest (sets the clock)

    // Ensure the timer is started
    (void)sys_times( (unsigned long *)0 );

    if ( -2 == freq )
        freq = TICKS_PER_SEC; // default value
    else if ( 0 == freq )
        freq = set_freq; // collect current value
    else {
        // Positive freq means "actually set the clock"; negative means
        // "tell me what you would choose" (enquiry only).
        int do_set_freq = (freq > 0);
        unsigned int period = CYGNUM_HAL_RTC_PERIOD;

        // freq ^ -freq is zero only when freq == -freq, i.e. INT_MIN
        // here (freq == 0 was already handled above).
        // NOTE(review): evaluating -freq at INT_MIN is signed overflow
        // (UB in standard C) — works on the targeted two's-complement
        // toolchains, but worth confirming.
        if ( 0 == (freq ^ -freq) ) // Then it's MIN_INT in local size
            freq++; // just so that it will negate correctly

        // Then set the timer to that fast - or pass on the enquiry
#ifdef HAL_CLOCK_REINITIALIZE
        // Give the HAL enough info to do the division sum relative to
        // the default setup, in period and TICKS_PER_SEC.
        HAL_CLOCK_REINITIALIZE( freq, period, TICKS_PER_SEC );
#else
        freq = TICKS_PER_SEC; // the only choice
#endif
        if ( do_set_freq ) {
            // update the global variables
            unsigned int orig = set_freq;
            set_freq = freq;
            set_period = period;
            // We must "correct" sys_timer_ticks for the new scale factor.
            sys_timer_ticks = sys_timer_ticks * set_freq / orig;
        }
    }
    if ( previous_freq ) // Return the current value (new value)
        *previous_freq = freq;
}
/*===========================================================================*
 *				do_start_scheduling			     *
 *===========================================================================*/
/*
 * Take over scheduling of a new process.  Handles SCHEDULING_START (system
 * processes: priority/quantum come from the message) and SCHEDULING_INHERIT
 * (user processes: priority/quantum inherited from the parent's slot).
 * Returns OK, or an error from endpoint resolution, sys_times(),
 * sys_schedctl(), or schedule_process().
 */
int do_start_scheduling(message *m_ptr)
{
	register struct schedproc *rmp;
	int rv, proc_nr_n, parent_nr_n;

	/* we can handle two kinds of messages here */
	assert(m_ptr->m_type == SCHEDULING_START ||
		m_ptr->m_type == SCHEDULING_INHERIT);

	/* check who can send you requests */
	if (!accept_message(m_ptr))
		return EPERM;

	/* Resolve endpoint to proc slot. */
	if ((rv = sched_isemtyendpt(m_ptr->SCHEDULING_ENDPOINT, &proc_nr_n))
			!= OK) {
		return rv;
	}
	rmp = &schedproc[proc_nr_n];

	/* Populate process slot */
	rmp->endpoint     = m_ptr->SCHEDULING_ENDPOINT;
	rmp->parent       = m_ptr->SCHEDULING_PARENT;
	rmp->max_priority = (unsigned) m_ptr->SCHEDULING_MAXPRIO;

	/* Record the process' initial system time.  BUG FIX: the original
	 * passed rmp->YOLO by value where sys_times() expects a pointer,
	 * and then overwrote rmp->YOLO with the call's status code. */
	if ((rv = sys_times(rmp->endpoint, NULL, &rmp->YOLO, NULL, NULL))
			!= OK)
		return rv;

	if (rmp->max_priority >= NR_SCHED_QUEUES) {
		return EINVAL;
	}

	/* Special case for init: the first process scheduled and the
	 * parent of itself, so it gets default user values directly. */
	if (rmp->endpoint == rmp->parent) {
		rmp->priority   = USER_Q;
		rmp->time_slice = DEFAULT_USER_TIME_SLICE;

		/*
		 * Since the kernel never changes the cpu of a process and
		 * userspace scheduling hasn't changed it either, processes
		 * are still running on the BSP.
		 */
#ifdef CONFIG_SMP
		rmp->cpu = machine.bsp_id;
		/* FIXME set the cpu mask */
#endif
	}

	switch (m_ptr->m_type) {

	case SCHEDULING_START:
		/* System processes: quantum and priority are set explicitly
		 * rather than inherited from the parent. */
		rmp->priority   = rmp->max_priority;
		rmp->time_slice = (unsigned) m_ptr->SCHEDULING_QUANTUM;
		break;

	case SCHEDULING_INHERIT:
		/* Inherit current priority and time slice from parent.  With
		 * a single system-wide scheduler these values are local, and
		 * the parent endpoint must be valid. */
		if ((rv = sched_isokendpt(m_ptr->SCHEDULING_PARENT,
				&parent_nr_n)) != OK)
			return rv;

		rmp->priority   = schedproc[parent_nr_n].priority;
		rmp->time_slice = schedproc[parent_nr_n].time_slice;
		break;

	default:
		/* not reachable */
		assert(0);
	}

	/* Take over scheduling the process.  The kernel reply message
	 * populates the process' current priority and time slice. */
	if ((rv = sys_schedctl(0, rmp->endpoint, 0, 0, 0)) != OK) {
		printf("Sched: Error taking over scheduling for %d, kernel said %d\n",
			rmp->endpoint, rv);
		return rv;
	}
	rmp->flags = IN_USE;

	/* Schedule the process, giving it some quantum; retire CPUs that
	 * the kernel reports as bad. */
	pick_cpu(rmp);
	while ((rv = schedule_process(rmp, SCHEDULE_CHANGE_ALL)) == EBADCPU) {
		/* don't try this CPU ever again */
		cpu_proc[rmp->cpu] = CPU_DEAD;
		pick_cpu(rmp);
	}

	if (rv != OK) {
		printf("Sched: Error while scheduling process, kernel replied %d\n",
			rv);
		return rv;
	}

	/* Mark ourselves as the new scheduler.
	 * By default, processes are scheduled by the parent's scheduler.
	 * If this scheduler wanted to delegate to another scheduler, it
	 * would write that scheduler's endpoint into SCHEDULING_SCHEDULER. */
	m_ptr->SCHEDULING_SCHEDULER = SCHED_PROC_NR;

	return OK;
}
// // Generic syscall handler. // // Returns 0 if syscall number is not handled by this // module, 1 otherwise. This allows applications to // extend the syscall handler by using exception chaining. // CYG_ADDRWORD __do_syscall(CYG_ADDRWORD func, // syscall function number CYG_ADDRWORD arg1, CYG_ADDRWORD arg2, // up to four args. CYG_ADDRWORD arg3, CYG_ADDRWORD arg4, CYG_ADDRWORD *retval, CYG_ADDRWORD *sig) // syscall return value { int err = 0; *sig = 0; switch (func) { case SYS_open: { #ifdef CYGPKG_HAL_GDB_FILEIO // File I/O over the GDB remote protocol __externC int cyg_hal_gdbfileio_open( const char *name, int flags, int mode, int *sig ); if (gdb_active) err = cyg_hal_gdbfileio_open((const char *)arg1, (int)arg2, (int)arg3, (int *)sig); else #endif err = sys_open((const char *)arg1, (int)arg2, (int)arg3); break; } case SYS_read: { #ifdef CYGPKG_HAL_GDB_FILEIO // File I/O over the GDB remote protocol __externC int cyg_hal_gdbfileio_read( int fd, void *buf, size_t count, int *sig ); if (gdb_active) err = cyg_hal_gdbfileio_read((int)arg1, (void *)arg2, (size_t)arg3, (int *)sig); else #endif err = sys_read((int)arg1, (char *)arg2, (int)arg3); break; } case SYS_write: { #ifdef CYGPKG_HAL_GDB_FILEIO // File I/O over the GDB remote protocol __externC int cyg_hal_gdbfileio_write( int fd, const void *buf, size_t count, int *sig ); if (gdb_active) err = cyg_hal_gdbfileio_write((int)arg1, (const void *)arg2, (size_t)arg3, (int *)sig); else #endif err = sys_write((int)arg1, (char *)arg2, (int)arg3); break; } case SYS_close: { #ifdef CYGPKG_HAL_GDB_FILEIO // File I/O over the GDB remote protocol __externC int cyg_hal_gdbfileio_close( int fd, int *sig ); if (gdb_active) err = cyg_hal_gdbfileio_close((int)arg1, (int *)sig); else #endif err = sys_close((int)arg1); break; } case SYS_lseek: { #ifdef CYGPKG_HAL_GDB_FILEIO // File I/O over the GDB remote protocol __externC int cyg_hal_gdbfileio_lseek( int fd, long offset, int whence, int *sig ); if (gdb_active) err = 
cyg_hal_gdbfileio_lseek((int)arg1, (long)arg2, (int)arg3, (int *)sig); else #endif err = sys_lseek((int)arg1, (int)arg2, (int)arg3); break; } case SYS_stat: { #ifdef CYGPKG_HAL_GDB_FILEIO // File I/O over the GDB remote protocol __externC int cyg_hal_gdbfileio_stat( const char *pathname, void *statbuf, int *sig ); if (gdb_active) err = cyg_hal_gdbfileio_stat((const char *)arg1, (void *)arg2, (int *)sig); else #endif err = -NEWLIB_ENOSYS; break; } case SYS_fstat: { #ifdef CYGPKG_HAL_GDB_FILEIO // File I/O over the GDB remote protocol __externC int cyg_hal_gdbfileio_fstat( int fd, void *statbuf, int *sig ); if (gdb_active) err = cyg_hal_gdbfileio_fstat((int)arg1, (void *)arg2, (int *)sig); else #endif { struct newlib_stat *st = (struct newlib_stat *)arg2; st->st_mode = NEWLIB_S_IFCHR; st->st_blksize = 4096; err = 0; } break; } case SYS_rename: { #ifdef CYGPKG_HAL_GDB_FILEIO // File I/O over the GDB remote protocol __externC int cyg_hal_gdbfileio_rename( const char *oldpath, const char *newpath, int *sig ); if (gdb_active) err = cyg_hal_gdbfileio_rename((const char *)arg1, (const char *)arg2, (int *)sig); else #endif err = -NEWLIB_ENOSYS; break; } case SYS_unlink: { #ifdef CYGPKG_HAL_GDB_FILEIO // File I/O over the GDB remote protocol __externC int cyg_hal_gdbfileio_unlink( const char *pathname, int *sig ); if (gdb_active) err = cyg_hal_gdbfileio_unlink((const char *)arg1, (int *)sig); else #endif err = -NEWLIB_ENOSYS; break; } case SYS_isatty: { #ifdef CYGPKG_HAL_GDB_FILEIO // File I/O over the GDB remote protocol __externC int cyg_hal_gdbfileio_isatty( int fd, int *sig ); if (gdb_active) err = cyg_hal_gdbfileio_isatty((int)arg1, (int *)sig); else #endif err = 1; break; } case SYS_system: { #ifdef CYGPKG_HAL_GDB_FILEIO // File I/O over the GDB remote protocol __externC int cyg_hal_gdbfileio_system( const char *command, int *sig ); if (gdb_active) err = cyg_hal_gdbfileio_system((const char *)arg1, (int *)sig); else #endif err = -1; break; } case SYS_gettimeofday: { 
#ifdef CYGPKG_HAL_GDB_FILEIO // File I/O over the GDB remote protocol __externC int cyg_hal_gdbfileio_gettimeofday( void *tv, void *tz, int *sig ); if (gdb_active) err = cyg_hal_gdbfileio_gettimeofday((void *)arg1, (void *)arg2, (int *)sig); else #endif err = 0; break; } case SYS_utime: // FIXME. Some libglosses depend on this behavior. err = sys_times((unsigned long *)arg1); break; case SYS_times: err = sys_times((unsigned long *)arg1); break; case SYS_meminfo: err = 1; *(unsigned long *)arg1 = (unsigned long)(ram_end-ram_start); *(unsigned long *)arg2 = (unsigned long)ram_end; break; #ifdef CYGSEM_REDBOOT_BSP_SYSCALLS_GPROF case SYS_timer_call_back: sys_profile_call_back( (char *)arg1, (char **)arg2 ); break; case SYS_timer_frequency: sys_profile_frequency( (int)arg1, (unsigned int *)arg2 ); break; case SYS_timer_reset: sys_profile_reset(); break; #endif // CYGSEM_REDBOOT_BSP_SYSCALLS_GPROF case __GET_SHARED: *(__shared_t **)arg1 = &__shared_data; break; case SYS_exit: *sig = -1; // signal exit err = arg1; if (gdb_active) { #ifdef CYGOPT_REDBOOT_BSP_SYSCALLS_EXIT_WITHOUT_TRAP __send_exit_status((int)arg1); #else *sig = SIGTRAP; err = func; #endif // CYGOPT_REDBOOT_BSP_SYSCALLS_EXIT_WITHOUT_TRAP } break; default: return 0; } *retval = err; return 1; }