/*static*/ void cmd_ps(const char* param_buf)
{
    list_node_t *cur;
    int i;

    // print on-core tasks
    printk("\n#######################################\ncore-0: ");
    print_task(scheduler.cur_task[0]);
    printk("core-1: ");
    print_task(scheduler.cur_task[1]);
    printk("core-2: ");
    print_task(scheduler.cur_task[2]);
    printk("core-3: ");
    print_task(scheduler.cur_task[3]);

    // print ready tasks
    int count = ready_task_count();
    if (!count) {
        printk("no ready task\n");
    } else {
        printk("ready task count: %d\n", count);
        for (cur = scheduler.ready_list.head, i = 0; cur; cur = cur->next, i++) {
            printk("ready-%d: ", i);
            print_task(cur->value);
        }
    }
    printk("#######################################\n");
}
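The `print_task` helper and the `scheduler` structures that `cmd_ps` relies on are not part of this snippet. A minimal sketch of such a helper, assuming a hypothetical task structure with pid, state and name fields (none of these names come from the original code base), might look like:

/* Hypothetical sketch only: the task layout below is an assumption. */
typedef struct task {
    int         pid;
    int         state;      /* index into state_names[] */
    const char *name;
} task_t;

static const char *state_names[] = { "RUNNING", "READY", "BLOCKED" };

static void print_task(const task_t *t)
{
    if (!t) {
        printk("idle\n");
        return;
    }
    printk("pid=%d state=%s name=%s\n", t->pid, state_names[t->state], t->name);
}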
void print_rq_at_KE(struct seq_file *m, struct rq *rq, int rq_cpu)
{
    struct task_struct *g, *p;
    unsigned long flags;
    int locked;

    SEQ_printf(m,
        "\nrunnable tasks:\n"
        " task PID tree-key switches prio"
        " exec-runtime sum-exec sum-sleep\n"
        "------------------------------------------------------"
        "----------------------------------------------------\n");

    //read_lock_irqsave(&tasklist_lock, flags);
    locked = read_trylock_n_irqsave(&tasklist_lock, &flags, m, "print_rq_at_KE");

    do_each_thread(g, p) {
        if (!p->on_rq || task_cpu(p) != rq_cpu)
            continue;

        print_task(m, rq, p);
    } while_each_thread(g, p);

    if (locked)
        read_unlock_irqrestore(&tasklist_lock, flags);
}
void TASK_dump(u8_t io)
{
    ioprint(io, "TASK SYSTEM\n-----------\n");
    print_task(io, task_sys.current, " current");

    char lst[sizeof(TASK_DUMP_OUTPUT)];
    memcpy(lst, TASK_DUMP_OUTPUT, sizeof(TASK_DUMP_OUTPUT));
    char* p = (char*)strchr(lst, '_');
    task* ct = (task *)task_sys.head;
    int ix = 1;
    while (ct) {
        sprint(p, "%02i", ix++);
        print_task(io, ct, lst);
        ct = ct->_next;
    }
    print_task(io, (task *)task_sys.last, " last ");

    ioprint(io, " pool bitmap ");
    for (ix = 0; ix < sizeof(task_pool.mask)/sizeof(task_pool.mask[0]); ix++) {
        ioprint(io, "%032b ", task_pool.mask[ix]);
    }
    ioprint(io, "\n");
    for (ix = 0; ix < sizeof(task_pool.mask)/sizeof(task_pool.mask[0]); ix++) {
        int bit;
        for (bit = 0; bit < 32; bit++) {
            if ((task_pool.mask[ix] & (1<<bit)) == 0) {
                print_task(io, &task_pool.task[ix*32 + bit], " ");
            }
        }
    }
    ioprint(io, "\n");

    ioprint(io, " timers\n");
    char lst2[sizeof(TASK_TIM_DUMP_OUTPUT)];
    memcpy(lst2, TASK_TIM_DUMP_OUTPUT, sizeof(TASK_TIM_DUMP_OUTPUT));
    p = (char*)strchr(lst2, '_');
    task_timer* tt = task_sys.first_timer;
    ix = 1;
    time now = SYS_get_time_ms();
    while (tt) {
        sprint(p, "%02i", ix++);
        print_timer(io, tt, lst2, now);
        tt = tt->_next;
    }
}
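The `TASK_DUMP_OUTPUT` and `TASK_TIM_DUMP_OUTPUT` templates copied into the local buffers are not shown; the loops only require them to contain a '_' placeholder that gets overwritten with a two-digit index. A purely hypothetical definition compatible with that usage could be:

/* Hypothetical templates: the first '_' is located via strchr() and
 * overwritten in place by sprint(p, "%02i", ix++). */
#define TASK_DUMP_OUTPUT      " task __"
#define TASK_TIM_DUMP_OUTPUT  " timer __"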
void print_taskq(Task_Queue *tq)
{
    Task *t;

    printf("TaskQ: %ld tasks in the queue\n", taskq_length(tq));
    for (t = taskq_top(tq); t; t = t->next) {
        printf("  ");
        print_task(t);
    }
}
void TASK_dump_pool(u8_t io)
{
    int i, j;
    for (i = 0; i <= (CONFIG_TASK_POOL-1)/32; i++) {
        if (task_pool.mask[i]) {
            for (j = 0; j < 32; j++) {
                int ix = i*32+j;
                ioprint(io, "TASK %i @ %08x\n", ix, &task_pool.task[ix]);
                print_task(io, &task_pool.task[ix], "");
            }
        }
    }
}
static void print_timer(u8_t io, task_timer *t, const char *prefix, time now)
{
    if (t) {
        ioprint(io, "%s %s start:%08x (%+08x) recurrent:%08x next:%08x [%s]\n",
                prefix,
                t->alive ? "ALIVE" : "DEAD ",
                (u32_t)t->start_time,
                (u32_t)(t->start_time - now),
                (u32_t)(t->recurrent_time),
                (u32_t)(t->_next),
                t->name);
        print_task(io, t->task, prefix);
    } else {
        ioprint(io, "%s NONE\n", prefix);
    }
}
static int mt_sched_debug_show(struct seq_file *m, void *v)
{
    struct task_struct *g, *p;
    unsigned long flags;

    SEQ_printf(m, "=== mt Scheduler Profiling ===\n");
    SEQ_printf(m,
        "\nrunnable tasks:\n"
        " task PID switches prio"
        " exec-runtime sum-exec sum-sleep\n"
        "------------------------------------------------------"
        "----------------------------------------------------\n");

    read_lock_irqsave(&tasklist_lock, flags);
    do_each_thread(g, p) {
        print_task(m, p);
    } while_each_thread(g, p);
    read_unlock_irqrestore(&tasklist_lock, flags);

    return 0;
}
int print_hub(t_taupe *taupe)
{
    wclear(taupe->top->win);
    print_sys(taupe->sys, taupe->top);
    print_task(taupe->task, taupe->top);
    print_cpu(taupe->cpu, taupe->top);
    print_mem(taupe->mem, taupe->top);
    if (taupe->signal->on) {
        print_opt(taupe->signal, "Send Signal");
        print_signal(taupe->signal, taupe->pid);
    }
    else if (taupe->sort->on)
        print_opt(taupe->sort, "Sort By");
    print_main(taupe);
    return (0);
}
void *consumer(void *arg)
{
    int cpu_number = (int) arg;
    /* use a pointer to the shared per-CPU queue so producer updates are
     * visible in the wait loop below */
    thread_cpu_t *cpu_queue = &cpu[cpu_number];

    printf("Hello from consumer %d\n", cpu_number);
    sleep(4);

    /* wait until at least one task has been queued for this CPU */
    while (cpu_queue->size < 1) {
        sleep(1);
    }

    task_t task = cpu_queue->queue[0];
    task.last_cpu = cpu_number;
    print_task(task);

    printf("Bye from consumer %d\n", cpu_number);
    thread_finished += 1;
    pthread_exit(NULL);
}
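The snippet assumes the consumer threads are spawned elsewhere with the CPU index passed through the void* argument. A minimal, hypothetical launcher consistent with that cast is sketched below; NUM_CPUS and the cpu[] array are assumptions, not taken from the original program.

/* Hypothetical launcher for the consumer threads above. */
#include <pthread.h>

#define NUM_CPUS 4

int start_consumers(void)
{
    pthread_t workers[NUM_CPUS];
    int i;

    for (i = 0; i < NUM_CPUS; i++) {
        /* pass the CPU index through the void* argument, matching the
         * (int) cast performed in consumer() */
        if (pthread_create(&workers[i], NULL, consumer, (void *)(long)i) != 0)
            return -1;
    }
    for (i = 0; i < NUM_CPUS; i++)
        pthread_join(workers[i], NULL);
    return 0;
}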
static void print_rq(struct seq_file *m, struct rq *rq, int rq_cpu)
{
    struct task_struct *g, *p;

    SEQ_printf(m,
        "\nrunnable tasks:\n"
        " task PID tree-key switches prio"
        " exec-runtime sum-exec sum-sleep\n"
        "------------------------------------------------------"
        "----------------------------------------------------\n");

    rcu_read_lock();
    for_each_process_thread(g, p) {
        if (task_cpu(p) != rq_cpu)
            continue;

        print_task(m, rq, p);
    }
    rcu_read_unlock();
}
static void print_rq(struct seq_file *m, struct rq *rq, int rq_cpu)
{
    struct task_struct *g, *p;
    unsigned long flags;

    SEQ_printf(m,
        "\nrunnable tasks:\n"
        " task PID tree-key switches prio"
        " exec-runtime sum-exec sum-sleep\n"
        "------------------------------------------------------"
        "----------------------------------------------------\n");

    read_lock_irqsave(&tasklist_lock, flags);
    for_each_process_thread(g, p) {
        if (!p->on_rq || task_cpu(p) != rq_cpu)
            continue;

        print_task(m, rq, p);
    }
    read_unlock_irqrestore(&tasklist_lock, flags);
}
/* Start up the simulator with a given scheduling algorithm
 * and the specified task list.
 */
void sched_start(slist_t* task_list, struct sched_class* sc)
{
    task_t *cur, *next;
    int task_cnt = 0;
    pthread_t sim_cpu[MAX_CPUS];
    long cpu = 0;
    int i = 0;

    sched_init(sc);

    /* Traverse the task_list to wake up new tasks */
    cur = head_slist(task_list);
    while (task_cnt < task_list->size) {
        /* Hack to automate load balancing at the beginning */
        cur->last_cpu = task_cnt % nr_cpus;
        if (init_task_sched(cur)) {
            perror("Couldn't initialize class specific data for thread ");
            exit(1);
        }
        /* Just inserts the sched_event in the event_queue */
        schedule_wake_up_new_task(cur);
        if (debug_mode)
            print_task(cur);
        cur = next_slist(task_list, cur);
        task_cnt++;
    }

    if (debug_mode) {
        printf("Scheduler initialized. Press ENTER to start simulation.\n");
        getchar();
    }

    /* Create per-CPU simulation threads */
    for (cpu = 0; cpu < nr_cpus; cpu++)
        pthread_create(&sim_cpu[cpu], NULL, sched_cpu, (void*)cpu);

    /* Wait for completion of per-CPU simulation threads */
    for (cpu = 0; cpu < nr_cpus; cpu++)
        pthread_join(sim_cpu[cpu], NULL);

    /* Print log information to aid in the construction of the Gantt diagram */
    cur = head_slist(task_list);
    for (i = 0; i < task_cnt; i++) {
        print_task_log_registers(cur);
        cur = next_slist(task_list, cur);
    }

    /* Free up class-specific task data and task structures */
    cur = head_slist(task_list);
    for (i = 0; i < task_cnt; i++) {
        next = next_slist(task_list, cur);
        if (active_sched_class->task_free)
            active_sched_class->task_free(cur);
        free(cur);
        cur = next;
    }

    /* Free up sched-class specific resources if necessary */
    if (active_sched_class->sched_destroy)
        active_sched_class->sched_destroy();

    printf("Simulation completed\n");
    sched_terminate(0);
}
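sched_start only touches two hooks of struct sched_class directly (task_free and sched_destroy); the rest of the class interface is hidden behind sched_init, init_task_sched and schedule_wake_up_new_task. As a purely illustrative sketch under that assumption, wiring up a class and launching the simulator could look like this; the fifo_* names and the tcs_data field are hypothetical.

/* Hypothetical scheduling class: only the two hooks that sched_start
 * calls directly are filled in. */
static void fifo_task_free(task_t *t)
{
    free(t->tcs_data);          /* hypothetical per-task class data */
}

static void fifo_sched_destroy(void)
{
    /* release global run-queue resources of the class (hypothetical) */
}

static struct sched_class fifo_sched_class = {
    .task_free     = fifo_task_free,
    .sched_destroy = fifo_sched_destroy,
};

/* usage: build the task list, then hand both to the simulator */
/* sched_start(task_list, &fifo_sched_class); */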
int main(int argc, char *argv[])
{
    const struct hwloc_topology_support *support;
    hwloc_topology_t topology;
    hwloc_const_bitmap_t topocpuset;
    hwloc_bitmap_t cpuset;
    unsigned long flags = 0;
    DIR *dir;
    struct dirent *dirent;
    int show_all = 0;
    int show_threads = 0;
    int get_last_cpu_location = 0;
    char *callname;
    char *pidcmd = NULL;
    int err;
    int opt;

    callname = strrchr(argv[0], '/');
    if (!callname)
        callname = argv[0];
    else
        callname++;
    /* skip argv[0], handle options */
    argc--;
    argv++;

    hwloc_utils_check_api_version(callname);

    while (argc >= 1) {
        opt = 0;
        if (!strcmp(argv[0], "-a"))
            show_all = 1;
        else if (!strcmp(argv[0], "-l") || !strcmp(argv[0], "--logical")) {
            logical = 1;
        } else if (!strcmp(argv[0], "-p") || !strcmp(argv[0], "--physical")) {
            logical = 0;
        } else if (!strcmp(argv[0], "-c") || !strcmp(argv[0], "--cpuset")) {
            show_cpuset = 1;
        } else if (!strcmp(argv[0], "-e") || !strncmp(argv[0], "--get-last-cpu-location", 10)) {
            get_last_cpu_location = 1;
        } else if (!strcmp(argv[0], "-t") || !strcmp(argv[0], "--threads")) {
#ifdef HWLOC_LINUX_SYS
            show_threads = 1;
#else
            fprintf(stderr, "Listing threads is currently only supported on Linux\n");
#endif
        } else if (!strcmp(argv[0], "--whole-system")) {
            flags |= HWLOC_TOPOLOGY_FLAG_WHOLE_SYSTEM;
        } else if (!strcmp(argv[0], "--pid-cmd")) {
            if (argc < 2) {
                usage(callname, stdout);
                exit(EXIT_FAILURE);
            }
            pidcmd = argv[1];
            opt = 1;
        } else {
            fprintf(stderr, "Unrecognized option: %s\n", argv[0]);
            usage(callname, stderr);
            exit(EXIT_FAILURE);
        }
        argc -= opt+1;
        argv += opt+1;
    }

    err = hwloc_topology_init(&topology);
    if (err)
        goto out;

    hwloc_topology_set_flags(topology, flags);

    err = hwloc_topology_load(topology);
    if (err)
        goto out_with_topology;

    support = hwloc_topology_get_support(topology);

    if (get_last_cpu_location) {
        if (!support->cpubind->get_proc_last_cpu_location)
            goto out_with_topology;
    } else {
        if (!support->cpubind->get_proc_cpubind)
            goto out_with_topology;
    }

    topocpuset = hwloc_topology_get_topology_cpuset(topology);

    dir = opendir("/proc");
    if (!dir)
        goto out_with_topology;

    cpuset = hwloc_bitmap_alloc();
    if (!cpuset)
        goto out_with_dir;

    while ((dirent = readdir(dir))) {
        long pid_number;
        hwloc_pid_t pid;
        char pidoutput[1024];
        char *end;
        char name[64] = "";
        /* management of threads */
        unsigned boundthreads = 0, i;
        long *tids = NULL; /* NULL if process is not threaded */
        hwloc_bitmap_t *tidcpusets = NULL;

        pid_number = strtol(dirent->d_name, &end, 10);
        if (*end)
            /* Not a number */
            continue;

        pid = hwloc_pid_from_number(pid_number, 0);

#ifdef HWLOC_LINUX_SYS
        {
            unsigned pathlen = 6 + strlen(dirent->d_name) + 1 + 7 + 1;
            char *path;
            int file;
            ssize_t n;

            path = malloc(pathlen);
            snprintf(path, pathlen, "/proc/%s/cmdline", dirent->d_name);
            file = open(path, O_RDONLY);
            free(path);

            if (file >= 0) {
                n = read(file, name, sizeof(name) - 1);
                close(file);

                if (n <= 0)
                    /* Ignore kernel threads and errors */
                    continue;

                name[n] = 0;
            }
        }
#endif /* HWLOC_LINUX_SYS */

        if (show_threads) {
#ifdef HWLOC_LINUX_SYS
            /* check if some threads must be displayed */
            unsigned pathlen = 6 + strlen(dirent->d_name) + 1 + 4 + 1;
            char *path;
            DIR *taskdir;

            path = malloc(pathlen);
            snprintf(path, pathlen, "/proc/%s/task", dirent->d_name);
            taskdir = opendir(path);
            if (taskdir) {
                struct dirent *taskdirent;
                long tid;
                unsigned n = 0;
                /* count threads */
                while ((taskdirent = readdir(taskdir))) {
                    tid = strtol(taskdirent->d_name, &end, 10);
                    if (*end)
                        /* Not a number */
                        continue;
                    n++;
                }
                if (n > 1) {
                    /* if there's more than one thread, see if some are bound */
                    tids = malloc(n * sizeof(*tids));
                    tidcpusets = calloc(n+1, sizeof(*tidcpusets));
                    if (tids && tidcpusets) {
                        /* reread the directory but gather info now */
                        rewinddir(taskdir);
                        i = 0;
                        while ((taskdirent = readdir(taskdir))) {
                            tid = strtol(taskdirent->d_name, &end, 10);
                            if (*end)
                                /* Not a number */
                                continue;
                            if (get_last_cpu_location) {
                                if (hwloc_linux_get_tid_last_cpu_location(topology, tid, cpuset))
                                    continue;
                            } else {
                                if (hwloc_linux_get_tid_cpubind(topology, tid, cpuset))
                                    continue;
                            }
                            hwloc_bitmap_and(cpuset, cpuset, topocpuset);
                            tids[i] = tid;
                            tidcpusets[i] = hwloc_bitmap_dup(cpuset);
                            i++;
                            if (hwloc_bitmap_iszero(cpuset))
                                continue;
                            if (hwloc_bitmap_isequal(cpuset, topocpuset) && !show_all)
                                continue;
                            boundthreads++;
                        }
                    } else {
                        /* failed to alloc, behave as if there were no threads */
                        free(tids); tids = NULL;
                        free(tidcpusets); tidcpusets = NULL;
                    }
                }
                closedir(taskdir);
            }
#endif /* HWLOC_LINUX_SYS */
        }

        if (get_last_cpu_location) {
            if (hwloc_get_proc_last_cpu_location(topology, pid, cpuset, 0))
                continue;
        } else {
            if (hwloc_get_proc_cpubind(topology, pid, cpuset, 0))
                continue;
        }

        hwloc_bitmap_and(cpuset, cpuset, topocpuset);
        if (hwloc_bitmap_iszero(cpuset))
            continue;

        /* don't print anything if the process isn't bound, no threads are bound, and we're not showing all */
        if (hwloc_bitmap_isequal(cpuset, topocpuset) && (!tids || !boundthreads) && !show_all)
            continue;

        pidoutput[0] = '\0';
        if (pidcmd) {
            char *cmd;
            FILE *file;
            cmd = malloc(strlen(pidcmd)+1+5+2+1);
            sprintf(cmd, "%s %u", pidcmd, pid);
            file = popen(cmd, "r");
            if (file) {
                if (fgets(pidoutput, sizeof(pidoutput), file)) {
                    end = strchr(pidoutput, '\n');
                    if (end)
                        *end = '\0';
                }
                pclose(file);
            }
            free(cmd);
        }

        /* print the process */
        print_task(topology, pid_number, name, cpuset, pidoutput[0] == '\0' ? NULL : pidoutput, 0);
        if (tids)
            /* print each tid we found (its tidcpuset isn't NULL anymore) */
            for (i = 0; tidcpusets[i] != NULL; i++) {
                print_task(topology, tids[i], "", tidcpusets[i], NULL, 1);
                hwloc_bitmap_free(tidcpusets[i]);
            }

        /* free threads stuff */
        free(tidcpusets);
        free(tids);
    }

    err = 0;
    hwloc_bitmap_free(cpuset);

 out_with_dir:
    closedir(dir);
 out_with_topology:
    hwloc_topology_destroy(topology);
 out:
    return err;
}
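The core hwloc pattern used above (topology init/load, then hwloc_get_proc_cpubind into a bitmap) can be exercised in isolation. A minimal sketch that queries and prints the binding of the calling process, using only public hwloc calls plus hwloc_bitmap_snprintf, is shown below; it is an illustration rather than part of the tool above.

/* Minimal sketch: print the CPU binding of the current process. */
#include <hwloc.h>
#include <stdio.h>
#include <unistd.h>

static int print_own_binding(void)
{
    hwloc_topology_t topology;
    hwloc_bitmap_t cpuset;
    char buf[256];

    if (hwloc_topology_init(&topology) < 0)
        return -1;
    if (hwloc_topology_load(topology) < 0) {
        hwloc_topology_destroy(topology);
        return -1;
    }

    cpuset = hwloc_bitmap_alloc();
    if (cpuset && hwloc_get_proc_cpubind(topology, getpid(), cpuset, 0) == 0) {
        hwloc_bitmap_snprintf(buf, sizeof(buf), cpuset);
        printf("pid %ld bound to cpuset %s\n", (long)getpid(), buf);
    }

    if (cpuset)
        hwloc_bitmap_free(cpuset);
    hwloc_topology_destroy(topology);
    return 0;
}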