static int pcn_m_unicast(void *arg, const uint8_t *macaddr) { pcn_t *pcnp = (pcn_t *)arg; int i; uint16_t addr[3]; bcopy(macaddr, addr, sizeof (addr)); mutex_enter(&pcnp->pcn_intrlock); mutex_enter(&pcnp->pcn_xmtlock); if (IS_RUNNING(pcnp)) pcn_suspend(pcnp); for (i = 0; i < 3; i++) pcn_csr_write(pcnp, PCN_CSR_PAR0 + i, addr[i]); bcopy(macaddr, pcnp->pcn_addr, ETHERADDRL); if (IS_RUNNING(pcnp)) pcn_resume(pcnp); mutex_exit(&pcnp->pcn_xmtlock); mutex_exit(&pcnp->pcn_intrlock); return (0); }
/*
 * MAC callback: enable or disable promiscuous reception.
 * Records the requested state, then toggles PCN_MODE_PROMISC in the
 * MODE CSR with the chip quiesced.  Always returns 0.
 */
static int
pcn_m_promisc(void *arg, boolean_t on)
{
	pcn_t	*pcnp = (pcn_t *)arg;

	mutex_enter(&pcnp->pcn_intrlock);
	mutex_enter(&pcnp->pcn_xmtlock);

	pcnp->pcn_promisc = on;

	if (IS_RUNNING(pcnp))
		pcn_suspend(pcnp);

	/* set promiscuous mode */
	if (pcnp->pcn_promisc)
		PCN_CSR_SETBIT(pcnp, PCN_CSR_MODE, PCN_MODE_PROMISC);
	else
		PCN_CSR_CLRBIT(pcnp, PCN_CSR_MODE, PCN_MODE_PROMISC);

	if (IS_RUNNING(pcnp))
		pcn_resume(pcnp);

	mutex_exit(&pcnp->pcn_xmtlock);
	mutex_exit(&pcnp->pcn_intrlock);

	return (0);
}
/* the scheduler code */
/*
 * Pick the next runnable thread.
 *
 * Scans the per-priority ready queues from current_max_prio down to 0.
 * The ASSERT(tcb->ready_next != NULL) suggests each queue is a circular
 * list; the scan advances to the *successor* of prio_queue[i] before
 * testing, which rotates the queue head and yields round-robin behaviour
 * within a priority level.  Threads that are not runnable, or whose
 * timeslice is exhausted, are dequeued on the fly.  Falls back to the
 * idle thread (and marks current_max_prio = -1) when nothing is
 * schedulable.
 */
tcb_t * find_next_thread()
{
	for (int i = current_max_prio; i >= 0; i--)
	{
		tcb_t *tcb = prio_queue[i];
		while (tcb)
		{
#if defined(CONFIG_DEBUG_SANITY)
			ASSERT(tcb->queue_state & TS_QUEUE_READY);
			ASSERT(tcb->ready_next != NULL);
#endif
			/* Advance first: resume scanning after the last head. */
			tcb = tcb->ready_next;
			if ( IS_RUNNING(tcb) && (tcb->timeslice > 0) )
			{
				/* Rotate the queue head and cache the priority. */
				prio_queue[i] = tcb;
				current_max_prio = i;
				return tcb;
			}
			else
			{
				/* Not schedulable: drop it from the ready queue
				 * and restart from the (possibly updated) head. */
				thread_dequeue_ready(tcb);
				tcb = prio_queue[i];
			}
		}
	}
	/* if we can't find a schedulable thread - switch to idle */
	current_max_prio = -1;
	return get_idle_tcb();
}
/*
 * sys_thread_switch: voluntarily yield the CPU, optionally donating
 * it to the thread named by `tid`.  If `tid` is non-nil, maps to a
 * live tcb whose id matches, and that thread is runnable, switch to
 * it directly; otherwise fall back to the normal scheduling decision.
 */
void sys_thread_switch(l4_threadid_t tid)
{
	TRACEPOINT_1PAR(SYS_THREAD_SWITCH, tid.raw);
#if defined (CONFIG_DEBUG_TRACE_SYSCALLS)
	if (tid == L4_NIL_ID)
		spin1(75);
	else
		printf("sys_thread_switch(tid: %x)\n", tid);
#endif
	/* Make sure we are in the ready queue to
	 * find at least ourself and ensure that the thread
	 * is rescheduled */
	thread_enqueue_ready(get_current_tcb());

	tcb_t * tcb = tid_to_tcb(tid);
	/* Note: the !l4_is_nil_id() test short-circuits before tcb->myself
	 * is dereferenced, guarding the nil-id case. */
	if (!(!l4_is_nil_id(tid) && (tcb->myself == tid ) && (IS_RUNNING(tcb))))
		tcb = find_next_thread();

	/* do dispatch only if necessary */
	if (tcb != get_current_tcb())
		dispatch_thread(tcb);

	return_thread_switch();
}
/*
 * Resume a task's per-thread performance counters.
 * If a saved resume_cstatus exists (set by do_vperfctr_suspend), restore
 * it and reload the control registers; otherwise re-derive the control
 * state via vperfctr_enable_control().  Hardware is only touched when
 * the target is the current task.  Returns -ESRCH for an unlinked
 * perfctr, otherwise the enable result (0 on success).
 */
static int do_vperfctr_resume(struct vperfctr *perfctr, struct task_struct *tsk)
{
	unsigned int resume_cstatus;
	int ret;

	if (!tsk)
		return -ESRCH;	/* attempt to update unlinked perfctr */

	/* PREEMPT note: preemption is disabled over the entire
	   region because we're updating an active perfctr.
	   NOTE(review): no preempt_disable() call is visible in this
	   function; presumably the caller disables preemption -- confirm. */
	if (IS_RUNNING(perfctr) && tsk == current)
		vperfctr_suspend(perfctr);

	resume_cstatus = perfctr->resume_cstatus;
	if (perfctr_cstatus_enabled(resume_cstatus)) {
		/* Fast path: restore the saved status and reload. */
		perfctr->cpu_state.user.cstatus = resume_cstatus;
		perfctr->resume_cstatus = 0;
		vperfctr_ireload(perfctr);
		ret = 0;
	} else {
		/* No saved status: rebuild control state from scratch. */
		ret = vperfctr_enable_control(perfctr, tsk);
		resume_cstatus = perfctr->cpu_state.user.cstatus;
	}

	if (ret >= 0 && perfctr_cstatus_enabled(resume_cstatus) && tsk == current)
		vperfctr_resume(perfctr);

	return ret;
}
/* Take a snapshot of the counters without stopping them. */
static void vperfctr_sample(struct vperfctr *perfctr)
{
	if (!IS_RUNNING(perfctr))
		return;
	perfctr_cpu_sample(&perfctr->cpu_state);
	vperfctr_reset_sampling_timer(perfctr);
}
/*
 * Resume i-mode (interrupt-mode) counters after an overflow interrupt.
 * Restores the cstatus saved by the interrupt path (iresume_cstatus),
 * forces a control-register reload, and restarts the counters when the
 * target is the current task.  Returns -ESRCH for an unlinked perfctr,
 * -EPERM when there are no i-mode counters to resume, -ENOSYS when
 * interrupt support is compiled out.
 */
static int sys_vperfctr_iresume(struct vperfctr *perfctr, const struct task_struct *tsk)
{
#ifdef CONFIG_PERFCTR_INTERRUPT_SUPPORT
	unsigned int iresume_cstatus;

	if (!tsk)
		return -ESRCH;	/* attempt to update unlinked perfctr */
	iresume_cstatus = perfctr->iresume_cstatus;
	if (!perfctr_cstatus_has_ictrs(iresume_cstatus))
		return -EPERM;

	/* PREEMPT note: preemption is disabled over the entire
	   region because we're updating an active perfctr. */
	preempt_disable();
	if (IS_RUNNING(perfctr) && tsk == current)
		vperfctr_suspend(perfctr);
	/* Restore the counter status that the interrupt handler zeroed. */
	perfctr->cpu_state.cstatus = iresume_cstatus;
	perfctr->iresume_cstatus = 0;
	/* remote access note: perfctr_cpu_ireload() is ok */
	perfctr_cpu_ireload(&perfctr->cpu_state);
	if (tsk == current)
		vperfctr_resume(perfctr);
	preempt_enable();
	return 0;
#else
	return -ENOSYS;
#endif
}
static void ContinueCmd(char** argv, int argc, int wait) { if (argc >= 2) { int jid = atoi(argv[1]); bgjobL* node = bgjobs; while (node != NULL) { if (node->jid == jid) { if (wait) { node->status |= FOREGROUND; } else { node->status &= ~FOREGROUND; } node->status |= RUNNING; kill(node->pid, SIGCONT); if (wait) { while (IS_RUNNING(node)) { sleep(1); } } break; } node = node->next; } } }
/*
 * DDI resume entry point: bring the chip back up after a suspend.
 * Clears PCN_SUSPENDED, re-initializes the chip, restarts it if the
 * interface was running, and resumes the MII link layer.
 */
int
pcn_ddi_resume(dev_info_t *dip)
{
	pcn_t *pcnp;

	if ((pcnp = ddi_get_soft_state(pcn_ssp, ddi_get_instance(dip))) == NULL)
		return (DDI_FAILURE);

	mutex_enter(&pcnp->pcn_intrlock);
	mutex_enter(&pcnp->pcn_xmtlock);

	pcnp->pcn_flags &= ~PCN_SUSPENDED;

	if (!pcn_initialize(pcnp, B_FALSE)) {
		pcn_error(pcnp->pcn_dip, "unable to resume chip");
		pcnp->pcn_flags |= PCN_SUSPENDED;
		/* NOTE(review): locks are released here in acquisition
		 * order, unlike the reverse order used on the success
		 * path below -- harmless but inconsistent.  Returning
		 * DDI_SUCCESS on a failed re-init is presumably
		 * deliberate (resume is not expected to fail); confirm
		 * against DDI framework expectations. */
		mutex_exit(&pcnp->pcn_intrlock);
		mutex_exit(&pcnp->pcn_xmtlock);
		return (DDI_SUCCESS);
	}

	if (IS_RUNNING(pcnp))
		pcn_startall(pcnp);

	mutex_exit(&pcnp->pcn_xmtlock);
	mutex_exit(&pcnp->pcn_intrlock);

	mii_resume(pcnp->pcn_mii);

	return (DDI_SUCCESS);
}
/* Take a snapshot of the counters without stopping them. */
static void vperfctr_sample(struct vperfctr *perfctr)
{
	if (!IS_RUNNING(perfctr))
		return;
	debug_check_smp_id(perfctr);
	perfctr_cpu_sample(&perfctr->cpu_state);
	vperfctr_reset_sampling_timer(perfctr);
}
/* Sample the counters but do not suspend them. */ static inline void vperfctr_sample(struct vperfctr *perfctr) { if (IS_RUNNING(perfctr)) { // logical place to see if the counters are ours else return perfctr_cpu_sample(&perfctr->cpu_state); vperfctr_reset_sampling_timer(perfctr); } }
/*
 * Stop the stopwatch (recording a stop stamp if it is still running)
 * and return the elapsed time.  A NULL timer yields 0.
 */
long double
stopwatch_stop (struct stopwatch_t* T)
{
  if (!T)
    return 0;
  if (IS_RUNNING (*T))
    STAMP_STOP (*T);
  return stopwatch_elapsed (T);
}
/*
 * Resume i-mode (interrupt-mode) counters after an overflow signal has
 * been handled.  The overflow handler froze the i-mode counters and
 * zeroed cpu_state.cstatus after saving it into iresume_cstatus; this
 * call restores that status and reloads the control registers so the
 * counters unfreeze.  (The TSC was not suspended in the handler and
 * kept counting.)  The running counters are suspended first so that a
 * resume does not overwrite the PMC registers with stale values.
 * Returns -ESRCH for an unlinked perfctr, -EPERM when there are no
 * i-mode counters, -ENOSYS when interrupt support is compiled out.
 */
static int do_vperfctr_iresume(struct vperfctr *perfctr, const struct task_struct *tsk)
{
#ifdef CONFIG_PERFCTR_INTERRUPT_SUPPORT
	unsigned int iresume_cstatus;

	if (!tsk)
		return -ESRCH;	/* attempt to update unlinked perfctr */
	iresume_cstatus = perfctr->iresume_cstatus;
	if (!perfctr_cstatus_has_ictrs(iresume_cstatus)) {
		return -EPERM;
	}

	/* PREEMPT note: preemption is disabled over the entire
	   region because we're updating an active perfctr. */
	preempt_disable();
	if (IS_RUNNING(perfctr) && tsk == current)
		vperfctr_suspend(perfctr);
	/* Restore the cstatus zeroed by the interrupt handler. */
	perfctr->cpu_state.cstatus = iresume_cstatus;
	perfctr->iresume_cstatus = 0;
	/* remote access note: perfctr_cpu_ireload() is ok */
	/* Force a control-register reload, unfreezing the i-mode counters. */
	perfctr_cpu_ireload(&perfctr->cpu_state);
	if (tsk == current)
		vperfctr_resume(perfctr);
	preempt_enable();
	return 0;
#else
	return -ENOSYS;
#endif
}
long double stopwatch_elapsed (const struct stopwatch_t* T) { long double dt = 0; if (T) { if (IS_RUNNING (*T)) { DECL_RAW (stop); STAMP_RAW (stop); dt = ELAPSED (READ_START (*T), stop); } else { dt = ELAPSED (READ_START (*T), READ_STOP (*T)); } } return dt; }
/* schedule() --> switch_to() --> .. --> __vperfctr_resume().
 * PRE: perfctr == current->arch.thread.perfctr
 * If the counters are runnable, resume them.
 * PREEMPT note: switch_to() runs with preemption disabled.
 */
void __vperfctr_resume(struct vperfctr *perfctr)
{
	if (IS_RUNNING(perfctr)) {
#ifdef CONFIG_PERFCTR_CPUS_FORBIDDEN_MASK
		/* NOTE(review): presumably the task was scheduled onto a
		 * CPU it must not count on (bad_cpus_allowed is set) while
		 * it has PMCs mapped; instead of resuming, kill its
		 * counter state and deliver SIGILL -- confirm. */
		if (unlikely(atomic_read(&perfctr->bad_cpus_allowed)) &&
		    perfctr_cstatus_nrctrs(perfctr->cpu_state.user.cstatus)) {
			perfctr->cpu_state.user.cstatus = 0;
			perfctr->resume_cstatus = 0;
			BUG_ON(current->state != TASK_RUNNING);
			send_sig(SIGILL, current, 1);
			return;
		}
#endif
		vperfctr_resume_with_overflow_check(perfctr);
	}
}
/*
 * Start the chip: (re)initialize it, set the START and interrupt-enable
 * bits, arm the driver timer, and kick MAC tx if the interface is
 * marked running.  Caller must hold both pcn_intrlock and pcn_xmtlock
 * (ASSERTed below).
 */
static void
pcn_startall(pcn_t *pcnp)
{
	ASSERT(mutex_owned(&pcnp->pcn_intrlock));
	ASSERT(mutex_owned(&pcnp->pcn_xmtlock));

	/* NOTE(review): pcn_initialize() result is deliberately ignored
	 * here (cast to void); callers that care check it themselves. */
	(void) pcn_initialize(pcnp, B_FALSE);

	/* Start chip and enable interrupts */
	PCN_CSR_SETBIT(pcnp, PCN_CSR_CSR, PCN_CSR_START|PCN_CSR_INTEN);

	pcn_start_timer(pcnp);

	if (IS_RUNNING(pcnp))
		mac_tx_update(pcnp->pcn_mh);
}
/*
 * Interactive command handler: set the Format-7 image size.
 * Prompts for width and height, verifies a camera is selected
 * (CHECK_CAM), and refuses while capture is in progress.  The actual
 * size change is not yet implemented (UNIMP_MSG).
 */
static COMMAND_FUNC( do_fmt7_setsize )
{
	uint32_t w,h;

	w=HOW_MANY("width");
	h=HOW_MANY("height");

	CHECK_CAM

	/* Don't try to set the image size if capture is running... */
	if( IS_RUNNING(the_cam_p) ){
		WARN("can't set image size while camera is running!?");
		return;
	}

	UNIMP_MSG("set_fmt7_size");
}
/*
 * Suspend a task's counters: save the live cstatus into resume_cstatus
 * (so do_vperfctr_resume() can restore it) and mark the counters
 * stopped.  The hardware is only touched (vperfctr_suspend) when the
 * target is the current task.  Returns -ESRCH for an unlinked perfctr.
 */
static int do_vperfctr_suspend(struct vperfctr *perfctr, struct task_struct *tsk)
{
	if (!tsk)
		return -ESRCH;	/* attempt to update unlinked perfctr */

	/* PREEMPT note: preemption is disabled over the entire
	   region since we're updating an active perfctr.
	   NOTE(review): no preempt_disable() call is visible here;
	   presumably the caller disables preemption -- confirm. */
	if (IS_RUNNING(perfctr)) {
		if (tsk == current)
			vperfctr_suspend(perfctr);
		/* Save status for a later resume, then stop the counters. */
		perfctr->resume_cstatus = perfctr->cpu_state.user.cstatus;
		perfctr->cpu_state.user.cstatus = 0;
	}
	return 0;
}
/* Called from exit_thread() or sys_vperfctr_unlink().
 * If the counters are running, stop them and sample their final values.
 * Detach the vperfctr object from its owner task.
 * PREEMPT note: exit_thread() does not run with preemption disabled.
 */
static void vperfctr_unlink(struct task_struct *owner, struct vperfctr *perfctr)
{
	/* this synchronises with vperfctr_ioctl() */
	spin_lock(&perfctr->owner_lock);
	perfctr->owner = NULL;
	spin_unlock(&perfctr->owner_lock);

	/* perfctr suspend+detach must be atomic wrt process suspend */
	/* this also synchronises with perfctr_set_cpus_allowed() */
	vperfctr_task_lock(owner);
	if (IS_RUNNING(perfctr) && owner == current)
		vperfctr_suspend(perfctr);
	owner->thread.perfctr = NULL;
	vperfctr_task_unlock(owner);

	/* Mark the counters stopped and drop this link's reference. */
	perfctr->cpu_state.cstatus = 0;
	vperfctr_clear_iresume_cstatus(perfctr);
	put_vperfctr(perfctr);
}
/* schedule() --> switch_to() --> .. --> __vperfctr_resume().
 * PRE: perfctr == current->thread.perfctr
 * If the counters are runnable, resume them.
 * PREEMPT note: switch_to() runs with preemption disabled.
 */
void __vperfctr_resume(struct vperfctr *perfctr)
{
	if (IS_RUNNING(perfctr)) {
#ifdef CONFIG_PERFCTR_CPUS_FORBIDDEN_MASK
		/* NOTE(review): presumably the task was scheduled onto a
		 * CPU it must not count on (bad_cpus_allowed is set) while
		 * it has PMCs mapped; instead of resuming, kill its
		 * counter state and deliver SIGILL -- confirm. */
		if (unlikely(atomic_read(&perfctr->bad_cpus_allowed)) &&
		    perfctr_cstatus_nrctrs(perfctr->cpu_state.cstatus)) {
			perfctr->cpu_state.cstatus = 0;
			vperfctr_clear_iresume_cstatus(perfctr);
			BUG_ON(current->state != TASK_RUNNING);
			send_sig(SIGILL, current, 1);
			return;
		}
#endif
		vperfctr_resume_with_overflow_check(perfctr);
	}
}
/*
 * Reset a perfctr to its post-creation state: stop any running
 * counters, zero the entire CPU state, and clear the inheritance
 * bookkeeping.  Returns -ESRCH for an unlinked perfctr.
 */
static int do_vperfctr_clear(struct vperfctr *perfctr, struct task_struct *tsk)
{
	if (!tsk)
		return -ESRCH;	/* attempt to update unlinked perfctr */

	/* PREEMPT note: preemption is disabled over the entire
	   region because we're updating an active perfctr.
	   NOTE(review): no preempt_disable() call is visible here;
	   presumably the caller disables preemption -- confirm. */
	if (IS_RUNNING(perfctr) && tsk == current)
		vperfctr_suspend(perfctr);

	memset(&perfctr->cpu_state, 0, sizeof perfctr->cpu_state);
	perfctr->resume_cstatus = 0;

	/* Reset inheritance state under its own lock. */
	spin_lock(&perfctr->children_lock);
	perfctr->inheritance_id = 0;
	memset(&perfctr->children, 0, sizeof perfctr->children);
	spin_unlock(&perfctr->children_lock);

	return 0;
}
/* Called from exit_thread() or do_vperfctr_unlink().
 * If the counters are running, stop them and sample their final values.
 * Mark the vperfctr object as dead.
 * Optionally detach the vperfctr object from its owner task.
 * PREEMPT note: exit_thread() does not run with preemption disabled.
 */
static void vperfctr_unlink(struct task_struct *owner, struct vperfctr *perfctr, int do_unlink)
{
	/* this synchronises with sys_vperfctr() */
	spin_lock(&perfctr->owner_lock);
	perfctr->owner = NULL;
	spin_unlock(&perfctr->owner_lock);

	/* perfctr suspend+detach must be atomic wrt process suspend */
	/* this also synchronises with perfctr_set_cpus_allowed() */
	/* NOTE(review): the task_lock()/task_unlock() pair below is
	 * commented out -- unclear whether deliberate or a leftover;
	 * confirm the suspend+detach region no longer needs it. */
	//task_lock(owner);
	if (IS_RUNNING(perfctr) && owner == current)
		vperfctr_suspend(perfctr);
	if (do_unlink)
		owner->arch.thread.perfctr = NULL;
	//task_unlock(owner);
	/* Mark the counters stopped; drop the link's reference only
	 * when actually detaching. */
	perfctr->cpu_state.user.cstatus = 0;
	perfctr->resume_cstatus = 0;
	if (do_unlink)
		put_vperfctr(perfctr);
}
/*
 * Execute a built-in shell command (cd, jobs, fg, bg, alias, unalias).
 * If argv[0] matches an alias, the command line is rewritten with the
 * aliases substituted and re-interpreted instead.
 *
 * Fixes vs. original:
 *  - cmdline buffer was malloc(length) but length+1 bytes are written
 *    (sum of each "arg + space" is exactly `length` chars, plus the
 *    terminating NUL) -- one-byte heap overflow; now length + 1.
 *  - the strdup'd argv copies (and the argv array) were leaked; freed
 *    after the new command line is built.
 *  - redefining an existing alias leaked its old name/cmdline strings.
 *  - `unalias` dereferenced argv[1] without checking argc.
 */
static void RunBuiltInCmd(commandT* cmd)
{
  /* If applicable, reformat command with the aliases substituted */
  aliasL* node = aliases;
  while (node != NULL) {
    if (strcmp(cmd->argv[0], node->name) == 0) {
      char** argv = (char**) malloc(sizeof(char*) * cmd->argc); // store new argv for concatenating into a new command
      /* populate argv with aliased replacements as long as the alias's
         command is trailed by a space */
      int argi;
      int aliasNext = TRUE;
      size_t length = 0;
      for (argi = 0; argi < cmd->argc; argi++) {
        node = NULL;
        if (aliasNext) {
          node = aliases;
          while (node != NULL) {
            if (strcmp(cmd->argv[argi], node->name) == 0) {
              break;
            }
            node = node->next;
          }
        }
        if (node != NULL) {
          argv[argi] = strdup(node->cmdline);
          length += strlen(argv[argi]) + 1;
          /* only keep substituting if the alias body ends in a space */
          if (node->cmdline[strlen(node->cmdline) - 1] != ' ') {
            aliasNext = FALSE;
          }
        } else {
          argv[argi] = strdup(cmd->argv[argi]);
          length += strlen(argv[argi]) + 1;
          aliasNext = FALSE;
        }
      }
      /* create new command string: `length` chars of text+spaces, +1 NUL */
      char* cmdline = (char*) malloc(sizeof(char) * (length + 1));
      strcpy(cmdline, "");
      for (argi = 0; argi < cmd->argc; argi++) {
        strcat(cmdline, argv[argi]);
        strcat(cmdline, " ");
      }
      /* the strdup'd pieces are no longer needed */
      for (argi = 0; argi < cmd->argc; argi++) {
        free(argv[argi]);
      }
      free(argv);
      /* handle as any other command */
      /* NOTE(review): cmdline is not freed here in case Interpret()
         retains it -- confirm ownership. */
      Interpret(cmdline);
      return; // return to shell
    }
    node = node->next;
  }
  /* Builtin commands implemented here */
  if (strcmp(cmd->argv[0], "cd") == 0) {
    /* cd: change to argv[1], or $HOME when no argument given */
    char* path;
    if (cmd->argc > 1) {
      path = cmd->argv[1];
    } else {
      path = getenv("HOME");
    }
    if (chdir(path) < 0) {
      printf("failed to change directory\n");
      fflush(stdout);
    }
  } else if (strcmp(cmd->argv[0], "jobs") == 0) {
    /* jobs: list background jobs, reaping finished ones as we go */
    bgjobL* node = bgjobs;
    while (node != NULL) {
      if (IS_TERMINATED(node)) {
        printf("[%d] Done %s\n", node->jid, node->cmdline);
      } else {
        if (IS_RUNNING(node)) {
          printf("[%d] Running %s &\n", node->jid, node->cmdline);
        } else {
          printf("[%d] Stopped %s\n", node->jid, node->cmdline);
        }
      }
      fflush(stdout);
      CleanupJob(&node, FALSE);
      if (node == NULL) {
        node = bgjobs;
      } else {
        node = node->next;
      }
    }
  } else if (strcmp(cmd->argv[0], "fg") == 0) {
    ContinueCmd(cmd->argv, cmd->argc, TRUE);
  } else if (strcmp(cmd->argv[0], "bg") == 0) {
    ContinueCmd(cmd->argv, cmd->argc, FALSE);
  } else if (strcmp(cmd->argv[0], "alias") == 0) {
    if (cmd->argc == 1) {
      /* Print Aliases */
      aliasL* node = aliases;
      while (node != NULL) {
        printf("alias %s='%s'\n", node->name, node->cmdline);
        fflush(stdout);
        node = node->next;
      }
    } else {
      /* Make Alias: parse "alias name='body'" out of the raw cmdline */
      size_t h = 0;
      while (cmd->cmdline[h] != 'a') { h++; } // assume that lias comes after 'a'
      h += 5;
      while (cmd->cmdline[h] == ' ') { h++; }
      size_t i = 0;
      while (cmd->cmdline[h + i] != '=') { i++; }
      char* name = (char*) malloc(sizeof(char) * (i + 1));
      strncpy(name, &(cmd->cmdline)[h], i);
      name[i] = '\0';
      while (cmd->cmdline[h + i] != '\'') { i++; }
      i++;
      size_t j = 0;
      while (cmd->cmdline[h + i + j] != '\'') { j++; }
      char* cmdline = (char*) malloc(sizeof(char) * (j + 1));
      strncpy(cmdline, &(cmd->cmdline)[h + i], j);
      cmdline[j] = '\0';
      /* find the sorted place for the alias */
      aliasL* found = NULL;
      aliasL* prev = NULL;
      aliasL* node = aliases;
      while (node != NULL && found == NULL) {
        int cmp = strcmp(name, node->name);
        if (cmp == 0) {
          /* redefinition: release the old strings before overwriting */
          found = node;
          free(found->name);
          free(found->cmdline);
          break;
        } else if (cmp < 0) {
          found = (aliasL*) malloc(sizeof(aliasL));
          if (prev != NULL) {
            prev->next = found;
          } else {
            aliases = found;
          }
          found->next = node;
          break;
        }
        prev = node;
        node = node->next;
      }
      if (found == NULL) {
        /* append at the tail (or create the first entry) */
        found = (aliasL*) malloc(sizeof(aliasL));
        if (prev != NULL) {
          prev->next = found;
        } else {
          aliases = found;
        }
        found->next = NULL;
      }
      found->name = name;
      found->cmdline = cmdline;
    }
  } else if (strcmp(cmd->argv[0], "unalias") == 0) {
    /* guard: "unalias" with no argument would dereference NULL */
    if (cmd->argc > 1) {
      aliasL* prev = NULL;
      aliasL* node = aliases;
      while (node != NULL) {
        if (strcmp(node->name, cmd->argv[1]) == 0) {
          if (prev != NULL) {
            prev->next = node->next;
          } else {
            aliases = node->next;
          }
          free(node->name);
          free(node->cmdline);
          free(node);
          node = NULL;
        } else {
          prev = node;
          node = node->next;
        }
      }
    }
  }
}
/*
 * Fork and run an external command.
 * SIGCHLD/SIGTSTP/SIGINT are blocked around fork() so the job is
 * registered (AddJob) before any child-state signal can arrive; both
 * parent and child then restore the old mask.  Both sides also call
 * setpgid to put the child in its own process group, closing the
 * race over which runs first.  A foreground job is polled once per
 * second until it stops running.
 * NOTE(review): the forceFork parameter is unused in this body.
 */
static void Exec(commandT* cmd, bool forceFork)
{
  sigset_t mask;
  sigset_t old;
  sigemptyset(&mask);
  sigaddset(&mask, SIGCHLD);
  sigaddset(&mask, SIGTSTP);
  sigaddset(&mask, SIGINT);
  if (sigprocmask(SIG_BLOCK, &mask, &old) < 0) {
    printf("tsh: failed to change tsh signal mask");
    fflush(stdout);
    return;
  }
  int child_pid = fork();
  /* The processes split here */
  if(child_pid < 0) {
    printf("failed to fork\n");
    fflush(stdout);
    return;
  }
  if(child_pid > 0) {
    /* parent: register the job, then (foreground only) wait for it */
    setpgid(child_pid, 0); // Move child into its own process group.
    int status;
    if (cmd->bg) {
      status = RUNNING;
    } else {
      status = FOREGROUND | RUNNING;
    }
    bgjobL* job = AddJob(child_pid, cmd->cmdline, status);
    if (sigprocmask(SIG_SETMASK, &old, NULL) < 0) {
      printf("tsh: failed to change tsh signal mask");
      fflush(stdout);
      return;
    }
    if (!cmd->bg) {
      while (IS_RUNNING(job)) {
        sleep(1);
      }
    }
  } else {
    /* child: restore mask, wire up redirections, then exec */
    setpgid(0, 0); // Move child into its own process group.
    if (sigprocmask(SIG_SETMASK, &old, NULL) < 0) {
      printf("tsh: failed to change child signal mask");
      fflush(stdout);
      exit(2);
    }
    if (cmd->is_redirect_in) {
      int in = open(cmd->redirect_in, O_RDONLY);
      if (in < 0) {
        printf("failed to open %s for reading", cmd->redirect_in);
        fflush(stdout);
        exit(2);
      }
      dup2(in, STDIN);
      close(in);
    }
    if (cmd->is_redirect_out) {
      int out = open(cmd->redirect_out, O_WRONLY | O_TRUNC | O_CREAT, S_IRUSR | S_IRGRP | S_IWGRP | S_IWUSR);
      if (out < 0) {
        printf("failed to open %s for writing", cmd->redirect_out);
        fflush(stdout);
        exit(2);
      }
      dup2(out, STDOUT);
      close(out);
    }
    execv(cmd->name, cmd->argv);
    /* only reached if execv failed */
    exit(2);
  }
}
/*
 * Run one stage of a pipeline, then recurse for the remaining stages.
 * Each stage is forked into its own process group and registered as a
 * foreground job; the parent polls until it finishes, then recurses
 * with the pipe's read end as the next stage's stdin.
 */
void RunCmdPipe(commandT* cmd, commandT** rest, int n, int incoming)
{
  // cmd = current command to be run
  // rest = remaining commands to be run
  // n = length of rest
  // incoming = incoming "STDIN" fd
  int builtin = FALSE;
  if (IsBuiltIn(cmd->argv[0])) {
    builtin = TRUE;
  } else {
    if (!ResolveExternalCmd(cmd)) {
      printf("%s: command not found\n", cmd->argv[0]);
      fflush(stdout);
      if (incoming != -1) {
        close(incoming);
      }
      return;
    }
  }
  // Set up pipe if there are commands left to run
  int fd[2];
  if (n) {
    if (pipe(fd) < 0) {
      printf("failed to create pipe\n");
      fflush(stdout);
    }
  }
  /* Block job-control signals across fork() so the job is registered
   * before any child-state signal can arrive. */
  sigset_t mask;
  sigset_t old;
  sigemptyset(&mask);
  sigaddset(&mask, SIGCHLD);
  sigaddset(&mask, SIGTSTP);
  sigaddset(&mask, SIGINT);
  if (sigprocmask(SIG_BLOCK, &mask, &old) < 0) {
    printf("tsh: failed to change tsh signal mask");
    fflush(stdout);
    return;
  }
  int child_pid = fork();
  /* The processes split here */
  if(child_pid < 0) {
    printf("failed to fork\n");
    fflush(stdout);
    return;
  }
  if (child_pid) {
    /* parent: register the stage as a foreground job and wait for it */
    setpgid(child_pid, 0); // Move child into its own process group.
    bgjobL* job = AddJob(child_pid, cmd->cmdline, FOREGROUND | RUNNING);
    if (sigprocmask(SIG_SETMASK, &old, NULL) < 0) {
      printf("tsh: failed to change tsh signal mask");
      fflush(stdout);
      return;
    }
    // close incoming (if available) now that we're done reading it
    if (incoming != -1) {
      close(incoming);
    }
    // close the write end of fd (if available) now that we're done writing to it
    if (n) {
      close(fd[1]);
    }
    while (IS_RUNNING(job)) {
      sleep(1);
    }
    CleanupJob(&job, TRUE);
  } else {
    setpgid(0, 0); // Move child into its own process group.
    if (sigprocmask(SIG_SETMASK, &old, NULL) < 0) {
      printf("tsh: failed to change child signal mask");
      fflush(stdout);
      exit(2);
    }
    // Map incoming pipe fd to STDIN (if available)
    if (incoming != -1) {
      if (dup2(incoming, STDIN) < 0) {
        printf("failed to map pipe to STDIN\n");
        fflush(stdout);
        exit(2);
      }
      close(incoming);
    }
    // Map STDOUT to outgoing pipe fd (if available)
    if (n) {
      /* NOTE(review): the child never closes fd[0]; the extra read-end
       * copy is inherited across exec -- appears harmless for EOF
       * semantics (EOF needs all *write* ends closed) but is a minor
       * fd leak; confirm. */
      if (dup2(fd[1], STDOUT) < 0) {
        printf("failed to map STDOUT to pipe\n");
        fflush(stdout);
        exit(2);
      }
      close(fd[1]);
    }
    if (builtin) {
      RunBuiltInCmd(cmd);
      exit(0);
    } else {
      execv(cmd->name, cmd->argv);
      exit(2);
    }
  }
  if (n) {
    // pipe into the next process
    RunCmdPipe(rest[0], &rest[1], n - 1, fd[0]);
  }
}
/*
 * Safely remove a timer from the (fast or slow) timer lists.
 * NOTE(review): the #endif just below closes a #ifdef (presumably a
 * TIMER_DEBUG alternative signature) that begins before this excerpt.
 *
 * Protocol: if the timer's handler is currently executing, drop the
 * list lock, yield, and retry from `again` -- unless we *are* that
 * handler, in which case deleting ourselves is refused (-2).
 * Returns 0 when the timer was detached, -1 when it was inactive or
 * already detached, -2 when called from the timer's own handler.
 */
int timer_del_safe(struct timer_ln* tl)
#endif
{
	int ret;

	ret=-1;
again:
	/* quick exit if timer inactive */
	if ( !(tl->flags & F_TIMER_ACTIVE)){
#ifdef TIMER_DEBUG
		LOG(timerlog, "timer_del called on an inactive timer %p (%p, %p),"
				" flags %x\n", tl, tl->next, tl->prev, tl->flags);
		LOG(timerlog, "WARN: -timer_del-; called from %s(%s):%d\n",
				func, file, line);
		LOG(timerlog, "WARN: -timer_del-: added %d times"
				", last from: %s(%s):%d, deleted %d times"
				", last from: %s(%s):%d, init %d times, expired %d \n",
				tl->add_calls, tl->add_func, tl->add_file, tl->add_line,
				tl->del_calls, tl->del_func, tl->del_file, tl->del_line,
				tl->init, tl->expires_no);
#else
		/* LM_DBG("called on an inactive timer %p (%p, %p),"
				" flags %x\n", tl, tl->next, tl->prev, tl->flags); */
#endif
		return -1;
	}
#ifdef USE_SLOW_TIMER
	/* timer lives on the slow list and is not in the batch currently
	 * being executed: delete it under the slow-list lock */
	if (IS_ON_SLOW_LIST(tl) && (tl->slow_idx!=*t_idx)){
		LOCK_SLOW_TIMER_LIST();
		/* re-check under the lock; state may have changed */
		if (!IS_ON_SLOW_LIST(tl) || (tl->slow_idx==*t_idx)){
			UNLOCK_SLOW_TIMER_LIST();
			goto again;
		}
		if (IS_RUNNING_SLOW(tl)){
			UNLOCK_SLOW_TIMER_LIST();
			if (IS_IN_TIMER_SLOW()){
				/* if somebody tries to shoot himself in the foot,
				 * warn him and ignore the delete */
				LM_CRIT("timer handle %p (s) tried to delete"
						" itself\n", tl);
#ifdef TIMER_DEBUG
				LOG(timerlog, "WARN: -timer_del-: called from %s(%s):%d\n",
						func, file, line);
				LOG(timerlog, "WARN: -timer_del-: added %d times"
						", last from: %s(%s):%d, deleted %d times"
						", last from: %s(%s):%d, init %d times, expired %d \n",
						tl->add_calls, tl->add_func, tl->add_file, tl->add_line,
						tl->del_calls, tl->del_func, tl->del_file, tl->del_line,
						tl->init, tl->expires_no);
#endif
				return -2; /* do nothing */
			}
			sched_yield(); /* wait for it to complete */
			goto again;
		}
		/* NOTE(review): this branch tests only tl->next, while the
		 * fast path below tests both next and prev -- confirm whether
		 * the asymmetry is intentional. */
		if (tl->next!=0){
			_timer_rm_list(tl); /* detach */
			tl->next=tl->prev=0;
			ret=0;
#ifdef TIMER_DEBUG
			tl->del_file=file;
			tl->del_func=func;
			tl->del_line=line;
			tl->flags|=F_TIMER_DELETED;
#endif
		}else{
#ifdef TIMER_DEBUG
			LOG(timerlog, "timer_del: (s) timer %p (%p, %p) flags %x "
					"already detached\n", tl, tl->next, tl->prev, tl->flags);
			LOG(timerlog, "WARN: -timer_del-: @%d tl=%p "
					"{ %p, %p, %d, %d, %p, %p, %04x, -}\n", get_ticks_raw(),
					tl, tl->next, tl->prev, tl->expire, tl->initial_timeout,
					tl->data, tl->f, tl->flags);
			LOG(timerlog, "WARN: -timer_del-; called from %s(%s):%d\n",
					func, file, line);
			LOG(timerlog, "WARN: -timer_del-: added %d times"
					", last from: %s(%s):%d, deleted %d times"
					", last from: %s(%s):%d, init %d times, expired %d \n",
					tl->add_calls, tl->add_func, tl->add_file, tl->add_line,
					tl->del_calls, tl->del_func, tl->del_file, tl->del_line,
					tl->init, tl->expires_no);
#else
			/* LM_DBG("(s) timer %p (%p, %p) flags %x "
					"already detached\n", tl, tl->next, tl->prev, tl->flags); */
#endif
			ret=-1;
		}
		UNLOCK_SLOW_TIMER_LIST();
	}else{
#endif
		/* fast-list case: delete under the main timer-list lock */
		LOCK_TIMER_LIST();
#ifdef USE_SLOW_TIMER
		/* it may have migrated to the slow list in the meantime */
		if (IS_ON_SLOW_LIST(tl) && (tl->slow_idx!=*t_idx)){
			UNLOCK_TIMER_LIST();
			goto again;
		}
#endif
		if (IS_RUNNING(tl)){
			UNLOCK_TIMER_LIST();
			if (IS_IN_TIMER()){
				/* if somebody tries to shoot himself in the foot,
				 * warn him and ignore the delete */
				LM_CRIT("timer handle %p tried to delete"
						" itself\n", tl);
#ifdef TIMER_DEBUG
				LOG(timerlog, "WARN: -timer_del-: called from %s(%s):%d\n",
						func, file, line);
				LOG(timerlog, "WARN: -timer_del-: added %d times"
						", last from: %s(%s):%d, deleted %d times"
						", last from: %s(%s):%d, init %d times, expired %d \n",
						tl->add_calls, tl->add_func, tl->add_file, tl->add_line,
						tl->del_calls, tl->del_func, tl->del_file, tl->del_line,
						tl->init, tl->expires_no);
#endif
				return -2; /* do nothing */
			}
			sched_yield(); /* wait for it to complete */
			goto again;
		}
		if ((tl->next!=0)&&(tl->prev!=0)){
			_timer_rm_list(tl); /* detach */
			tl->next=tl->prev=0;
			ret=0;
#ifdef TIMER_DEBUG
			tl->del_file=file;
			tl->del_func=func;
			tl->del_line=line;
			tl->flags|=F_TIMER_DELETED;
#endif
		}else{
#ifdef TIMER_DEBUG
			LOG(timerlog, "timer_del: (f) timer %p (%p, %p) flags %x "
					"already detached\n", tl, tl->next, tl->prev, tl->flags);
			LOG(timerlog, "WARN: -timer_del-: @%d tl=%p "
					"{ %p, %p, %d, %d, %p, %p, %04x, -}\n", get_ticks_raw(),
					tl, tl->next, tl->prev, tl->expire, tl->initial_timeout,
					tl->data, tl->f, tl->flags);
			LOG(timerlog, "WARN: -timer_del-; called from %s(%s):%d\n",
					func, file, line);
			LOG(timerlog, "WARN: -timer_del-: added %d times"
					", last from: %s(%s):%d, deleted %d times"
					", last from: %s(%s):%d, init %d times, expired %d \n",
					tl->add_calls, tl->add_func, tl->add_file, tl->add_line,
					tl->del_calls, tl->del_func, tl->del_file, tl->del_line,
					tl->init, tl->expires_no);
#else
			/* LM_DBG("(f) timer %p (%p, %p) flags %x "
					"already detached\n", tl, tl->next, tl->prev, tl->flags); */
#endif
			ret=-1;
		}
		UNLOCK_TIMER_LIST();
#ifdef USE_SLOW_TIMER
	}
#endif
	return ret;
}
/*
 * Write control data into a perfctr from user space.
 * Any running counters are stopped first (a later resume restarts
 * them); `domain` selects which part of the control state is written.
 * Returns 0 on success, -ESRCH/-EINVAL/-ENOMEM/-EFAULT on error, or
 * the result of perfctr_cpu_control_write() for unknown domains.
 */
static int do_vperfctr_write(struct vperfctr *perfctr,
			     unsigned int domain,
			     const void __user *srcp,
			     unsigned int srcbytes,
			     struct task_struct *tsk)
{
	void *tmp;
	int err;

	if (!tsk)
		return -ESRCH;	/* attempt to update unlinked perfctr */
	if (srcbytes > PAGE_SIZE)	/* primitive sanity check */
		return -EINVAL;
	tmp = kmem_alloc(srcbytes);
	if (!tmp)
		return -ENOMEM;
	err = -EFAULT;
	if (copy_from_user(tmp, srcp, srcbytes))
		goto out_kfree;
	/* Stop the counters before rewriting their control state. */
	if (IS_RUNNING(perfctr)) {
		if (tsk == current)
			vperfctr_suspend(perfctr);
		perfctr->cpu_state.user.cstatus = 0;
		perfctr->resume_cstatus = 0;
	}
	switch (domain) {
	case VPERFCTR_DOMAIN_CONTROL: {
		struct vperfctr_control control;
		err = -EINVAL;
		if (srcbytes > sizeof(control))
			break;
		/* Seed with the current values so a short write only
		 * updates a prefix of the structure. */
		control.si_signo = perfctr->si_signo;
		control.preserve = perfctr->preserve;
		memcpy(&control, tmp, srcbytes);
		/* XXX: validate si_signo? */
		perfctr->si_signo = control.si_signo;
		perfctr->preserve = control.preserve;
		err = 0;
		break;
	}
	case PERFCTR_DOMAIN_CPU_CONTROL:
		err = -EINVAL;
		if (srcbytes > sizeof(perfctr->cpu_state.control.header))
			break;
		memcpy(&perfctr->cpu_state.control.header, tmp, srcbytes);
		err = 0;
		break;
	case PERFCTR_DOMAIN_CPU_MAP:
		err = -EINVAL;
		if (srcbytes > sizeof(perfctr->cpu_state.control.pmc_map))
			break;
		memcpy(perfctr->cpu_state.control.pmc_map, tmp, srcbytes);
		err = 0;
		break;
	default:
		err = perfctr_cpu_control_write(&perfctr->cpu_state.control,
						domain, tmp, srcbytes);
	}
 out_kfree:
	kmem_free(tmp);
	return err;
}
/* schedule() --> switch_to() --> .. --> __vperfctr_resume().
 * PRE: perfctr == current->arch.thread.perfctr
 * If the counters are runnable, resume them.
 * PREEMPT note: switch_to() runs with preemption disabled.
 */
void __vperfctr_resume(struct vperfctr *perfctr)
{
	if (!IS_RUNNING(perfctr))
		return;
	vperfctr_resume(perfctr);
}
/* schedule() --> switch_to() --> .. --> __vperfctr_suspend().
 * If the counters are running, suspend them.
 * PREEMPT note: switch_to() runs with preemption disabled.
 */
void __vperfctr_suspend(struct vperfctr *perfctr)
{
	if (!IS_RUNNING(perfctr))
		return;
	vperfctr_suspend(perfctr);
}
/* Take a snapshot of the counters without stopping them. */
static void vperfctr_sample(struct vperfctr *perfctr)
{
	if (!IS_RUNNING(perfctr))
		return;
	perfctr_cpu_sample(&perfctr->cpu_state);
}