static void kb_irq(void) { char buf[2]={0}; char numbuf[6];/*five digit will be enough for a int*/ int i; byte scancode=inportb(0x60); //really ineffective for(i=0;i<sizeof(Scan_Tab);i++) { if(scancode==Scan_Tab[i]) break; } if(i<sizeof(Scan_Tab) ) { kprintf("%c", Disp_Tab[i]); switch (Disp_Tab[i]) { case 'X': enable_timer(); break; case 'R': disable_timer(); break; case 'S': reset_sys(); } } outportb(0x20,0x20); }
static __exit void oct_ilm_module_exit(void) { disable_timer(TIMER_NUM); if (dir) debugfs_remove_recursive(dir); free_irq(OCTEON_IRQ_TIMER0 + TIMER_NUM, 0); }
/*
 * Suspend hook: arm the wakeup timer so the SoC is woken again after
 * tm->suspend_ms.  Returns -EBUSY (vetoing the suspend) when suspension
 * is not permitted or no suspend interval is configured.
 */
static int wake_timer_suspend(struct device *dev)
{
	struct wake_timer *tm = dev_get_drvdata(dev);
#ifdef USE_32K_CLK
	int val;
#endif
	/* Both conditions must hold, otherwise refuse to suspend. */
	if(!(tm->can_suspend && tm->suspend_ms))
		return -EBUSY;
#ifdef USE_32K_CLK
	/* Enable the wakeup-timer clock gates and select the 32K source. */
	val = readl(__io_address(PRCM_TIMER_WKUP_CLK));
	val |= 0x07; /* timer_wkup_clken */
	val |= (0x3<<TIMER_WKUP_CLK_SEL_BIT);
	writel(val, __io_address(PRCM_TIMER_WKUP_CLK));
	/* Unmask the timer wakeup interrupt in the PRCM. */
	val = readl(__io_address(PRCM_WKUP_IRQ_CTRL));
	//val = 0x3ff10300;
	val |= (0x1<<TIMER_WKUP_INTER_EN_BIT);
	writel(val, __io_address(PRCM_WKUP_IRQ_CTRL));
#endif
	enable_irq_wake(tm->irq[0]);
	/*enable_irq_wake(tm->irq[1]);*/
	/* suspend_ms*10: timer tick appears finer than 1 ms — TODO confirm units. */
	enable_timer(tm, tm->suspend_ms*10, 0);
#ifdef MANU_UNLOCK
	disable_timer(tm, 1);
#endif
	return 0;
}
/*
 * Register the system clocksource.  The live branch (#if 1) programs GP
 * timer 1 as a free-running 32-bit up-counter; the disabled branch kept
 * for reference would have used the SysTick counter instead.
 *
 * Fix vs. original: declared as (void) — an empty parameter list in C is
 * an old-style (unprototyped) declaration.
 */
static void __init clocksource_init(void)
{
#if 1
	disable_timer(1);
	putreg32(0, STLR_TIMER_GPTMCFG(1));	/* clear configuration register */
	/* Periodic mode with an incrementing counter. */
	putreg32(TIMER_GPTMTAMR_TAMR_PERIODIC | TIMER_GPTMTAMR_TACDIR_UP,
		 STLR_TIMER_GPTMTAMR(1));
	putreg32(0xFFFFFFFF, STLR_TIMER_GPTMTAILR(1));	/* full 32-bit wrap */
	enable_timer(1);
	clocksource_calc_mult_shift(&sysclk_clocksource, CLOCK_TICK_RATE, 20);
	sysclk_clocksource.mask = CLOCKSOURCE_MASK(32);
#else
	clocksource_calc_mult_shift(&sysclk_clocksource, 4000000, 20);
	sysclk_clocksource.mask = CLOCKSOURCE_MASK(24);
	putreg32(0xFFFFFF, STLR_SYSTICK_RELOAD);
	putreg32(STLR_SYSTICK_CTRL_ENABLE | STLR_SYSTICK_CTRL_CLK_SRC_PIOSC_DIV_4,
		 STLR_SYSTICK_CTRL);
#endif
	clocksource_register(&sysclk_clocksource);
}
static irqreturn_t mfgpt_tick(int irq, void *dev_id) { uint16_t val = cs5535_mfgpt_read(cs5535_event_clock, MFGPT_REG_SETUP); /* See if the interrupt was for us */ if (!(val & (MFGPT_SETUP_SETUP | MFGPT_SETUP_CMP2 | MFGPT_SETUP_CMP1))) return IRQ_NONE; /* Turn off the clock (and clear the event) */ disable_timer(cs5535_event_clock); if (cs5535_tick_mode == CLOCK_EVT_MODE_SHUTDOWN) return IRQ_HANDLED; /* Clear the counter */ cs5535_mfgpt_write(cs5535_event_clock, MFGPT_REG_COUNTER, 0); /* Restart the clock in periodic mode */ if (cs5535_tick_mode == CLOCK_EVT_MODE_PERIODIC) cs5535_mfgpt_write(cs5535_event_clock, MFGPT_REG_SETUP, MFGPT_SETUP_CNTEN | MFGPT_SETUP_CMP2); cs5535_clockevent.event_handler(&cs5535_clockevent); return IRQ_HANDLED; }
/*
 * Clockevent mode switch: the timer is unconditionally stopped first,
 * restarted only for periodic operation, and the requested mode is
 * recorded for the interrupt handler to consult.
 */
static void mfgpt_set_mode(enum clock_event_mode mode,
			   struct clock_event_device *evt)
{
	disable_timer(cs5535_event_clock);

	if (mode == CLOCK_EVT_MODE_PERIODIC)
		start_timer(cs5535_event_clock, MFGPT_PERIODIC);

	cs5535_tick_mode = mode;
}
/*
 * Platform-device removal: tear down the timers, IRQs and mappings that
 * were set up at probe time, then release the driver state.
 */
static int __devexit wake_timer_remove(struct platform_device *pdev)
{
	struct wake_timer *tm = platform_get_drvdata(pdev);

	if (!tm)
		return 0;

#ifdef CONFIG_PM_AUTO_TEST_SUSPEND
#if 0
	clk_put(tm->clk);
#endif
	disable_timer(tm, 0);
	disable_timer(tm, 1);
	free_irq(tm->irq[0], tm);
	free_irq(tm->irq[1], tm);
	iounmap(tm->mmio);
#ifdef CONFIG_PM_WAKEUP_DEVICE_AUTO_TEST_SUSPEND
	input_unregister_device(tm->input_dev);
#endif
#endif /*CONFIG_PM_AUTO_TEST_SUSPEND*/
	kfree(tm);
	return 0;
}
/*
 * Timer tick: stop timer 0 of group 2 with local interrupts masked,
 * then run the clockevent callback.
 */
static irqreturn_t timer_interrupt(int irq, void *dev_id)
{
	struct clock_event_device *evt = dev_id;
	unsigned long irqflags;

	local_irq_save(irqflags);
	disable_timer(group2_base, 0);
	local_irq_restore(irqflags);

	evt->event_handler(evt);
	return IRQ_HANDLED;
}
/*
 * Program the next clockevent expiry: load 'evt' ticks into GP timer 0
 * while it is stopped, then start it.  Rejects any device other than
 * sysclk_clockevent.
 */
static int timer_set_next_event(unsigned long evt,
				struct clock_event_device *clk)
{
	if (clk != &sysclk_clockevent) {
		printk(KERN_ERR "%s: unknown clock device %s\n",
		       __func__, clk->name);
		return -1;
	}

	disable_timer(0);
	putreg32(evt, STLR_TIMER_GPTMTAILR(0));	/* interval load register */
	enable_timer(0);
	return 0;
}
/*
 * Group-1 timer-1 interrupt, expected to fire on CPU 3 only (warns
 * otherwise).  Stops the timer under the group lock, then invokes the
 * clockevent handler.
 */
static irqreturn_t grp1_tmr1_irq(int irq, void *dev_id)
{
	struct clock_event_device *evt = dev_id;
	u32 cpu = hard_smp_processor_id();
	unsigned long irqflags;

	WARN_ON(cpu != 3);

	spin_lock_irqsave(&soc_tmr_lock[1], irqflags);
	disable_timer(group1_base, 1);
	spin_unlock_irqrestore(&soc_tmr_lock[1], irqflags);

	evt->event_handler(evt);
	return IRQ_HANDLED;
}
static int wake_timer_resume(struct device *dev) { struct wake_timer *tm = dev_get_drvdata(dev); #ifdef USE_32_CLK int val; val = readl(__io_address(PRCM_TIMER_WKUP_CLK)); val &= ~(3<<TIMER_WKUP_CLK_SEL_BIT); writel(val, __io_address(PRCM_TIMER_WKUP_CLK)); #endif disable_irq_wake(tm->irq[0]); /*disable_irq_wake(tm->irq[1]);*/ disable_timer(tm, 0); #ifdef MANU_UNLOCK enable_timer(tm, tm->wake_ms, 1); #endif return 0; }
/*
 * Itimer clockevent mode switch: periodic mode re-arms the interval
 * timer; any non-running mode stops it; resume needs no action.
 */
static void itimer_set_mode(enum clock_event_mode mode,
			    struct clock_event_device *evt)
{
	if (mode == CLOCK_EVT_MODE_PERIODIC) {
		set_interval();
	} else if (mode == CLOCK_EVT_MODE_SHUTDOWN ||
		   mode == CLOCK_EVT_MODE_UNUSED ||
		   mode == CLOCK_EVT_MODE_ONESHOT) {
		disable_timer();
	}
	/* CLOCK_EVT_MODE_RESUME: nothing to do. */
}
/**
 * @brief
 * This function sleeps for given number of seconds.
 *
 * Without emulated FIQs the sleep is event driven: a one-shot timer
 * event wakes the suspended task.  With CONFIG_EMULATE_FIQ it falls
 * back to a busy-wait on the free-running sleep-timer counter.
 *
 * @param secs
 * Number of secs to sleep
 */
void sw_sleep(u32 secs)
{
	timeval_t time;
	time.tval.nsec = 0;
	time.tval.sec = secs;
#ifndef CONFIG_EMULATE_FIQ
	int current_context;
	struct timer_event *tevent;
	current_context = get_current_task_id();
#ifdef TIMER_DBG
	sw_printf("SW: sleep sec 0x%08x nsec 0x%08x \n",time.tval.sec,time.tval.nsec);
#endif
	/*
	 * BUGFIX: the second argument had been corrupted to "¤t_context"
	 * (an HTML-entity mangling of "&current_context" — "&curren;" is ¤),
	 * which does not compile.  Pass the address of the context id as
	 * intended.
	 */
	tevent = timer_event_create(&wake_up_from_sleep, &current_context);
	if(!tevent){
		sw_printf("SW: Out of memory : Cannot Perform Sleep\n");
		return;
	}
	timer_event_start(tevent, &time);
	suspend_task(current_context, TASK_STATE_WAIT);
	schedule();
#else
	u32 clockcycles = timeval_to_clockcycles(&time);
#ifdef TIMER_DBG
	sw_printf("SW: clockcycles 0x%08x \n",clockcycles);
#endif
	enable_timer();
	/* Busy-wait until the sleep timer passes the target cycle count. */
	while(1){
		u32 curr_val = read_sleep_timer();
		if(curr_val > clockcycles){
			break;
		}
	}
	disable_timer();
#endif
	return;
}
/*
 * Clockevent mode switch for GP timer 0.  The timer is always stopped
 * first; periodic mode then reprograms it for a CONFIG_HZ tick and
 * restarts it, while one-shot mode only selects the count mode and
 * leaves the actual start to timer_set_next_event().
 */
static void timer_set_mode(enum clock_event_mode mode, struct clock_event_device *clk)
{
	if( clk != &sysclk_clockevent ) {
		printk(KERN_ERR "%s: unknown clock device %s\n", __func__, clk->name);
		return;
	}

	disable_timer(0);

	switch(mode) {
	case CLOCK_EVT_MODE_PERIODIC:
		printk(KERN_DEBUG "%s\n", "\tCLOCK_EVT_MODE_PERIODIC");
		// Clear configuration register
		putreg32(0, STLR_TIMER_GPTMCFG(0));
		// Setup periodic timer with decrementing counter
		putreg32(TIMER_GPTMTAMR_TAMR_PERIODIC, STLR_TIMER_GPTMTAMR(0));
		// Set CONFIG_HZ interval
		putreg32(CLOCK_TICK_RATE / CONFIG_HZ, STLR_TIMER_GPTMTAILR(0));
		// Enable timer time-out interrupt
		putreg32(TIMER_GPTMIMR_TATOIM_MASK, STLR_TIMER_GPTMIMR(0));
		// Enable timer
		enable_timer(0);
		break;
	case CLOCK_EVT_MODE_ONESHOT:
		printk(KERN_DEBUG "%s\n", "\tCLOCK_EVT_MODE_ONESHOT");
		// Clear configuration register
		putreg32(0, STLR_TIMER_GPTMCFG(0));
		// Setup one shot timer with decrementing counter; it is
		// armed/started later by timer_set_next_event().
		putreg32(TIMER_GPTMTAMR_TAMR_ONESHOT, STLR_TIMER_GPTMTAMR(0));
		break;
	case CLOCK_EVT_MODE_RESUME:
		printk(KERN_DEBUG "%s\n", "\tCLOCK_EVT_MODE_RESUME");
		break;
	case CLOCK_EVT_MODE_UNUSED:
		printk(KERN_DEBUG "%s\n", "\tCLOCK_EVT_MODE_UNUSED");
		break;
	case CLOCK_EVT_MODE_SHUTDOWN:
		printk(KERN_DEBUG "%s\n", "\tCLOCK_EVT_MODE_SHUTDOWN");
		break;
	}
}
/*
 * Group-2 timer-0 mode switch.  Only the modes that stop the tick need
 * any hardware action; the timer itself is (re)armed elsewhere.
 */
static void timer_set_mode(enum clock_event_mode mode,
			   struct clock_event_device *dev)
{
	unsigned long irqflags;

	local_irq_save(irqflags);
	if (mode == CLOCK_EVT_MODE_ONESHOT ||
	    mode == CLOCK_EVT_MODE_UNUSED ||
	    mode == CLOCK_EVT_MODE_SHUTDOWN)
		disable_timer(group2_base, 0);
	/* RESUME and PERIODIC require no action here. */
	local_irq_restore(irqflags);
}
/*
 * Periodic network housekeeping.  Snapshots the tick flags set by the
 * timer ISR (with the timer interrupt disabled so the flags cannot be
 * updated mid-read), then drives the lwIP TCP timers outside the
 * critical section.
 */
void timer_thread(void)
{
	char fast_flag=0;
	char slow_flag=0;
	//char dns_flag=0;
	disable_timer();/* guard the shared flags against ISR reentrancy */
	fast_flag=time_flag;
	time_flag=0;
	slow_flag=slow_timer;
	if(dns_time>4){
		dns_time=0;
	}
	enable_timer();
	if(fast_flag){/* workaround: lwIP timer functions are not reentrant */
		/* NOTE(review): this second clear of time_flag happens outside
		 * the timer-disabled section and looks redundant — a tick
		 * arriving between enable_timer() and here would be lost;
		 * confirm intent. */
		time_flag=0;
		tcp_fasttmr();
		if(slow_flag)
			tcp_slowtmr();
		// if(dns_flag)
		// dns_tmr();/* dns update */
	}
}
static void percpu_timer_set_mode(enum clock_event_mode mode, struct clock_event_device *dev) { unsigned long flags; u32 cpuid = hard_smp_processor_id(); u32 timern = cpuid & 0x1; void __iomem *base = irq_map[cpuid].base; spin_lock_irqsave(&soc_tmr_lock[cpuid>>1], flags); switch (mode) { case CLOCK_EVT_MODE_ONESHOT: case CLOCK_EVT_MODE_UNUSED: case CLOCK_EVT_MODE_SHUTDOWN: disable_timer(base, timern); break; case CLOCK_EVT_MODE_RESUME: case CLOCK_EVT_MODE_PERIODIC: break; } spin_unlock_irqrestore(&soc_tmr_lock[cpuid>>1], flags); }
/*
 * This function is the exit routine and is called by the crtinit, when the
 * program terminates. The name needs to be changed later..
 *
 * Exceptions are masked first so the profiling timer interrupt cannot
 * fire while the timer is being shut down.
 */
void _profile_clean( void )
{
	Xil_ExceptionDisable();
	disable_timer();
}
/*
 * UML (User Mode Linux) userspace entry point: duplicates argv (so the
 * reboot path can re-exec the same command line), installs fatal-signal
 * handlers, runs linux_main(), and on return tears down timers and fds
 * before either re-exec'ing (reboot) or returning the UML exit code.
 */
int __init main(int argc, char **argv, char **envp)
{
	char **new_argv;
	int ret, i, err;

	set_stklim();
	setup_env_path();

	/* Private copy of argv, kept for the re-exec ("reboot") path below. */
	new_argv = malloc((argc + 1) * sizeof(char *));
	if (new_argv == NULL) {
		perror("Mallocing argv");
		exit(1);
	}
	for (i = 0; i < argc; i++) {
		new_argv[i] = strdup(argv[i]);
		if (new_argv[i] == NULL) {
			perror("Mallocing an arg");
			exit(1);
		}
	}
	new_argv[argc] = NULL;

	/*
	 * Allow these signals to bring down a UML if all other
	 * methods of control fail.
	 */
	install_fatal_handler(SIGINT);
	install_fatal_handler(SIGTERM);
	install_fatal_handler(SIGHUP);

	scan_elf_aux(envp);

	do_uml_initcalls();
	ret = linux_main(argc, argv);

	/*
	 * Disable SIGPROF - I have no idea why libc doesn't do this or turn
	 * off the profiling time, but UML dies with a SIGPROF just before
	 * exiting when profiling is active.
	 */
	change_sig(SIGPROF, 0);

	/*
	 * This signal stuff used to be in the reboot case.  However,
	 * sometimes a SIGVTALRM can come in when we're halting (reproducably
	 * when writing out gcov information, presumably because that takes
	 * some time) and cause a segfault.
	 */

	/* stop timers and set SIGVTALRM to be ignored */
	disable_timer();

	/* disable SIGIO for the fds and set SIGIO to be ignored */
	err = deactivate_all_fds();
	if (err)
		printf("deactivate_all_fds failed, errno = %d\n", -err);

	/*
	 * Let any pending signals fire now.  This ensures
	 * that they won't be delivered after the exec, when
	 * they are definitely not expected.
	 */
	unblock_signals();

	/* Reboot: a non-zero linux_main() result means re-exec ourselves. */
	if (ret) {
		printf("\n");
		execvp(new_argv[0], new_argv);
		perror("Failed to exec kernel");
		ret = 1;
	}
	printf("\n");
	return uml_exitcode;
}
/* Clockevent shutdown callback: stop the interval timer; always succeeds. */
static int itimer_shutdown(struct clock_event_device *evt)
{
	disable_timer();
	return 0;
}
/*
 * timemem-darwin entry point: forks the measured command, waits for it
 * while (on OS X 10.6+) polling its resident set size from a 100 ms
 * SIGALRM timer, then reports elapsed/user/sys time, maximum RSS and
 * per-core CPU utilisation.  Exits with the child's exit status.
 */
int main (int argc, char **argv, char **envp)
{
  global_prog_name = argv[0];
  if (argc == 1) {
    usage(global_prog_name);
    exit(1);
  }

  // Determine which version of OS X we are running on.
  int major_version_num;
  int minor_version_num;
  int subminor_version_num;
  get_macosx_version(&major_version_num, &minor_version_num,
                     &subminor_version_num);
  struct poll_state ps;
  ps.use_polling = 0;
  if (major_version_num >= 10) {
    // Mac OS X 10.5.* and earlier return a number that appears
    // correct from the ru_maxrss field of getrusage(), also
    // filled in by wait4().
    // Mac OS X 10.6.* always returns 0 for ru_maxrss, so we must
    // use a different method to measure it.
    ps.use_polling = 1;
  }

  // Child argv = our argv shifted down by one (argv[0] dropped).
  // NOTE(review): malloc result is not checked before use.
  char **child_argv = (char **) malloc((unsigned) argc * sizeof(char *));
  int i;
  for (i = 1; i < argc; i++) {
    child_argv[i-1] = argv[i];
  }
  child_argv[argc-1] = NULL;
  // char **p;
  // for (p = child_argv; *p != NULL; p++) {
  //   fprintf(stderr, " p[%d]='%s'", p-child_argv, *p);
  // }
  // fprintf(stderr, "\n");

  struct timeval start_time;
  struct timeval end_time;
  unsigned int num_cpus = get_num_cpus();
  cpu_usage *total_cpu_stats_start = malloc_cpu_stats(1);
  cpu_usage *per_cpu_stats_start = malloc_cpu_stats(num_cpus);
  cpu_usage *total_cpu_stats_end = malloc_cpu_stats(1);
  cpu_usage *per_cpu_stats_end = malloc_cpu_stats(num_cpus);

  get_cpu_usage(num_cpus, per_cpu_stats_start, total_cpu_stats_start);
  int ret = gettimeofday(&start_time, NULL);
  // tbd: check ret
  pid_t pid = fork();
  if (pid == -1) {
    fprintf(stderr, "Error return status -1 while attempting"
            " to call fork().  errno=%d\n", errno);
    perror(global_prog_name);
    exit(3);
  } else if (pid == 0) {
    // We are the child process

    // Set the uid to the original uid of the process that invoked
    // the timemem-darwin process, so that the command being
    // measured is run with that user's priviliges, not with root
    // privileges.
    int original_uid = getuid();
    int ret = setuid(original_uid);
    if (ret != 0) {
      fprintf(stderr, "Error return status %d while attempting"
              " to set uid to %d.  errno=%d\n", ret, original_uid, errno);
      perror(global_prog_name);
      exit(4);
    }
    ret = execvp(child_argv[0], child_argv);
    // Normally the call above will not return.
    fprintf(stderr, "Error return status %d while attempting"
            " to call execvp().  errno=%d\n", ret, errno);
    perror(global_prog_name);
    exit(2);
  }

  // We are the parent process.

  // We want to wait until the child process finishes, but we also
  // want to periodically poll the child process's resident set size
  // (memory usage).  On OS X 10.5.8 and earlier, simply using
  // wait4() for the child to finish would fill in the rusage struct
  // with the maximum resident set size, but this value is always
  // filled in with 0 in OS X 10.6, hence the use of polling.

  // We implement the polling by calling setitimer() so that we are
  // sent a SIGALRM signal every 100 msec.  This should cause
  // wait4() to return early.  We handle the signal, and then call
  // wait4() again.

  // Read the current maximum resident set size once before starting
  // the timer, because the most likely reason for it to fail is
  // that we are not running with root privileges.
  if (ps.use_polling) {
    if (init_polling_process_rss(pid, &ps) != 0) {
      run_as_superuser_msg(global_prog_name);
      exit(1);
    }
    poll_process_rss(&ps);

    // Set up the SIGALRM signal handler.
    global_sigalrm_handled = 0;
    enable_handling_sigalrm();

    // Set timer to send us a SIGALRM signal every 100 msec.
    int timer_period_msec = 100;
    enable_timer(timer_period_msec);
  }

  //int wait_opts = WNOHANG;
  int wait_opts = 0;
  int wait_status;
  int wait4_ret;
  struct rusage r;
  while (1) {
    wait4_ret = wait4(pid, &wait_status, wait_opts, &r);
    if (wait4_ret != -1) {
      break;
    }
    if ((errno == EINTR) && ps.use_polling) {
      // Most likely the SIGALRM timer signal was handled.  If
      // so, poll the child process's memory use once.  The
      // timer should automatically signal again periodically
      // without having to reset it.
      if (global_sigalrm_handled) {
        poll_process_rss(&ps);
        global_sigalrm_handled = 0;
        if (ps.task_info_errored) {
          // Stop polling once task_info() fails (child likely gone).
          disable_timer();
          ignore_sigalrm();
        }
      }
      // Go around and call wait4() again.
    } else {
      fprintf(stderr, "wait4() returned %d.  errno=%d\n",
              wait4_ret, errno);
      perror(global_prog_name);
      exit(5);
    }
  }

  // We may not use end_time if there are errors we haven't checked
  // for yet from wait4(), but it is more accurate to call this as
  // soon after wait4() returns as we can.  It is out of the loop
  // above to avoid the overhead of calling it on every poll time.
  if (debug) {
    fprintf(stderr, "About to call gettimeofday()\n");
  }
  ret = gettimeofday(&end_time, NULL);
  // tbd: check ret
  get_cpu_usage(num_cpus, per_cpu_stats_end, total_cpu_stats_end);

  if (wait4_ret != pid) {
    fprintf(stderr, "wait4() returned pid=%d.  Expected pid"
            " %d of child process.  Try again.\n", wait4_ret, pid);
    fprintf(stderr, "wait4 r.ru_maxrss=%ld\n", r.ru_maxrss);
    exit(7);
  }
  ps.wait4_returned_normally = 1;
  if (debug) {
    fprintf(stderr, "wait4() returned pid=%d of child process."
            "  Done!\n", pid);
  }

  if (ps.use_polling) {
    // Disable the timer.  Ignore SIGALRM, too, just in case one
    // more happens.
    if (debug) {
      fprintf(stderr, "About to disable the timer\n");
    }
    disable_timer();
    if (debug) {
      fprintf(stderr, "About to ignore SIGALRM\n");
    }
    ignore_sigalrm();
  }

  // Elapsed time
  int elapsed_msec = timeval_diff_msec(&start_time, &end_time);
  fprintf(stderr, "real %9d.%03d\n", (elapsed_msec / 1000),
          (elapsed_msec % 1000));

  // User, sys times
  fprintf(stderr, "user %9ld.%03d\n", r.ru_utime.tv_sec,
          r.ru_utime.tv_usec / 1000);
  fprintf(stderr, "sys  %9ld.%03d\n", r.ru_stime.tv_sec,
          r.ru_stime.tv_usec / 1000);

  // Maximum resident set size
  if (! ps.use_polling) {
    // At least on the Intel Core 2 Duo Mac OS X 10.5.8 machine on
    // which I first tested this code, it seemed to give a value
    // of up to 2^31-4096 bytes correctly, but if it went a little
    // bit over that, the fprintf statement showed it as 0, not
    // 2^31 bytes.  For now, I'll do a special check for 0 and
    // print out what I believe to be the correct value.

    // One way to test this on that machine is as follows.  The
    // first command below prints 2^31-4096 bytes as the maximum
    // resident set size.  Without the "if" condition below, the
    // second command below prints 0 as the maximum resident set
    // size.

    // ./timemem-darwin ../../memuse/test-memuse 2096863
    // ./timemem-darwin ../../memuse/test-memuse 2096864

    // Reference:
    // http://lists.apple.com/archives/darwin-kernel/2009/Mar/msg00005.html
    if (r.ru_maxrss == 0L) {
      // Print 2^31 bytes exactly
      fprintf(stderr, "2147483648  maximum resident set size from getrusage\n");
    } else {
      fprintf(stderr, "%10lu  maximum resident set size from getrusage\n",
              (unsigned long) r.ru_maxrss);
    }
  }
  if (ps.use_polling) {
    long delta = (long) ps.max_rss_bytes - (long) r.ru_maxrss;
    fprintf(stderr, "%10lu  maximum resident set size from polling (%.1f MB, delta %ld bytes = %.1f MB)\n",
            (unsigned long) ps.max_rss_bytes,
            (double) ps.max_rss_bytes / (1024.0 * 1024.0),
            delta,
            (double) delta / (1024.0 * 1024.0));
    double elapsed_time_sec = (double) elapsed_msec / 1000.0;
    fprintf(stderr, "number of times rss polled=%ld, avg of %.1f times per second\n",
            ps.num_polls,
            (double) ps.num_polls / elapsed_time_sec);
    fprintf(stderr, "time between consecutive polls (msec): min=%.1f max=%.1f\n",
            (double) ps.consecutive_poll_separation_min_msec,
            (double) ps.consecutive_poll_separation_max_msec);
    int64 max_rss_first_seen_msec =
      timeval_diff_msec(&start_time, &(ps.poll_time_when_maxrss_first_seen));
    fprintf(stderr, "Max RSS observed %.1f sec after start time\n",
            (double) max_rss_first_seen_msec / 1000.0);
    if (ps.task_info_errored) {
      int64 diff_msec = timeval_diff_msec(&end_time, &(ps.task_info_error_time));
      if (diff_msec <= 0 && diff_msec >= -100) {
        // Then the error most likely occurred because the
        // child process had already exited.  Ignore it.
      } else {
        fprintf(stderr, "A call to task_info() returned an error.  error_time - end_time = %.3f sec.  This may mean the maximum resident set size measurement above is too low.\n",
                (double) diff_msec / 1000.0);
      }
    }
  }

  // Show separate busy percentage for each CPU core
  fprintf(stderr, "Per core CPU utilization (%d cores):", num_cpus);
  for (i = 0; i < num_cpus; i++) {
    uint64 total = (per_cpu_stats_end[i].total -
                    per_cpu_stats_start[i].total);
    int cpu_busy_percent = 0;
    if (total != 0) {
      uint64 idle = (per_cpu_stats_end[i].idle -
                     per_cpu_stats_start[i].idle);
      cpu_busy_percent =
        (int) round(100.0 * (1.0 - ((float) idle)/total));
    }
    fprintf(stderr, " %d%%", cpu_busy_percent);
  }
  fprintf(stderr, "\n");

  if (WIFEXITED(wait_status)) {
    // Exit with the same status that the child process did.
    exit(WEXITSTATUS(wait_status));
  } else if (WIFSIGNALED(wait_status)) {
    fprintf(stderr,
            "Command stopped due to signal %d without calling exit().\n",
            WTERMSIG(wait_status));
    exit(1);
  } else {
    fprintf(stderr,
            "Command is stopped due to signal %d, and can be restarted.\n",
            WSTOPSIG(wait_status));
    exit(2);
  }
  return 0;
}
void mcount( unsigned long frompc, unsigned long selfpc ) { register struct gmonparam *p = NULL; register long toindex, fromindex; int j; disable_timer(); //print("CG: "); putnum(frompc); print("->"); putnum(selfpc); print("\r\n"); // check that frompcindex is a reasonable pc value. // for example: signal catchers get called from the stack, // not from text space. too bad. // for(j = 0; j < n_gmon_sections; j++ ){ if((frompc >= _gmonparam[j].lowpc) && (frompc < _gmonparam[j].highpc)) { p = &_gmonparam[j]; break; } } if( j == n_gmon_sections ) goto done; #ifdef PROFILE_NO_FUNCPTR fromindex = searchpc( p->cgtable, p->cgtable_size, frompc ) ; if( fromindex == -1 ) { fromindex = p->cgtable_size ; p->cgtable_size++ ; p->cgtable[fromindex].frompc = frompc ; p->cgtable[fromindex].selfpc = selfpc ; p->cgtable[fromindex].count = 1 ; goto done ; } p->cgtable[fromindex].count++ ; #else fromindex = searchpc( p->froms, p->fromssize, frompc ) ; if( fromindex == -1 ) { fromindex = p->fromssize ; p->fromssize++ ; //if( fromindex >= N_FROMS ) { //print("Error : From PC table overflow\r\n") ; //goto overflow ; //} p->froms[fromindex].frompc = frompc ; p->froms[fromindex].link = -1 ; }else { toindex = p->froms[fromindex].link ; while(toindex != -1) { toindex = (p->tossize - toindex)-1 ; if( p->tos[toindex].selfpc == selfpc ) { p->tos[toindex].count++ ; goto done ; } toindex = p->tos[toindex].link ; } } //if( toindex == -1 ) { p->tos-- ; p->tossize++ ; //if( toindex >= N_TOS ) { //print("Error : To PC table overflow\r\n") ; //goto overflow ; //} p->tos[0].selfpc = selfpc ; p->tos[0].count = 1 ; p->tos[0].link = p->froms[fromindex].link ; p->froms[fromindex].link = p->tossize-1 ; #endif done: p->state = GMON_PROF_ON; goto enable_timer ; //overflow: p->state = GMON_PROF_ERROR; enable_timer: enable_timer(); return ; }