#include <iostream>

int main() {
    A a = get(0);
    std::cout << std::endl;

    // Uncomment one of the following:
    #define QUICK
    //#undef QUICK

#ifdef QUICK
    {
        for (int i = 0; i < 2; ++i) {
            A a = get_fast(i);  // Return value optimization! (Instead of using a
                                // temporary, the actual object a is constructed in place.)
        }
        // http://www.informit.com/articles/article.aspx?p=25033&seqNum=4
    }
#else
    {
        A a;  // Programmers think it's fast to construct this just once
              // (instead of on every loop iteration).
        for (int i = 0; i < 2; ++i) {
            a = get_fast(i);  // In actual fact this line is incredibly slow: constructor
                              // and destructor of a temporary, plus a copy-assignment!
        }
    }
#endif

    return 0;
}
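// The listing above assumes a class A and factory functions get()/get_fast()
// that are not shown here. Below is a minimal sketch of what they might look
// like, with logging in the special member functions so the difference between
// the RVO path and the copy-assignment path is visible at run time. The names
// and bodies are illustrative assumptions, not the original definitions; in a
// real build they would have to appear before main().

#include <iostream>

struct A {
    int value = 0;
    A()                          { std::cout << "A()\n"; }
    explicit A(int v) : value(v) { std::cout << "A(int)\n"; }
    A(const A& other) : value(other.value) { std::cout << "A(const A&)\n"; }
    A& operator=(const A& other) { value = other.value; std::cout << "operator=\n"; return *this; }
    ~A()                         { std::cout << "~A()\n"; }
};

// Returning a prvalue lets the compiler construct the result directly in the
// caller's object (RVO; guaranteed copy elision since C++17), so the QUICK
// branch prints only A(int) and ~A() per iteration, while the other branch
// additionally prints operator= plus the temporary's constructor/destructor.
A get(int i)      { return A(i); }
A get_fast(int i) { return A(i); }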
void scheduler()
{
    unsigned int cpu_id = CPUID;
    struct scheduler_info *si = &(cpu[ cpu_id ].sched);
    struct thread *tr;
    int idle;
    int flip = 0;
    uint64_t requested_time = TIMESLICE;

    // We will never go back.
    set_cpu_flags( cpu_flags() & ~EFLAG_NESTED_TASK );

    // Now we're working.
    acquire_spinlock( &(si->lock_scheduler) );

    // Start timing the scheduler.
    stats_time_start( &(cpu[ cpu_id ].st_schedulerTime) );

    assert( (cpu_flags() & EFLAG_INTERRUPT) == 0 );

    ack_apic();

    while (1==1)
    {
        // Just show the world that we're still alive.
        ((char*)0xB8000)[158 - cpu_id * 2] ++ ;   /// \todo remove one day

        // If the garbage collector has work to do, let it run.
        if ( gc_has_work( cpu_id ) == 0 )
        {
            tr = smk_gc[ cpu_id ];
            exec_thread( cpu_id, tr, TIMESLICE, 0 );
            stats_time( cpu_id, &(cpu[ cpu_id ].st_systemTime) );
        }

        // Find out when the next timed event is.
        requested_time = remove_timers( si, cpu[ cpu_id ].st_systemTime.usage );

        idle = 0;

        // Fast queue support.
        if ( flip == 0 )
        {
            tr = get_fast( si );
            if ( tr != NULL )
            {
                exec_thread( cpu_id, tr, TIMESLICE, 0 );
                stats_time( cpu_id, &(cpu[ cpu_id ].st_systemTime) );  // Maintain CPU time.

                if ( si->sched_count != 0 ) flip = 1;   // Ensure others run.
                continue;
            }
        }

        flip = 0;   // Reset the fast-queue flip.

        // If there's nothing to do, go idle.
        if ( si->sched_count == 0 )
        {
            if ( gc_has_work( cpu_id ) == 0 ) continue;   // Al-e-oop!

            tr = smk_idle[ cpu_id ];
            idle = 1;   // Safe to wake up when required.
        }
        else
        {
            tr = si->sched_queue[ si->position ].tr;
            si->position = (si->position + 1) % si->sched_count;

            requested_time = TIMESLICE;
            idle = 0;   // Don't interrupt until the timeslice is over.
        }

        // And run the selected thread.
        exec_thread( cpu_id, tr, requested_time, idle );
        stats_time( cpu_id, &(cpu[ cpu_id ].st_systemTime) );  // Maintain CPU time.

        // If the previous thread requested a state change, honour it.
        remove_last( si );
    }
}
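// A stripped-down, user-space sketch of the selection policy used above: the
// fast queue is tried first, but the 'flip' flag guarantees it can win at most
// every other pass, so the round-robin queue cannot be starved; if both queues
// are empty, the idle thread is chosen. All names here (FakeScheduler,
// pick_next, ...) are simplified stand-ins, not the real kernel structures.

#include <cstdio>
#include <deque>
#include <string>
#include <vector>

struct FakeThread { std::string name; };

struct FakeScheduler {
    std::deque<FakeThread>  fast_queue;   // one-shot, high-priority work
    std::vector<FakeThread> sched_queue;  // normal round-robin queue
    unsigned position = 0;                // next round-robin slot
    int flip = 0;                         // set after a fast pick to force fairness
};

// Pick the next thread, mirroring the fast-queue / round-robin / idle order above.
FakeThread pick_next(FakeScheduler& si)
{
    if (si.flip == 0 && !si.fast_queue.empty()) {
        FakeThread tr = si.fast_queue.front();
        si.fast_queue.pop_front();
        if (!si.sched_queue.empty()) si.flip = 1;   // ensure others run next pass
        return tr;
    }
    si.flip = 0;                                    // reset the fast-queue flip

    if (si.sched_queue.empty())
        return FakeThread{"idle"};                  // nothing runnable: idle thread

    FakeThread tr = si.sched_queue[si.position];
    si.position = (si.position + 1) % si.sched_queue.size();
    return tr;
}

int main()
{
    FakeScheduler si;
    si.fast_queue  = { {"fast-A"}, {"fast-B"} };
    si.sched_queue = { {"t1"}, {"t2"} };

    // Fast picks and round-robin picks alternate until the fast queue drains.
    for (int pass = 0; pass < 6; ++pass)
        std::printf("pass %d runs %s\n", pass, pick_next(si).name.c_str());
    return 0;
}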