/* Waits until the head of the log contains an entry matching pertinent
 * fields of 'my_entry' (as decided by the predicate 'pred'). When it does,
 * 'my_entry' is overwritten with the entry at the head of the log.
 *
 * Replay protocol: each iteration blocks in ThreadInfo::waitForTurn() until
 * this thread is scheduled, then inspects the log head. If the head is not
 * the awaited event, the only legal alternative is an *optional* event
 * belonging to this same clone id; it is executed in-line and the loop
 * waits again. Anything else is a fatal replay divergence. */
void waitForTurn(log_entry_t *my_entry, turn_pred_t pred)
{
  memfence();
  // A clone id of -1 means this thread was never registered with the logger.
  JASSERT(my_clone_id != -1);
  while (1) {
    // Block until the replay scheduler hands the turn to this thread.
    dmtcp::ThreadInfo::waitForTurn();
    memfence();  // re-read the log head after being woken
    log_entry_t& temp_entry = global_log.getCurrentEntry();
    // The scheduler must only wake us for entries owned by our clone id.
    JASSERT(temp_entry.cloneId() == my_clone_id)
      (temp_entry.cloneId()) (my_clone_id);
    // Head matches the awaited event: our turn has arrived.
    if ((*pred)(&temp_entry, my_entry))
      break;
    /* Also check for an optional event for this clone_id. */
    if (temp_entry.cloneId() == my_clone_id && temp_entry.isOptional()) {
      // The optional event must be one that can legally occur inside the
      // awaited event; otherwise replay has diverged from the recording.
      if (!is_optional_event_for(my_entry->eventId(),
                                 temp_entry.eventId(), false)) {
        JASSERT(false) (my_entry->eventId()) (temp_entry.eventId())
          (global_log.getIndex());
      }
      memfence();
      // Re-arm our own turn before executing the nested optional event,
      // so we are runnable again once it completes.
      dmtcp::ThreadInfo::wakeUpThread(my_clone_id);
      execute_optional_event(temp_entry.eventId());
    } else {
      // Woken for an entry that is neither the awaited event nor an
      // optional event for this thread: fatal replay divergence.
      JASSERT(false);
    }
  }
  // Hand the matched head entry back to the caller.
  *my_entry = global_log.getCurrentEntry();
}
/* Requests a checkpoint from the DMTCP coordinator and blocks until either
 * the checkpoint or a subsequent restart has completed.
 *
 * Returns DMTCP_AFTER_CHECKPOINT if we resumed after a checkpoint,
 * DMTCP_AFTER_RESTART if we resumed after a restart, and 0 if the
 * checkpoint request itself failed.
 *
 * NOTE(review): completion is detected by polling the global counters
 * numRestarts/numCheckpoints — this assumes the checkpoint machinery bumps
 * them before user threads are resumed; confirm against the updater. */
int __real_dmtcpCheckpoint(){
  int rv = 0;
  // Snapshot the counters so a later change signals completion.
  int oldNumRestarts = numRestarts;
  int oldNumCheckpoints = numCheckpoints;
  memfence(); //make sure the reads above don't get reordered
  if(dmtcpRunCommand('c')){ //request checkpoint
    //and wait for the checkpoint
    while(oldNumRestarts==numRestarts && oldNumCheckpoints==numCheckpoints){
      //nanosleep should get interrupted by checkpointing with an EINTR error
      //though there is a race to get to nanosleep() before the checkpoint
      struct timespec t = {1,0};
      nanosleep(&t, NULL);
      memfence(); //make sure the loop condition doesn't get optimized
    }
    // If the restart counter is unchanged we woke up from a checkpoint;
    // otherwise we are on the far side of a restart.
    rv = (oldNumRestarts==numRestarts ? DMTCP_AFTER_CHECKPOINT : DMTCP_AFTER_RESTART);
  }else{
    /// TODO: Maybe we need to process it in some way????
    /// EXIT????
    /// -- Artem
    // printf("\n\n\nError requesting checkpoint\n\n\n");
  }
  return rv;
}
NO_OPTIMIZE static void restart_fast_path() { int mtcp_sys_errno; void *addr = mtcp_sys_mmap(rinfo.restore_addr, rinfo.restore_size, PROT_READ|PROT_WRITE|PROT_EXEC, MAP_PRIVATE|MAP_ANONYMOUS, -1, 0); if (addr == MAP_FAILED) { MTCP_PRINTF("mmap failed with error; errno: %d\n", mtcp_sys_errno); mtcp_abort(); } size_t offset = (char*)&restorememoryareas - rinfo.text_addr; rinfo.restorememoryareas_fptr = (fnptr_t)(rinfo.restore_addr + offset); /* For __arm__ * should be able to use kernel call: __ARM_NR_cacheflush(start, end, flag) * followed by copying new text below, followed by DSB and ISB, * to eliminstate need for delay loop. But this needs more testing. */ mtcp_memcpy(rinfo.restore_addr, rinfo.text_addr, rinfo.text_size); mtcp_memcpy(rinfo.restore_addr + rinfo.text_size, &rinfo, sizeof(rinfo)); void *stack_ptr = rinfo.restore_addr + rinfo.restore_size - MB; #if defined(__INTEL_COMPILER) && defined(__x86_64__) memfence(); asm volatile (CLEAN_FOR_64_BIT(mov %0,%%esp;)
void waitForExecBarrier() { while (1) { const log_entry_t& temp_entry = global_log.getCurrentEntry(); if (temp_entry.eventId() == exec_barrier_event) { // We don't check clone ids because anyone can do an exec. break; } memfence(); usleep(20); } }
void getNextLogEntry() { JASSERT(my_clone_id != -1); // If log is empty, don't do anything if (global_log.numEntries() == 0) { return; } if (global_log.advanceToNextEntry() == 0) { JTRACE ( "Switching back to record." ); set_sync_mode(SYNC_RECORD); } else { memfence(); const log_entry_t& temp_entry = global_log.getCurrentEntry(); clone_id_t clone_id = temp_entry.cloneId(); dmtcp::ThreadInfo::wakeUpThread(clone_id); } }