//! Blocks the calling thread until the condition variable is notified or \a abort_handle
//! becomes signalled. The caller must hold the interprocess mutex wrapped by \a lock.
//!
//! \param lock         Engaged unlock guard for the interprocess mutex protecting m_shared_state.
//! \param abort_handle A waitable Win32 handle; if it becomes signalled the wait is abandoned.
//! \return \c true if the wait ended because the condition semaphore was signalled,
//!         \c false if it ended because \a abort_handle was signalled.
bool interprocess_condition_variable::wait(interprocess_mutex::optional_unlock& lock, boost::detail::winapi::HANDLE_ abort_handle)
{
    // NOTE(review): a negative waiter count appears to be used as a sentinel meaning
    // "the current semaphore can no longer be used" — confirm against the notify path.
    int32_t waiters = m_shared_state->m_waiters;
    if (waiters < 0)
    {
        // We need to select a new semaphore to block on
        m_current_semaphore = get_unused_semaphore();
        // Bumping the generation invalidates waiter bookkeeping made against the old semaphore.
        ++m_shared_state->m_generation;
        m_shared_state->m_semaphore_id = m_current_semaphore->m_id;
        waiters = 0;
    }
    else
    {
        // Avoid integer overflow
        if (BOOST_UNLIKELY(waiters >= ((std::numeric_limits< int32_t >::max)() - 1)))
            BOOST_LOG_THROW_DESCR(limitation_error, "Too many waiters on an interprocess condition variable");

        // Make sure we use the right semaphore to block on
        const uint32_t id = m_shared_state->m_semaphore_id;
        if (m_current_semaphore->m_id != id)
            m_current_semaphore = get_semaphore(id);
    }

    // Register ourselves as a waiter and remember the generation we registered against,
    // so we only decrement the count later if the shared state still refers to it.
    m_shared_state->m_waiters = waiters + 1;
    const uint32_t generation = m_shared_state->m_generation;

    // Wait on either the condition semaphore or the abort handle, whichever fires first.
    boost::detail::winapi::HANDLE_ handles[2u] = { m_current_semaphore->m_semaphore.get_handle(), abort_handle };

    // Release the mutex for the duration of the kernel wait; the guard is disengaged
    // so the caller's destructor does not try to unlock a mutex we already released.
    interprocess_mutex* const mutex = lock.disengage();
    mutex->unlock();

    boost::detail::winapi::DWORD_ retval = boost::detail::winapi::WaitForMultipleObjects(2u, handles, false, boost::detail::winapi::INFINITE_);

    if (BOOST_UNLIKELY(retval == boost::detail::winapi::WAIT_FAILED_))
    {
        const boost::detail::winapi::DWORD_ err = boost::detail::winapi::GetLastError();

        // Although highly unrealistic, it is possible that it took so long for the current thread to enter WaitForMultipleObjects that
        // another thread has managed to destroy the semaphore. This can happen if the semaphore remains in a non-zero state
        // for too long, which means that another process died while being blocked on the semaphore, and the semaphore was signalled,
        // and the non-zero state timeout has passed. In this case the most logical behavior for the wait function is to return as
        // if because of a wakeup.
        if (err == ERROR_INVALID_HANDLE)
            retval = boost::detail::winapi::WAIT_OBJECT_0_;
        else
            BOOST_LOG_THROW_DESCR_PARAMS(boost::log::system_error, "Failed to block on an interprocess semaphore object", (err));
    }

    // Have to unconditionally lock the mutex here
    mutex->lock();
    lock.engage(*mutex);

    // Only deregister if the shared state still belongs to the generation we
    // registered with; otherwise another process has already reset the counters.
    if (generation == m_shared_state->m_generation && m_shared_state->m_waiters > 0)
        --m_shared_state->m_waiters;

    return retval == boost::detail::winapi::WAIT_OBJECT_0_;
}
// Tears down the collection: closes every per-suite semaphore handle, then the
// global semaphore handle, and finally destroys the worker thread pool.
// Failures are logged and otherwise ignored — destructors must not throw.
SuiteCollection::~SuiteCollection()
{
    for (auto& suite : suits_)
    {
        if (!CloseHandle(suite.get_semaphore()))
            logger << "Closing semaphore's handle failed";
    }

    if (!CloseHandle(global_semaphore_))
        logger << "Closing semaphore's handle failed";

    if (!threads_.DestroyPool())
        logger << "DestroyPool failed";
}
int main(int argc, char *argv[]) { void *addr; int i; int ret; int fd = 0; int semid; int semaphore; int inject = 0; int madvise_code = MADV_HWPOISON; int early_kill = 0; int avoid_touch = 0; int anonflag = 0; int shmflag = 0; int shmkey = 0; int forkflag = 0; int privateflag = 0; int cowflag = 0; char c; pid_t pid = 0; void *expected_addr = NULL; struct sembuf sembuffer; PS = getpagesize(); HPS = HPAGE_SIZE; file_size = 1; corrupt_page = -1; if (argc == 1) { usage(); exit(EXIT_FAILURE); } while ((c = getopt_long(argc, argv, "m:o:xOeSAaFpcf:h", opts, NULL)) != -1) { switch (c) { case 'm': file_size = strtol(optarg, NULL, 10); break; case 'o': corrupt_page = strtol(optarg, NULL, 10); break; case 'x': inject = 1; break; case 'O': madvise_code = MADV_SOFT_OFFLINE; break; case 'e': early_kill = 1; break; case 'S': shmflag = 1; break; case 'A': anonflag = 1; break; case 'a': avoid_touch = 1; break; case 'F': forkflag = 1; break; case 'p': privateflag = 1; break; case 'c': cowflag = 1; break; case 'f': strcat(filename, optarg); shmkey = strtol(optarg, NULL, 10); break; case 'h': usage(); exit(EXIT_SUCCESS); default: usage(); exit(EXIT_FAILURE); } } if (inject && corrupt_page * PS > file_size * HPAGE_SIZE) errmsg("Target page is out of range.\n"); if (avoid_touch && corrupt_page == -1) errmsg("Avoid which page?\n"); /* Construct file name */ if (access(argv[argc - 1], F_OK) == -1) { usage(); exit(EXIT_FAILURE); } else { strcpy(filepath, argv[argc - 1]); strcat(filepath, filename); } if (shmflag) { addr = alloc_shm_hugepage(&shmkey, file_size * HPAGE_SIZE); if (!addr) errmsg("Failed in alloc_shm_hugepage()"); } else if (anonflag) { addr = alloc_anonymous_hugepage(file_size * HPAGE_SIZE, privateflag); if (!addr) errmsg("Failed in alloc_anonymous_hugepage()"); } else { addr = alloc_filebacked_hugepage(filepath, file_size * HPAGE_SIZE, privateflag, &fd); if (!addr) errmsg("Failed in alloc_filebacked_hugepage()"); } if (corrupt_page != -1 && avoid_touch) expected_addr = (void 
*)(addr + corrupt_page / 512 * HPAGE_SIZE); if (forkflag) { semid = semget(IPC_PRIVATE, 1, 0666|IPC_CREAT); if (semid == -1) { perror("semget"); goto cleanout; } semaphore = semctl(semid, 0, SETVAL, 1); if (semaphore == -1) { perror("semctl"); goto cleanout; } if (get_semaphore(semid, &sembuffer)) { perror("get_semaphore"); goto cleanout; } } write_hugepage(addr, file_size, 0); read_hugepage(addr, file_size, 0); if (early_kill) prctl(PR_MCE_KILL, PR_MCE_KILL_SET, PR_MCE_KILL_EARLY, NULL, NULL); /* * Intended order: * 1. Child COWs * 2. Parent madvise()s * 3. Child exit()s */ if (forkflag) { pid = fork(); if (!pid) { /* Semaphore is already held */ if (cowflag) { write_hugepage(addr, file_size, 0); read_hugepage(addr, file_size, 0); } if (put_semaphore(semid, &sembuffer)) err("put_semaphore"); usleep(1000); /* Wait for madvise() to be done */ if (get_semaphore(semid, &sembuffer)) err("put_semaphore"); if (put_semaphore(semid, &sembuffer)) err("put_semaphore"); return 0; } } /* Wait for COW */ if (forkflag && get_semaphore(semid, &sembuffer)) { perror("get_semaphore"); goto cleanout; } if (inject && corrupt_page != -1) { ret = madvise(addr + corrupt_page * PS, PS, madvise_code); if (ret) { printf("madivise return %d :", ret); perror("madvise"); goto cleanout; } } if (forkflag && put_semaphore(semid, &sembuffer)) { perror("put_semaphore"); goto cleanout; } if (madvise_code != MADV_SOFT_OFFLINE); write_hugepage(addr, file_size, expected_addr); read_hugepage(addr, file_size, expected_addr); if (forkflag) { if (wait(&i) == -1) err("wait"); if (semctl(semid, 0, IPC_RMID) == -1) err("semctl(IPC_RMID)"); } cleanout: if (shmflag) { if (free_shm_hugepage(shmkey, addr) == -1) exit(2); } else if (anonflag) { if (free_anonymous_hugepage(addr, file_size * HPAGE_SIZE) == -1) exit(2); } else { if (free_filebacked_hugepage(addr, file_size * HPAGE_SIZE, fd, filepath) == -1) exit(2); } return 0; }
/* Obtain the semaphores of the symbol pipeline and return them through the
 * caller-provided out-parameters. */
void get_sems(sem_t **p_sem_symbol_ready, sem_t **p_sem_symbol_decoded)
{
    sem_t *ready = get_semaphore(SEM_SYMBOL_READY);
    sem_t *decoded = get_semaphore(SEM_SYMBOL_DECODED);

    *p_sem_symbol_ready = ready;
    *p_sem_symbol_decoded = decoded;
}
int main(int argc, char *argv[]) { int fd; int sem; int nrpages = 1; int ret = 0; int tmp = 0; int offset = 0; char c; char *filename; char *actype; char *onerror; char *p; pid_t pid; int wait_status; uint64_t pflag; struct sembuf sembuf; struct pagestat pgstat; if (argc != 5) { printf("Usage: %s filename nrpages accesstype onerror\n", argv[0]); exit(EXIT_FAILURE); } filename = argv[1]; nrpages = strtol(argv[2], NULL, 10); actype = argv[3]; onerror = argv[4]; DEB("filename = %s, nrpages = %d, actype = %s, onerror = %s\n", filename, nrpages, actype, onerror); if (strcmp(onerror, "onerror") == 0) offset = 0; else offset = PS; sem = create_and_init_semaphore(); fd = open_check(filename, O_RDWR, 0); tmp = pread(fd, rbuf, nrpages*PS, 0); DEB("parent first read %d [%c,%c]\n", tmp, rbuf[0], rbuf[PS]); get_semaphore(sem, &sembuf); if ((pid = fork()) == 0) { get_semaphore(sem, &sembuf); /* wait parent to dirty page */ p = mmap_check((void *)REFADDR, nrpages * PS, PROT_READ|PROT_WRITE, MAP_SHARED, fd, 0); if (p != (void *)REFADDR) err("mmap"); if (nrpages == 1) { DEB("child read (after dirty) [%c]\n", p[0]); #ifdef DEBUG get_pagestat(p, &pgstat); #endif } else { DEB("child read (after dirty) [%c,%c]\n", p[0], p[PS]); #ifdef DEBUG get_pagestat(p, &pgstat); get_pagestat(p+PS, &pgstat); #endif } DEB("child hwpoison to vaddr %p\n", p); madvise(&p[0], PS, 100); /* hwpoison */ put_semaphore(sem, &sembuf); get_semaphore(sem, &sembuf); DEB("child terminated\n"); put_semaphore(sem, &sembuf); get_pflags(pgstat.pfn, &pflag, 1); exit(EXIT_SUCCESS); } else { DEB("parent dirty\n"); usleep(1000); memset(wbuf, 49, nrpages * PS); pwrite(fd, wbuf, nrpages * PS, 0); tmp = pread(fd, rbuf, nrpages * PS, 0); DEB("parent second read (after dirty) %d [%c,%c]\n", tmp, rbuf[0], rbuf[PS]); put_semaphore(sem, &sembuf); /* kick child to inject error */ get_semaphore(sem, &sembuf); /* pagecache should be hwpoison */ DEB("parent check\n"); if (strcmp(actype, "read") == 0) { tmp = pread(fd, rbuf, PS, 
offset); if (tmp < 0) DEB("parent first read failed.\n"); tmp = pread(fd, rbuf, PS, offset); DEB("parent read after hwpoison %d [%c,%c]\n", tmp, rbuf[0], rbuf[PS]); if (tmp < 0) { ret = -1; perror("read"); } else { ret = 0; } } else if (strcmp(actype, "writefull") == 0) { memset(wbuf, 50, nrpages * PS); tmp = pwrite(fd, wbuf, PS, offset); tmp = pwrite(fd, wbuf, PS, offset); DEB("parent write after hwpoison %d\n", tmp); if (tmp < 0) { ret = -1; perror("writefull"); } else { ret = 0; } } else if (strcmp(actype, "writepart") == 0) { memset(wbuf, 50, nrpages * PS); tmp = pwrite(fd, wbuf, PS / 2, offset); tmp = pwrite(fd, wbuf, PS / 2, offset); DEB("parent write after hwpoison %d\n", tmp); if (tmp < 0) { ret = -1; perror("writefull"); } else { ret = 0; } } else if (strcmp(actype, "fsync") == 0) { ret = fsync(fd); ret = fsync(fd); DEB("parent fsync after hwpoison [ret %d]\n", ret); if (ret) perror("fsync"); } else if (strcmp(actype, "sync_range_write") == 0) { ret = sync_file_range(fd, offset, PS, SYNC_FILE_RANGE_WRITE); ret = sync_file_range(fd, offset, PS, SYNC_FILE_RANGE_WRITE); if (ret) perror("sync_range_write"); } else if (strcmp(actype, "sync_range_wait") == 0) { ret = sync_file_range(fd, offset, PS, SYNC_FILE_RANGE_WAIT_BEFORE); ret = sync_file_range(fd, offset, PS, SYNC_FILE_RANGE_WAIT_BEFORE); if (ret) perror("sync_range_wait"); } else if (strcmp(actype, "mmapread") == 0) { /* * If mmap access failed, this program should be * terminated by segmentation fault with non-zero * returned value. So we don't set ret here. 
*/ p = mmap_check((void *)REFADDR, nrpages * PS, PROT_READ|PROT_WRITE, MAP_SHARED, fd, 0); if (p != (void *)REFADDR) err("mmap"); c = p[offset]; DEB("parent mmap() read after hwpoison [%c]\n", p[offset]); } else if (strcmp(actype, "mmapwrite") == 0) { p = mmap_check((void *)REFADDR, nrpages * PS, PROT_READ|PROT_WRITE, MAP_SHARED, fd, 0); if (p != (void *)REFADDR) err("mmap"); memset(&p[offset], 50, PS); DEB("parent mmap() write after hwpoison [%c]\n", p[offset]); } } put_semaphore(sem, &sembuf); waitpid(pid, &wait_status, 0); if (!WIFEXITED(wait_status)) err("waitpid"); delete_semaphore(sem); DEB("parent exit %d.\n", ret); return ret; }
/* Resolve the three task-pipeline semaphores and hand them back through the
 * caller-provided out-parameters. */
void get_sems(sem_t **p_sem_task_ready, sem_t **p_sem_task_read, sem_t **p_sem_task_processed)
{
    sem_t *ready = get_semaphore(SEM_TASK_READY);
    sem_t *read_done = get_semaphore(SEM_TASK_READ);
    sem_t *processed = get_semaphore(SEM_TASK_PROCESSED);

    *p_sem_task_ready = ready;
    *p_sem_task_read = read_done;
    *p_sem_task_processed = processed;
}
/** Please see header for specification */
void Anvil::Queue::submit(const Anvil::SubmitInfo& in_submit_info)
{
    // Fence supplied by the caller, if any; may be replaced with the queue's own
    // fence below when a blocking submission was requested without one.
    Anvil::Fence* fence_ptr        (in_submit_info.get_fence() );
    bool          needs_fence_reset(false);
    VkResult      result           (VK_ERROR_INITIALIZATION_FAILED);

    Anvil::StructChainer<VkSubmitInfo> struct_chainer;

    // Scratch vectors translating the wrapper objects into raw Vulkan handles.
    std::vector<VkCommandBuffer> cmd_buffers_vk      (in_submit_info.get_n_command_buffers () );
    std::vector<VkSemaphore>     signal_semaphores_vk(in_submit_info.get_n_signal_semaphores() );
    std::vector<VkSemaphore>     wait_semaphores_vk  (in_submit_info.get_n_wait_semaphores () );

    ANVIL_REDUNDANT_VARIABLE(result);

    /* Prepare for the submission */
    switch (in_submit_info.get_type() )
    {
        case SubmissionType::SGPU:
        {
            VkSubmitInfo submit_info;

            // Extract raw VkCommandBuffer / VkSemaphore handles from the wrappers.
            for (uint32_t n_command_buffer = 0;
                          n_command_buffer < in_submit_info.get_n_command_buffers();
                        ++n_command_buffer)
            {
                cmd_buffers_vk.at(n_command_buffer) = in_submit_info.get_command_buffers_sgpu()[n_command_buffer]->get_command_buffer();
            }

            for (uint32_t n_signal_semaphore = 0;
                          n_signal_semaphore < in_submit_info.get_n_signal_semaphores();
                        ++n_signal_semaphore)
            {
                auto sem_ptr = in_submit_info.get_signal_semaphores_sgpu()[n_signal_semaphore];

                signal_semaphores_vk.at(n_signal_semaphore) = sem_ptr->get_semaphore();
            }

            for (uint32_t n_wait_semaphore = 0;
                          n_wait_semaphore < in_submit_info.get_n_wait_semaphores();
                        ++n_wait_semaphore)
            {
                wait_semaphores_vk.at(n_wait_semaphore) = in_submit_info.get_wait_semaphores_sgpu()[n_wait_semaphore]->get_semaphore();
            }

            // Null pointers are required by the spec when the corresponding count is zero.
            submit_info.commandBufferCount   = in_submit_info.get_n_command_buffers ();
            submit_info.pCommandBuffers      = (in_submit_info.get_n_command_buffers() != 0) ? &cmd_buffers_vk.at(0) : nullptr;
            submit_info.pNext                = nullptr;
            submit_info.pSignalSemaphores    = (in_submit_info.get_n_signal_semaphores() != 0) ? &signal_semaphores_vk.at(0) : nullptr;
            submit_info.pWaitDstStageMask    = in_submit_info.get_destination_stage_wait_masks();
            submit_info.pWaitSemaphores      = (in_submit_info.get_n_wait_semaphores() != 0) ? &wait_semaphores_vk.at(0) : nullptr;
            submit_info.signalSemaphoreCount = in_submit_info.get_n_signal_semaphores();
            submit_info.sType                = VK_STRUCTURE_TYPE_SUBMIT_INFO;
            submit_info.waitSemaphoreCount   = in_submit_info.get_n_wait_semaphores();

            struct_chainer.append_struct(submit_info);

            break;
        }

        default:
        {
            anvil_assert_fail();
        }
    }

    /* Any additional structs to chain? */
    #if defined(_WIN32)
    {
        // Chain VkD3D12FenceSubmitInfoKHR when the submit info carries D3D12
        // fence semaphore payload values (Windows-only interop path).
        const uint64_t* d3d12_fence_signal_semaphore_values_ptr = nullptr;
        const uint64_t* d3d12_fence_wait_semaphore_values_ptr   = nullptr;

        if (in_submit_info.get_d3d12_fence_semaphore_values(&d3d12_fence_signal_semaphore_values_ptr,
                                                            &d3d12_fence_wait_semaphore_values_ptr) )
        {
            VkD3D12FenceSubmitInfoKHR fence_info;

            fence_info.pNext                      = nullptr;
            fence_info.pSignalSemaphoreValues     = d3d12_fence_signal_semaphore_values_ptr;
            fence_info.pWaitSemaphoreValues       = d3d12_fence_wait_semaphore_values_ptr;
            fence_info.signalSemaphoreValuesCount = in_submit_info.get_n_signal_semaphores();
            fence_info.sType                      = VK_STRUCTURE_TYPE_D3D12_FENCE_SUBMIT_INFO_KHR;
            fence_info.waitSemaphoreValuesCount   = in_submit_info.get_n_wait_semaphores();

            struct_chainer.append_struct(fence_info);
        }
    }
    #endif

    /* Go for it */
    // A blocking submission needs a fence to wait on; fall back to the queue's
    // internal fence (which must then be reset before reuse).
    if (fence_ptr == nullptr && in_submit_info.get_should_block() )
    {
        fence_ptr         = m_submit_fence_ptr.get();
        needs_fence_reset = true;
    }

    // Lock all involved command buffers / semaphores / the fence for the
    // duration of the submission (unlocked again after vkQueueSubmit below).
    switch (in_submit_info.get_type() )
    {
        case SubmissionType::SGPU:
        {
            submit_command_buffers_lock_unlock(in_submit_info.get_n_command_buffers   (),
                                               in_submit_info.get_command_buffers_sgpu (),
                                               in_submit_info.get_n_signal_semaphores  (),
                                               in_submit_info.get_signal_semaphores_sgpu(),
                                               in_submit_info.get_n_wait_semaphores    (),
                                               in_submit_info.get_wait_semaphores_sgpu (),
                                               fence_ptr,
                                               true); /* in_should_lock */

            break;
        }

        default:
        {
            anvil_assert_fail();
        }
    }

    {
        auto chain_ptr = struct_chainer.create_chain();

        if (needs_fence_reset)
        {
            m_submit_fence_ptr->reset();
        }

        result = vkQueueSubmit(m_queue,
                               1, /* submitCount */
                               chain_ptr->get_root_struct(),
                               (fence_ptr != nullptr) ? fence_ptr->get_fence() : VK_NULL_HANDLE);

        if (in_submit_info.get_should_block() )
        {
            /* Wait till initialization finishes GPU-side */
            result = vkWaitForFences(m_device_ptr->get_device_vk(),
                                     1, /* fenceCount */
                                     fence_ptr->get_fence_ptr(),
                                     VK_TRUE, /* waitAll */
                                     UINT64_MAX); /* timeout */

            anvil_assert_vk_call_succeeded(result);
        }
    }

    // Release the locks taken prior to submission (same arguments, in_should_lock == false).
    switch (in_submit_info.get_type() )
    {
        case SubmissionType::SGPU:
        {
            submit_command_buffers_lock_unlock(in_submit_info.get_n_command_buffers   (),
                                               in_submit_info.get_command_buffers_sgpu (),
                                               in_submit_info.get_n_signal_semaphores  (),
                                               in_submit_info.get_signal_semaphores_sgpu(),
                                               in_submit_info.get_n_wait_semaphores    (),
                                               in_submit_info.get_wait_semaphores_sgpu (),
                                               fence_ptr,
                                               false); /* in_should_lock */

            break;
        }

        default:
        {
            anvil_assert_fail();
        }
    }

    anvil_assert_vk_call_succeeded(result);
}