// Wake every thread currently waiting on the lightweight condition variable.
int sys_lwcond_signal_all(mem_ptr_t<sys_lwcond_t> lwcond)
{
	sys_lwcond.Log("sys_lwcond_signal_all(lwcond_addr=0x%x)", lwcond.GetAddr());

	if (!lwcond.IsGood())
	{
		return CELL_EFAULT;
	}

	// Resolve the wait queue behind this lwcond.
	const u32 queue_id = (u32)lwcond->lwcond_queue;

	LWCond* lwc;
	if (!sys_lwcond.CheckId(queue_id, lwc))
	{
		return CELL_ESRCH;
	}

	lwc->signal_all();
	return CELL_OK;
}
// Wake one specific PPU thread waiting on the lightweight condition variable.
int sys_lwcond_signal_to(mem_ptr_t<sys_lwcond_t> lwcond, u32 ppu_thread_id)
{
	sys_lwcond.Log("sys_lwcond_signal_to(lwcond_addr=0x%x, ppu_thread_id=%d)", lwcond.GetAddr(), ppu_thread_id);

	if (!lwcond.IsGood())
	{
		return CELL_EFAULT;
	}

	// Resolve the wait queue behind this lwcond.
	const u32 queue_id = (u32)lwcond->lwcond_queue;

	LWCond* lwc;
	if (!sys_lwcond.CheckId(queue_id, lwc))
	{
		return CELL_ESRCH;
	}

	// EPERM when the target thread is not waiting on this queue.
	return lwc->signal_to(ppu_thread_id) ? CELL_OK : CELL_EPERM;
}
// AND the flag word with bitptn: bits whose corresponding bitptn bit is 0 are cleared.
s32 sys_event_flag_clear(u32 eflag_id, u64 bitptn)
{
	sys_event_flag.Log("sys_event_flag_clear(eflag_id=%d, bitptn=0x%llx)", eflag_id, bitptn);

	EventFlag* ef;
	if (!sys_event_flag.CheckId(eflag_id, ef))
	{
		return CELL_ESRCH;
	}

	// Serialize the update against concurrent waiters/setters.
	const u32 caller = GetCurrentPPUThread().GetId();
	ef->m_mutex.lock(caller);
	ef->flags &= bitptn;
	ef->m_mutex.unlock(caller);

	return CELL_OK;
}
// Non-blocking variant of sys_event_flag_wait: either the condition holds now
// (result receives the observed flag value, clear mode is applied) or CELL_EBUSY.
s32 sys_event_flag_trywait(u32 eflag_id, u64 bitptn, u32 mode, vm::ptr<u64> result)
{
	sys_event_flag.Log("sys_event_flag_trywait(eflag_id=%d, bitptn=0x%llx, mode=0x%x, result_addr=0x%x)", eflag_id, bitptn, mode, result.addr());

	if (result) *result = 0;

	// Validate the wait-condition nibble.
	const u32 wait_mode = mode & 0xf;
	if (wait_mode != SYS_EVENT_FLAG_WAIT_AND && wait_mode != SYS_EVENT_FLAG_WAIT_OR)
	{
		return CELL_EINVAL;
	}

	// Validate the clear portion (0 is accepted too, matching the waiting path).
	const u32 clear_mode = mode & ~0xf;
	if (clear_mode != 0 && clear_mode != SYS_EVENT_FLAG_WAIT_CLEAR && clear_mode != SYS_EVENT_FLAG_WAIT_CLEAR_ALL)
	{
		return CELL_EINVAL;
	}

	EventFlag* ef;
	if (!sys_event_flag.CheckId(eflag_id, ef))
	{
		return CELL_ESRCH;
	}

	const u32 caller = GetCurrentPPUThread().GetId();
	ef->m_mutex.lock(caller);

	const u64 snapshot = ef->flags;
	const bool satisfied =
		((mode & SYS_EVENT_FLAG_WAIT_AND) && (snapshot & bitptn) == bitptn) ||
		((mode & SYS_EVENT_FLAG_WAIT_OR) && (snapshot & bitptn));

	if (!satisfied)
	{
		ef->m_mutex.unlock(caller);
		return CELL_EBUSY;
	}

	// Apply the requested clear behavior before releasing the mutex.
	if (mode & SYS_EVENT_FLAG_WAIT_CLEAR)
	{
		ef->flags &= ~bitptn;
	}
	else if (mode & SYS_EVENT_FLAG_WAIT_CLEAR_ALL)
	{
		ef->flags = 0;
	}

	if (result) *result = snapshot;

	ef->m_mutex.unlock(caller);
	return CELL_OK;
}
// Free a memory block previously allocated through sys_mmapper and drop its ID.
s32 sys_mmapper_free_memory(u32 mem_id)
{
	sys_mmapper.Warning("sys_mmapper_free_memory(mem_id=0x%x)", mem_id);

	// Check if this mem ID is valid.
	mmapper_info* info;
	if(!sys_mmapper.CheckId(mem_id, info))
		return CELL_ESRCH;

	// Release the allocated memory and remove the ID.
	// Fix: the allocation itself was never freed here (only the ID was
	// removed), leaking the block on every call despite what the comment
	// above promises — matching the sibling implementation that frees it.
	Memory.Free(info->addr);
	sys_mmapper.RemoveId(mem_id);

	return CELL_OK;
}
// Map a previously allocated mmapper block at the given process address.
int sys_mmapper_map_memory(u32 start_addr, u32 mem_id, u64 flags)
{
	sc_mem.Warning("sys_mmapper_map_memory(start_addr=0x%x, mem_id=0x%x, flags=0x%llx)", start_addr, mem_id, flags);

	// Check if this mem ID is valid.
	mmapper_info* info;
	if(!sc_mem.CheckId(mem_id, info))
		return CELL_ESRCH;

	if(!Memory.Map(start_addr, info->addr, info->size))
	{
		sc_mem.Error("sys_mmapper_map_memory failed!");
		// Fix: previously fell through and reported CELL_OK even though the
		// mapping failed; report the failure to the caller instead.
		return CELL_ENOMEM;
	}

	return CELL_OK;
}
// Attempt to take the write lock without blocking.
s32 sys_rwlock_trywlock(u32 rw_lock_id)
{
	sys_rwlock.Log("sys_rwlock_trywlock(rw_lock_id=%d)", rw_lock_id);

	RWLock* rw;
	if (!sys_rwlock.CheckId(rw_lock_id, rw))
	{
		return CELL_ESRCH;
	}

	const u32 self = GetCurrentPPUThread().GetId();

	// Re-locking by the thread that already holds the write lock would deadlock.
	if (!rw->wlock_check(self))
	{
		return CELL_EDEADLK;
	}

	// Single non-blocking attempt.
	return rw->wlock_trylock(self, false) ? CELL_OK : CELL_EBUSY;
}
// Block the calling PPU thread on a lightweight condition variable.
// The caller must hold the associated lwmutex; it is released while the
// thread sleeps and must be re-acquired before a successful return.
// timeout is in microseconds (0 = wait forever, with periodic warnings).
int sys_lwcond_wait(mem_ptr_t<sys_lwcond_t> lwcond, u64 timeout)
{
	sys_lwcond.Log("sys_lwcond_wait(lwcond_addr=0x%x, timeout=%lld)", lwcond.GetAddr(), timeout);

	if (!lwcond.IsGood()) return CELL_EFAULT;

	LWCond* lwc;
	u32 id = (u32)lwcond->lwcond_queue;
	if (!sys_lwcond.CheckId(id, lwc)) return CELL_ESRCH;

	const u32 tid = GetCurrentPPUThread().GetId();
	mem_ptr_t<sys_lwmutex_t> lwmutex(lwcond->lwmutex);

	if ((u32)lwmutex->owner.GetOwner() != tid) return CELL_EPERM; // caller must own this lwmutex

	// Register this thread on the condition's wait queue.
	lwc->begin_waiting(tid);

	// Polling loop: one iteration per ~1 ms of sleep.
	u32 counter = 0;
	const u32 max_counter = timeout ? (timeout / 1000) : 20000; // us -> ms; 20000 iterations between warnings when infinite
	bool was_locked = true; // the lwmutex is held on entry
	do
	{
		if (Emu.IsStopped())
		{
			// Emulator shutdown: abort the wait.
			// NOTE(review): the waiter record is not removed here (no
			// stop_waiting call) — presumably cleaned up with the queue; confirm.
			ConLog.Warning("sys_lwcond_wait(sq id=%d, ...) aborted", id);
			return CELL_ETIMEDOUT;
		}
		// Drop the lwmutex (if we hold it) so other threads can signal.
		if (was_locked) lwmutex->unlock(tid);
		Sleep(1);
		// Try to re-take the lwmutex; the signal is only checked while holding it.
		if (was_locked = (lwmutex->trylock(tid) == CELL_OK))
		{
			if (lwc->check(tid))
			{
				// Signalled: return with the lwmutex held, as the API requires.
				return CELL_OK;
			}
		}
		if (counter++ > max_counter)
		{
			if (!timeout)
			{
				// Infinite wait: log periodically and keep polling.
				sys_lwcond.Warning("sys_lwcond_wait(lwcond_addr=0x%x): TIMEOUT", lwcond.GetAddr());
				counter = 0;
			}
			else
			{
				// Timed wait expired: deregister and report timeout.
				// NOTE(review): returns without re-acquiring the lwmutex — confirm
				// callers expect the mutex unlocked on ETIMEDOUT.
				lwc->stop_waiting(tid);
				return CELL_ETIMEDOUT;
			}
		}
	} while (true);
}
// Destroy a reader/writer lock, but only when it is completely idle.
s32 sys_rwlock_destroy(u32 rw_lock_id)
{
	sys_rwlock.Warning("sys_rwlock_destroy(rw_lock_id=%d)", rw_lock_id);

	RWLock* rw;
	if (!sys_rwlock.CheckId(rw_lock_id, rw))
	{
		return CELL_ESRCH;
	}

	std::lock_guard<std::mutex> guard(rw->m_lock);

	// Refuse destruction while there are queued writers, active readers,
	// or a current writer.
	const bool in_use = rw->wlock_queue.size() || rw->rlock_list.size() || rw->wlock_thread;
	if (in_use)
	{
		return CELL_EBUSY;
	}

	Emu.GetIdManager().RemoveID(rw_lock_id);
	return CELL_OK;
}
// Destroy a memory container: free its backing allocation and remove its ID.
int sys_memory_container_destroy(u32 cid)
{
	sc_mem.Warning("sys_memory_container_destroy(cid=%d)", cid);

	// Unknown container IDs are rejected.
	MemoryContainerInfo* ct;
	if (!sc_mem.CheckId(cid, ct))
	{
		return CELL_ESRCH;
	}

	// Release the container's memory first, then retire the ID.
	Memory.Free(ct->addr);
	Emu.GetIdManager().RemoveID(cid);

	return CELL_OK;
}
// Tear down a memory container (free its allocation, retire the ID).
int sys_memory_container_destroy(u32 cid)
{
	sc_mem.Warning("sys_memory_container_destroy(cid=0x%x)", cid);

	MemoryContainerInfo* ct;
	if(!sc_mem.CheckId(cid, ct))
		return CELL_ESRCH;

	// Free the backing allocation before the ID disappears.
	Memory.Free(ct->addr);
	Emu.GetIdManager().RemoveID(cid);

	return CELL_OK;
}
// Free an mmapper allocation and retire its ID.
s32 sys_mmapper_free_memory(u32 mem_id)
{
	sys_mmapper.Warning("sys_mmapper_free_memory(mem_id=0x%x)", mem_id);

	// Unknown mem IDs are rejected.
	mmapper_info* info;
	if (!sys_mmapper.CheckId(mem_id, info))
	{
		return CELL_ESRCH;
	}

	// Free the block, then drop the ID that described it.
	Memory.Free(info->addr);
	Emu.GetIdManager().RemoveID(mem_id);

	return CELL_OK;
}
// Post (increment) a semaphore `count` times, releasing up to `count` waiters.
int sys_semaphore_post(u32 sem, int count)
{
	sys_sem.Log("sys_semaphore_post(sem=%d, count=%d)", sem, count);

	semaphore* sem_data = nullptr;
	if(!sys_sem.CheckId(sem, sem_data))
		return CELL_ESRCH;

	// Fix: a negative count previously made `while(count--)` spin essentially
	// forever (the counter just keeps decrementing and stays truthy). Reject
	// invalid counts up front; count == 0 remains a no-op as before.
	if(count < 0)
		return CELL_EINVAL;

	while(count--)
	{
		sem_data->sem_count++; // Increment internal counter for sys_semaphore_get_value.
		sem_data->sem.Post();
	}

	return CELL_OK;
}
// Destroy an event flag; fails while any thread is still waiting on it.
s32 sys_event_flag_destroy(u32 eflag_id)
{
	sys_event_flag.Warning("sys_event_flag_destroy(eflag_id=%d)", eflag_id);

	EventFlag* ef;
	if (!sys_event_flag.CheckId(eflag_id, ef))
	{
		return CELL_ESRCH;
	}

	// Destruction with live waiters is refused. (??? — waiter check not
	// serialized against the flag's mutex.)
	if (ef->waiters.size() > 0)
	{
		return CELL_EBUSY;
	}

	Emu.GetIdManager().RemoveID(eflag_id);
	return CELL_OK;
}
// Read the current flag word of an event flag into guest memory.
int sys_event_flag_get(u32 eflag_id, mem64_t flags)
{
	sys_event_flag.Warning("sys_event_flag_get(eflag_id=%d, flags_addr=0x%x)", eflag_id, flags.GetAddr());

	EventFlag* ef;
	if (!sys_event_flag.CheckId(eflag_id, ef))
	{
		return CELL_ESRCH;
	}

	if (!flags.IsGood())
	{
		return CELL_EFAULT;
	}

	// NOTE(review): read is not serialized against concurrent updates (???).
	flags = ef->flags;
	return CELL_OK;
}
// Stub: validates arguments only; port connection itself is not implemented
// in this revision (the earlier attempt is kept below for reference).
int sys_spu_thread_group_connect_event_all_threads(u32 id, u32 eq, u64 req, u32 spup_addr)
{
	sc_spu.Error("sys_spu_thread_group_connect_event_all_threads(id=%d, eq=%d, req=0x%llx, spup_addr=0x%x)", id, eq, req, spup_addr);

	// The target event queue must exist...
	EventQueue* equeue;
	if (!sys_event.CheckId(eq, equeue))
	{
		return CELL_ESRCH;
	}

	// ...the requested port mask must be non-zero...
	if (!req)
	{
		return CELL_EINVAL;
	}

	// ...and the SPU thread group must exist too.
	SpuGroupInfo* group;
	if (!Emu.GetIdManager().GetIDData(id, group))
	{
		return CELL_ESRCH;
	}

	/* for(u32 i=0; i<group->list.size(); ++i) { CPUThread* t; if(t = Emu.GetCPU().GetThread(group->list[i])) { bool finded_port = false; for(int j=0; j<equeue->pos; ++j) { if(!equeue->ports[j]->thread) { finded_port = true; equeue->ports[j]->thread = t; } } if(!finded_port) { return CELL_EISCONN; } } }*/

	return CELL_OK;
}
// Map a previously allocated mmapper block at start_addr and record the mapping.
s32 sys_mmapper_map_memory(u32 start_addr, u32 mem_id, u64 flags)
{
	sys_mmapper.Warning("sys_mmapper_map_memory(start_addr=0x%x, mem_id=0x%x, flags=0x%llx)", start_addr, mem_id, flags);

	// Check if this mem ID is valid.
	mmapper_info* info;
	if(!sys_mmapper.CheckId(mem_id, info))
		return CELL_ESRCH;

	// Map the memory into the process address.
	if(!Memory.Map(start_addr, info->addr, info->size))
	{
		sys_mmapper.Error("sys_mmapper_map_memory failed!");
		// Fix: previously the failure was only logged — the function still
		// recorded start_addr in mmapper_info_map (corrupting later unmap
		// bookkeeping) and returned CELL_OK. Report the error and record nothing.
		return CELL_ENOMEM;
	}

	// Keep track of mapped addresses (successful mappings only).
	mmapper_info_map[mem_id] = start_addr;

	return CELL_OK;
}
// Report the total/available size of a memory container into guest memory.
int sys_memory_container_get_size(u32 mem_info_addr, u32 cid)
{
	sc_mem.Warning("sys_memory_container_get_size(mem_info_addr=0x%x, cid=%d)", mem_info_addr, cid);

	// Validate the container ID.
	MemoryContainerInfo* ct;
	if (!sc_mem.CheckId(cid, ct))
	{
		return CELL_ESRCH;
	}

	// HACK: Return all memory.
	// The whole container size is reported as available (nothing counted as used).
	sys_memory_info info;
	info.total_user_memory = re(ct->size);     // re(): presumably guest-endian conversion — confirm
	info.available_user_memory = re(ct->size);
	Memory.WriteData(mem_info_addr, info);

	return CELL_OK;
}
// Connect every live SPU thread of the group to a free port of the event queue.
int sys_spu_thread_group_connect_event_all_threads(u32 id, u32 eq, u64 req, u32 spup_addr)
{
	sc_spu.Warning("sys_spu_thread_group_connect_event_all_threads(id=0x%x, eq=0x%x, req=0x%llx, spup_addr=0x%x)", id, eq, req, spup_addr);

	EventQueue* equeue;
	if(!sys_event.CheckId(eq, equeue))
	{
		return CELL_ESRCH;
	}

	if(!req)
	{
		return CELL_EINVAL;
	}

	SpuGroupInfo* group;
	if(!Emu.GetIdManager().GetIDData(id, group))
	{
		return CELL_ESRCH;
	}

	for(int i=0; i<g_spu_group_thr_count; ++i)
	{
		if(group->threads[i])
		{
			// Claim exactly one free port for this thread.
			bool finded_port = false;
			for(int j=0; j<equeue->pos; ++j)
			{
				if(!equeue->ports[j]->thread)
				{
					finded_port = true;
					equeue->ports[j]->thread = group->threads[i];
					// Fix: without this break, one thread claimed every
					// remaining free port, leaving none for the following
					// threads (which then failed with CELL_EISCONN).
					break;
				}
			}

			if(!finded_port)
			{
				return CELL_EISCONN;
			}
		}
	}
	return CELL_OK;
}
// Read the current flag word of an event flag into guest memory.
s32 sys_event_flag_get(u32 eflag_id, vm::ptr<u64> flags)
{
	sys_event_flag.Log("sys_event_flag_get(eflag_id=%d, flags_addr=0x%x)", eflag_id, flags.addr());

	if (!flags)
	{
		// Fix: the error message previously named sys_event_flag_create(),
		// which made logs from this function misleading.
		sys_event_flag.Error("sys_event_flag_get(): invalid memory access (flags_addr=0x%x)", flags.addr());
		return CELL_EFAULT;
	}

	EventFlag* ef;
	if (!sys_event_flag.CheckId(eflag_id, ef))
		return CELL_ESRCH;

	// Read the flag word under the flag's mutex.
	const u32 tid = GetCurrentPPUThread().GetId();
	ef->m_mutex.lock(tid);
	*flags = ef->flags;
	ef->m_mutex.unlock(tid);

	return CELL_OK;
}
// Cancel every thread waiting on the event flag; num (optional) receives
// the number of waiters that were cancelled.
int sys_event_flag_cancel(u32 eflag_id, mem32_t num)
{
	sys_event_flag.Warning("sys_event_flag_cancel(eflag_id=%d, num_addr=0x%x)", eflag_id, num.GetAddr());

	EventFlag* ef;
	if(!sys_event_flag.CheckId(eflag_id, ef))
		return CELL_ESRCH;

	// Snapshot the waiter thread ids and empty the list under the mutex.
	Array<u32> cancelled;
	{
		SMutexLocker lock(ef->m_mutex);
		cancelled.SetCount(ef->waiters.GetCount());
		for (u32 i = 0; i < ef->waiters.GetCount(); i++)
		{
			cancelled[i] = ef->waiters[i].tid;
		}
		ef->waiters.Clear();
	}

	// Wake each cancelled waiter through the signal mutex, bailing out
	// early if the emulator is shutting down.
	for (u32 i = 0; i < cancelled.GetCount(); i++)
	{
		if (Emu.IsStopped()) break;
		ef->signal.lock(cancelled[i]);
	}

	if (Emu.IsStopped())
	{
		ConLog.Warning("sys_event_flag_cancel(id=%d) aborted", eflag_id);
		return CELL_OK;
	}

	// Report the count if a usable output address was supplied; a null
	// address is accepted silently, anything else is a fault.
	if (num.IsGood())
	{
		num = cancelled.GetCount();
		return CELL_OK;
	}
	return num.GetAddr() ? CELL_EFAULT : CELL_OK;
}
// Allocate size bytes from a memory container with 1M or 64K page granularity;
// on success writes a freshly generated mem ID for the allocation.
s32 sys_mmapper_allocate_memory_from_container(u32 size, u32 cid, u64 flags, mem32_t mem_id)
{
	sys_mmapper.Warning("sys_mmapper_allocate_memory_from_container(size=0x%x, cid=%d, flags=0x%llx, mem_id_addr=0x%x)", size, cid, flags, mem_id.GetAddr());

	if(!mem_id.IsGood())
		return CELL_EFAULT;

	// Check if this container ID is valid.
	MemoryContainerInfo* ct;
	if(!sys_mmapper.CheckId(cid, ct))
		return CELL_ESRCH;

	// Pick alignment from the page-size flags; size must be a whole number of pages.
	const u64 page_mode = flags & (SYS_MEMORY_PAGE_SIZE_1M | SYS_MEMORY_PAGE_SIZE_64K);
	if (page_mode == SYS_MEMORY_PAGE_SIZE_1M)
	{
		if(size & 0xfffff)
			return CELL_EALIGN;
		ct->addr = Memory.Alloc(size, 0x100000);
	}
	else if (page_mode == SYS_MEMORY_PAGE_SIZE_64K)
	{
		if(size & 0xffff)
			return CELL_EALIGN;
		ct->addr = Memory.Alloc(size, 0x10000);
	}
	else
	{
		return CELL_EINVAL;
	}

	if(!ct->addr)
		return CELL_ENOMEM;
	ct->size = size;

	// Hand back a fresh mem ID describing the allocation.
	mem_id = sys_mmapper.GetNewId(new mmapper_info(ct->addr, ct->size, flags));
	return CELL_OK;
}
// Scan 1 MB-aligned slots from start_addr for an address where the mem block
// can be mapped; on success writes the chosen address back to alloc_addr.
s32 sys_mmapper_search_and_map(u32 start_addr, u32 mem_id, u64 flags, u32 alloc_addr)
{
	sys_mmapper.Warning("sys_mmapper_search_and_map(start_addr=0x%x, mem_id=0x%x, flags=0x%llx, alloc_addr=0x%x)", start_addr, mem_id, flags, alloc_addr);

	if(!Memory.IsGoodAddr(alloc_addr))
		return CELL_EFAULT;

	// Check if this mem ID is valid.
	mmapper_info* info;
	if(!sys_mmapper.CheckId(mem_id, info))
		return CELL_ESRCH;

	// Search for a mappable address.
	// Fix: both variables were previously declared uninitialized, so the
	// post-loop checks read indeterminate values (undefined behavior)
	// whenever no slot could be mapped.
	u32 addr = 0;
	bool found = false;
	for (int i = 0; i < SYS_MMAPPER_FIXED_SIZE; i += 0x100000)
	{
		addr = start_addr + i;
		found = Memory.Map(addr, info->addr, info->size);
		if(found)
		{
			sys_mmapper.Warning("Found and mapped address 0x%x", addr);
			break;
		}
	}

	// Check if the address is valid.
	if (!found || !Memory.IsGoodAddr(addr))
		return CELL_ENOMEM;

	// Write back the start address of the allocated area.
	Memory.Write32(alloc_addr, addr);

	// Keep track of mapped addresses.
	mmapper_info_map[mem_id] = addr;

	return CELL_OK;
}
// Acquire the write lock, blocking (by 1 ms polling) until it is obtained
// or the timeout (microseconds; 0 = infinite) expires.
s32 sys_rwlock_wlock(u32 rw_lock_id, u64 timeout)
{
	sys_rwlock.Log("sys_rwlock_wlock(rw_lock_id=%d, timeout=%lld)", rw_lock_id, timeout);

	RWLock* rw;
	if (!sys_rwlock.CheckId(rw_lock_id, rw)) return CELL_ESRCH;

	const u32 self = GetCurrentPPUThread().GetId();

	// A thread that already holds the write lock would deadlock itself.
	if (!rw->wlock_check(self)) return CELL_EDEADLK;

	// Fast path: lock acquired immediately.
	if (rw->wlock_trylock(self, true)) return CELL_OK;

	// Slow path: poll once per millisecond.
	const u32 give_up_after = timeout ? (timeout / 1000) : 20000;
	for (u32 elapsed = 0;;)
	{
		if (Emu.IsStopped())
		{
			LOG_WARNING(HLE, "sys_rwlock_wlock(rw_lock_id=%d, ...) aborted", rw_lock_id);
			return CELL_ETIMEDOUT;
		}

		std::this_thread::sleep_for(std::chrono::milliseconds(1));

		if (rw->wlock_trylock(self, true)) return CELL_OK;

		if (elapsed++ > give_up_after)
		{
			if (timeout) return CELL_ETIMEDOUT;
			elapsed = 0; // infinite wait: reset and keep polling
		}
	}
}
// Acquire the write lock, polling once per millisecond until it is obtained
// or the timeout (microseconds; 0 = infinite) expires.
int sys_rwlock_wlock(u32 rw_lock_id, u64 timeout)
{
	sys_rwlock.Log("sys_rwlock_wlock(rw_lock_id=%d, timeout=%lld)", rw_lock_id, timeout);

	RWLock* rw;
	if (!sys_rwlock.CheckId(rw_lock_id, rw)) return CELL_ESRCH;

	const u32 self = GetCurrentPPUThread().GetId();

	// Re-locking by the current writer would deadlock.
	if (!rw->wlock_check(self)) return CELL_EDEADLK;

	// Fast path: try once before entering the polling loop.
	if (rw->wlock_trylock(self, true)) return CELL_OK;

	const u32 give_up_after = timeout ? (timeout / 1000) : 20000;
	for (u32 elapsed = 0;;)
	{
		if (Emu.IsStopped())
		{
			ConLog.Warning("sys_rwlock_wlock(rw_lock_id=%d, ...) aborted", rw_lock_id);
			return CELL_ETIMEDOUT;
		}

		Sleep(1);

		if (rw->wlock_trylock(self, true)) return CELL_OK;

		if (elapsed++ > give_up_after)
		{
			if (timeout) return CELL_ETIMEDOUT;
			elapsed = 0; // infinite wait: keep polling
		}
	}
}
// Cancel every thread waiting on the event flag.
// num (optional) receives the number of waiters that were cancelled.
s32 sys_event_flag_cancel(u32 eflag_id, vm::ptr<u32> num)
{
	sys_event_flag.Log("sys_event_flag_cancel(eflag_id=%d, num_addr=0x%x)", eflag_id, num.addr());

	EventFlag* ef;
	if (!sys_event_flag.CheckId(eflag_id, ef)) return CELL_ESRCH;

	std::vector<u32> tids;
	const u32 tid = GetCurrentPPUThread().GetId();
	{
		// Snapshot the waiter thread ids and empty the list under the mutex.
		ef->m_mutex.lock(tid);
		tids.resize(ef->waiters.size());
		for (u32 i = 0; i < ef->waiters.size(); i++)
		{
			tids[i] = ef->waiters[i].tid;
		}
		ef->waiters.clear();
		ef->m_mutex.unlock(tid);
	}

	// Wake each cancelled waiter by taking the signal mutex on its behalf.
	// NOTE(review): unlike the stop check below, this loop has no early-out
	// if the emulator stops mid-way — confirm signal.lock cannot block
	// indefinitely during shutdown.
	for (u32 i = 0; i < tids.size(); i++)
	{
		ef->signal.lock(tids[i]);
	}

	if (Emu.IsStopped())
	{
		sys_event_flag.Warning("sys_event_flag_cancel(id=%d) aborted", eflag_id);
		return CELL_OK;
	}

	// Report the number of cancelled waiters when an output pointer was given.
	if (num) *num = (u32)tids.size();
	return CELL_OK;
}
// OR bitptn into the flag word and, if that satisfies a waiter, wake it.
s32 sys_event_flag_set(u32 eflag_id, u64 bitptn)
{
	sys_event_flag.Log("sys_event_flag_set(eflag_id=%d, bitptn=0x%llx)", eflag_id, bitptn);

	EventFlag* ef;
	if (!sys_event_flag.CheckId(eflag_id, ef)) return CELL_ESRCH;

	u32 tid = GetCurrentPPUThread().GetId();

	ef->m_mutex.lock(tid);

	ef->flags |= bitptn;

	// check() yields the thread id of a waiter whose condition is now
	// satisfied (0 when none).
	if (u32 target = ef->check())
	{
		// if signal, leave both mutexes locked...
		// The signal mutex is locked on the target's behalf, and m_mutex
		// ownership is handed over to the woken thread (unlock(tid, target)),
		// so the waiter resumes already holding both.
		ef->signal.lock(target);
		ef->m_mutex.unlock(tid, target);
	}
	else
	{
		ef->m_mutex.unlock(tid);
	}

	return CELL_OK;
}
// Allocate size bytes from a memory container with the page size selected
// by flags; the resulting guest address is written to alloc_addr_addr.
int sys_memory_allocate_from_container(u32 size, u32 cid, u32 flags, u32 alloc_addr_addr)
{
	sc_mem.Log("sys_memory_allocate_from_container(size=0x%x, cid=0x%x, flags=0x%x)", size, cid, flags);

	// Validate the container ID.
	MemoryContainerInfo* ct;
	if(!sc_mem.CheckId(cid, ct))
		return CELL_ESRCH;

	// Derive alignment and the size-alignment mask from the page-size flag.
	u32 align;
	u32 align_mask;
	if (flags == SYS_MEMORY_PAGE_SIZE_1M)
	{
		align = 0x100000;
		align_mask = 0xfffff;
	}
	else if (flags == SYS_MEMORY_PAGE_SIZE_64K)
	{
		align = 0x10000;
		align_mask = 0xffff;
	}
	else
	{
		return CELL_EINVAL;
	}

	if(size & align_mask)
		return CELL_EALIGN;

	// Store the address and size in the container.
	ct->addr = Memory.Alloc(size, align);
	if(!ct->addr)
		return CELL_ENOMEM;
	ct->size = size;

	// Write back the start address of the allocated area.
	sc_mem.Log("Memory allocated! [addr: 0x%x, size: 0x%x]", ct->addr, ct->size);
	Memory.Write32(alloc_addr_addr, ct->addr);

	return CELL_OK;
}
// Wait until the event flag satisfies bitptn under the given mode.
// result (optional) receives the flag value observed at wake-up; timeout is
// in microseconds, 0 meaning wait forever.
s32 sys_event_flag_wait(u32 eflag_id, u64 bitptn, u32 mode, vm::ptr<u64> result, u64 timeout)
{
	sys_event_flag.Log("sys_event_flag_wait(eflag_id=%d, bitptn=0x%llx, mode=0x%x, result_addr=0x%x, timeout=%lld)", eflag_id, bitptn, mode, result.addr(), timeout);

	if (result) *result = 0;

	// Validate the wait-condition nibble of the mode.
	switch (mode & 0xf)
	{
	case SYS_EVENT_FLAG_WAIT_AND: break;
	case SYS_EVENT_FLAG_WAIT_OR: break;
	default: return CELL_EINVAL;
	}

	// Validate the clear portion of the mode.
	switch (mode & ~0xf)
	{
	case 0: break; // ???
	case SYS_EVENT_FLAG_WAIT_CLEAR: break;
	case SYS_EVENT_FLAG_WAIT_CLEAR_ALL: break;
	default: return CELL_EINVAL;
	}

	EventFlag* ef;
	if (!sys_event_flag.CheckId(eflag_id, ef)) return CELL_ESRCH;

	const u32 tid = GetCurrentPPUThread().GetId();
	{
		ef->m_mutex.lock(tid);

		// SINGLE-waiter flags admit only one waiter at a time.
		if (ef->m_type == SYS_SYNC_WAITER_SINGLE && ef->waiters.size() > 0)
		{
			ef->m_mutex.unlock(tid);
			return CELL_EPERM;
		}

		// Enqueue this thread as a waiter.
		EventFlagWaiter rec;
		rec.bitptn = bitptn;
		rec.mode = mode;
		rec.tid = tid;
		ef->waiters.push_back(rec);

		// Fast path: condition already satisfied — no sleep needed.
		if (ef->check() == tid)
		{
			u64 flags = ef->flags;

			ef->waiters.erase(ef->waiters.end() - 1);

			if (mode & SYS_EVENT_FLAG_WAIT_CLEAR)
			{
				ef->flags &= ~bitptn;
			}
			else if (mode & SYS_EVENT_FLAG_WAIT_CLEAR_ALL)
			{
				ef->flags = 0;
			}

			if (result) *result = flags;

			ef->m_mutex.unlock(tid);
			return CELL_OK;
		}

		ef->m_mutex.unlock(tid);
	}

	// Slow path: poll until a setter hands the signal mutex to this thread.
	u64 counter = 0;
	const u64 max_counter = timeout ? (timeout / 1000) : ~0; // us -> ms; ~0 = effectively infinite
	while (true)
	{
		// signal.unlock(tid, tid) succeeds when a setter passed us ownership.
		if (ef->signal.unlock(tid, tid) == SMR_OK)
		{
			ef->m_mutex.lock(tid);

			u64 flags = ef->flags;

			for (u32 i = 0; i < ef->waiters.size(); i++)
			{
				if (ef->waiters[i].tid == tid)
				{
					// Remove our waiter record and apply the clear mode.
					ef->waiters.erase(ef->waiters.begin() + i);

					if (mode & SYS_EVENT_FLAG_WAIT_CLEAR)
					{
						ef->flags &= ~bitptn;
					}
					else if (mode & SYS_EVENT_FLAG_WAIT_CLEAR_ALL)
					{
						ef->flags = 0;
					}

					// Chain the wake-up: if another waiter is now satisfied,
					// hand both mutexes over to it.
					if (u32 target = ef->check())
					{
						// if signal, leave both mutexes locked...
						ef->signal.unlock(tid, target);
						ef->m_mutex.unlock(tid, target);
					}
					else
					{
						ef->signal.unlock(tid);
					}

					if (result) *result = flags;

					// NOTE(review): on the hand-off path m_mutex.unlock is
					// reached twice (once with target, once here) — confirm
					// the second call is an intentional no-op.
					ef->m_mutex.unlock(tid);
					return CELL_OK;
				}
			}

			// Our record is gone: we were cancelled (sys_event_flag_cancel).
			ef->signal.unlock(tid);
			ef->m_mutex.unlock(tid);
			return CELL_ECANCELED;
		}

		std::this_thread::sleep_for(std::chrono::milliseconds(1));

		if (counter++ > max_counter)
		{
			// Timed out: remove our waiter record before returning.
			ef->m_mutex.lock(tid);
			for (u32 i = 0; i < ef->waiters.size(); i++)
			{
				if (ef->waiters[i].tid == tid)
				{
					ef->waiters.erase(ef->waiters.begin() + i);
					break;
				}
			}
			ef->m_mutex.unlock(tid);
			return CELL_ETIMEDOUT;
		}

		if (Emu.IsStopped())
		{
			sys_event_flag.Warning("sys_event_flag_wait(id=%d) aborted", eflag_id);
			return CELL_OK;
		}
	}
}
// Older revision of sys_event_flag_wait (mem64_t / SMutexLocker / Array API).
// Waits until the event flag satisfies bitptn under mode; result (optional)
// receives the observed flag value; timeout is in microseconds (0 = forever).
int sys_event_flag_wait(u32 eflag_id, u64 bitptn, u32 mode, mem64_t result, u64 timeout)
{
	sys_event_flag.Warning("sys_event_flag_wait(eflag_id=%d, bitptn=0x%llx, mode=0x%x, result_addr=0x%x, timeout=%lld)", eflag_id, bitptn, mode, result.GetAddr(), timeout);

	if (result.IsGood()) result = 0;

	// Validate the wait-condition nibble of the mode.
	switch (mode & 0xf)
	{
	case SYS_EVENT_FLAG_WAIT_AND: break;
	case SYS_EVENT_FLAG_WAIT_OR: break;
	default: return CELL_EINVAL;
	}

	// Validate the clear portion of the mode.
	switch (mode & ~0xf)
	{
	case 0: break; // ???
	case SYS_EVENT_FLAG_WAIT_CLEAR: break;
	case SYS_EVENT_FLAG_WAIT_CLEAR_ALL: break;
	default: return CELL_EINVAL;
	}

	EventFlag* ef;
	if(!sys_event_flag.CheckId(eflag_id, ef)) return CELL_ESRCH;

	u32 tid = GetCurrentPPUThread().GetId();
	{
		SMutexLocker lock(ef->m_mutex);

		// SINGLE-waiter flags admit only one waiter at a time.
		if (ef->m_type == SYS_SYNC_WAITER_SINGLE && ef->waiters.GetCount() > 0)
		{
			return CELL_EPERM;
		}

		// Enqueue this thread as a waiter.
		EventFlagWaiter rec;
		rec.bitptn = bitptn;
		rec.mode = mode;
		rec.tid = tid;
		ef->waiters.AddCpy(rec);

		// Fast path: condition already satisfied — no sleep needed.
		if (ef->check() == tid)
		{
			u64 flags = ef->flags;

			ef->waiters.RemoveAt(ef->waiters.GetCount() - 1);

			if (mode & SYS_EVENT_FLAG_WAIT_CLEAR)
			{
				ef->flags &= ~bitptn;
			}
			else if (mode & SYS_EVENT_FLAG_WAIT_CLEAR_ALL)
			{
				ef->flags = 0;
			}

			// Write the observed value; a null output address is tolerated,
			// any other bad address is a fault.
			if (result.IsGood())
			{
				result = flags;
				return CELL_OK;
			}

			if (!result.GetAddr())
			{
				return CELL_OK;
			}
			return CELL_EFAULT;
		}
	}

	// Slow path: poll until a setter hands the signal mutex to this thread.
	u32 counter = 0;
	const u32 max_counter = timeout ? (timeout / 1000) : ~0; // us -> ms; ~0 = effectively infinite
	while (true)
	{
		// A setter makes this thread the signal owner to wake it.
		if (ef->signal.GetOwner() == tid)
		{
			SMutexLocker lock(ef->m_mutex);
			ef->signal.unlock(tid);
			u64 flags = ef->flags;

			for (u32 i = 0; i < ef->waiters.GetCount(); i++)
			{
				if (ef->waiters[i].tid == tid)
				{
					// Remove our waiter record and apply the clear mode.
					ef->waiters.RemoveAt(i);

					if (mode & SYS_EVENT_FLAG_WAIT_CLEAR)
					{
						ef->flags &= ~bitptn;
					}
					else if (mode & SYS_EVENT_FLAG_WAIT_CLEAR_ALL)
					{
						ef->flags = 0;
					}

					if (result.IsGood())
					{
						result = flags;
						return CELL_OK;
					}

					if (!result.GetAddr())
					{
						return CELL_OK;
					}
					return CELL_EFAULT;
				}
			}

			// Our record is gone: we were cancelled.
			return CELL_ECANCELED;
		}

		Sleep(1);

		if (counter++ > max_counter)
		{
			// Timed out: remove our waiter record before returning.
			SMutexLocker lock(ef->m_mutex);
			for (u32 i = 0; i < ef->waiters.GetCount(); i++)
			{
				if (ef->waiters[i].tid == tid)
				{
					ef->waiters.RemoveAt(i);
					break;
				}
			}
			return CELL_ETIMEDOUT;
		}

		if (Emu.IsStopped())
		{
			ConLog.Warning("sys_event_flag_wait(id=%d) aborted", eflag_id);
			return CELL_OK;
		}
	}
}