s32 cellFsStReadGetCurrentAddr(u32 fd, vm::ptr<u32> addr, vm::ptr<u64> size)
{
	cellFs.Warning("cellFsStReadGetCurrentAddr(fd=%d, addr=*0x%x, size=*0x%x)", fd, addr, size);

	const auto file = idm::get<lv2_file_t>(fd);

	if (!file)
	{
		return CELL_FS_EBADF;
	}

	if (file->st_status.load() == SSS_NOT_INITIALIZED || !file->st_copyless)
	{
		return CELL_FS_ENXIO;
	}

	const u64 copied = file->st_copied.load();
	const u32 position = VM_CAST(file->st_buffer + copied % file->st_ringbuf_size);
	const u64 total_read = file->st_total_read.load();

	// *size receives the number of contiguous unread bytes available at the current ring buffer position
	if ((*size = std::min<u64>(file->st_ringbuf_size - (position - file->st_buffer), total_read - copied)).data())
	{
		*addr = position;
	}
	else
	{
		*addr = 0;
	}

	// check end of stream
	return total_read < file->st_read_size ? CELL_OK : CELL_FS_ERANGE;
}
s32 cellFsStRead(u32 fd, vm::ptr<u8> buf, u64 size, vm::ptr<u64> rsize)
{
	cellFs.Warning("cellFsStRead(fd=%d, buf=*0x%x, size=0x%llx, rsize=*0x%x)", fd, buf, size, rsize);

	const auto file = idm::get<lv2_file_t>(fd);

	if (!file)
	{
		return CELL_FS_EBADF;
	}

	if (file->st_status.load() == SSS_NOT_INITIALIZED || file->st_copyless)
	{
		return CELL_FS_ENXIO;
	}

	const u64 copied = file->st_copied.load();
	const u32 position = VM_CAST(file->st_buffer + copied % file->st_ringbuf_size);
	const u64 total_read = file->st_total_read.load();
	const u64 copy_size = (*rsize = std::min<u64>(size, total_read - copied)); // write rsize

	// copy data
	const u64 first_size = std::min<u64>(copy_size, file->st_ringbuf_size - (position - file->st_buffer));
	memcpy(buf.get_ptr(), vm::get_ptr(position), first_size);
	memcpy((buf + first_size).get_ptr(), vm::get_ptr(file->st_buffer), copy_size - first_size);

	// notify
	file->st_copied += copy_size;
	file->cv.notify_one();

	// check end of stream
	return total_read < file->st_read_size ? CELL_OK : CELL_FS_ERANGE;
}
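// Illustrative sketch (not part of the original source): the two memcpy calls above
// implement a wrap-around copy out of the stream ring buffer. A standalone equivalent
// of that arithmetic, using hypothetical plain host-memory buffers, could look like this:
//
//     #include <algorithm>
//     #include <cstdint>
//     #include <cstring>
//
//     // Copy copy_size bytes starting at ring offset pos, wrapping at ring_size.
//     void ring_copy(std::uint8_t* dst, const std::uint8_t* ring, std::uint64_t ring_size,
//                    std::uint64_t pos, std::uint64_t copy_size)
//     {
//         const std::uint64_t first = std::min(copy_size, ring_size - pos); // bytes up to the end of the ring
//         std::memcpy(dst, ring + pos, first);                              // tail segment
//         std::memcpy(dst + first, ring, copy_size - first);                // wrapped segment (may be empty)
//     }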
void SPUThread::do_dma_transfer(u32 cmd, spu_mfc_arg_t args)
{
	if (cmd & (MFC_BARRIER_MASK | MFC_FENCE_MASK))
	{
		_mm_mfence();
	}

	u32 eal = VM_CAST(args.ea);

	if (eal >= SYS_SPU_THREAD_BASE_LOW && m_type == CPU_THREAD_SPU) // SPU Thread Group MMIO (LS and SNR)
	{
		const u32 index = (eal - SYS_SPU_THREAD_BASE_LOW) / SYS_SPU_THREAD_OFFSET; // thread number in group
		const u32 offset = (eal - SYS_SPU_THREAD_BASE_LOW) % SYS_SPU_THREAD_OFFSET; // LS offset or MMIO register

		const auto group = tg.lock();

		if (group && index < group->num && group->threads[index])
		{
			auto& spu = static_cast<SPUThread&>(*group->threads[index]);

			if (offset + args.size - 1 < 0x40000) // LS access
			{
				eal = spu.offset + offset; // redirect access
			}
			else if ((cmd & MFC_PUT_CMD) && args.size == 4 && (offset == SYS_SPU_THREAD_SNR1 || offset == SYS_SPU_THREAD_SNR2))
			{
				spu.push_snr(SYS_SPU_THREAD_SNR2 == offset, read32(args.lsa));
				return;
			}
			else
			{
				throw EXCEPTION("Invalid MMIO offset (cmd=0x%x, lsa=0x%x, ea=0x%llx, tag=0x%x, size=0x%x)", cmd, args.lsa, args.ea, args.tag, args.size);
			}
		}
		else
		{
			throw EXCEPTION("Invalid thread type (cmd=0x%x, lsa=0x%x, ea=0x%llx, tag=0x%x, size=0x%x)", cmd, args.lsa, args.ea, args.tag, args.size);
		}
	}

	switch (cmd & ~(MFC_BARRIER_MASK | MFC_FENCE_MASK))
	{
	case MFC_PUT_CMD:
	case MFC_PUTR_CMD:
	{
		memcpy(vm::get_ptr(eal), vm::get_ptr(offset + args.lsa), args.size);
		return;
	}

	case MFC_GET_CMD:
	{
		memcpy(vm::get_ptr(offset + args.lsa), vm::get_ptr(eal), args.size);
		return;
	}
	}

	throw EXCEPTION("Invalid command %s (cmd=0x%x, lsa=0x%x, ea=0x%llx, tag=0x%x, size=0x%x)", get_mfc_cmd_name(cmd), cmd, args.lsa, args.ea, args.tag, args.size);
}
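// Informal note (inferred from the branch above, not part of the original source):
// effective addresses in the SYS_SPU_THREAD_BASE_LOW range are SPU Thread Group MMIO.
// Each thread in the group owns a SYS_SPU_THREAD_OFFSET-sized window; offsets below
// 0x40000 (256 KiB) are redirected to that thread's local storage, while a 4-byte PUT
// to SYS_SPU_THREAD_SNR1 or SYS_SPU_THREAD_SNR2 writes the corresponding signal
// notification register via push_snr().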
u64 vfsStreamMemory::Write(const void* src, u64 count)
{
	assert(m_pos < m_size);

	if (m_pos + count > m_size)
	{
		count = m_size - m_pos;
	}

	memcpy(vm::get_ptr<void>(VM_CAST(m_addr + m_pos)), src, count);

	m_pos += count;

	return count;
}
u64 vfsStreamMemory::Read(void* dst, u64 count)
{
	assert(m_pos < m_size);

	if (m_pos + count > m_size)
	{
		count = m_size - m_pos;
	}

	memcpy(dst, vm::get_ptr<void>(VM_CAST(m_addr + m_pos)), count);

	m_pos += count;

	return count;
}
s32 cellFsStReadStart(u32 fd, u64 offset, u64 size)
{
	cellFs.Warning("cellFsStReadStart(fd=%d, offset=0x%llx, size=0x%llx)", fd, offset, size);

	const auto file = idm::get<lv2_file_t>(fd);

	if (!file)
	{
		return CELL_FS_EBADF;
	}

	switch (auto status = file->st_status.compare_and_swap(SSS_INITIALIZED, SSS_STARTED))
	{
	case SSS_NOT_INITIALIZED:
	{
		return CELL_FS_ENXIO;
	}

	case SSS_STARTED:
	{
		return CELL_FS_EBUSY;
	}
	}

	offset = std::min<u64>(file->file->GetSize(), offset);
	size = std::min<u64>(file->file->GetSize() - offset, size);

	file->st_read_size = size;

	file->st_thread.start([=]{ return fmt::format("FS ST Thread[0x%x]", fd); }, [=]()
	{
		std::unique_lock<std::mutex> lock(file->mutex);

		while (file->st_status.load() == SSS_STARTED && !Emu.IsStopped())
		{
			// check free space in buffer and available data in stream
			if (file->st_total_read - file->st_copied <= file->st_ringbuf_size - file->st_block_size && file->st_total_read < file->st_read_size)
			{
				// get buffer position
				const u32 position = VM_CAST(file->st_buffer + file->st_total_read % file->st_ringbuf_size);

				// read data
				auto old = file->file->Tell();
				file->file->Seek(offset + file->st_total_read);
				auto res = file->file->Read(vm::get_ptr(position), file->st_block_size);
				file->file->Seek(old);

				// notify
				file->st_total_read += res;
				file->cv.notify_one();
			}

			// check callback condition if set
			if (file->st_callback.data.func)
			{
				const u64 available = file->st_total_read - file->st_copied;

				if (available >= file->st_callback.data.size)
				{
					const auto func = file->st_callback.exchange({}).func;

					Emu.GetCallbackManager().Async([=](CPUThread& ppu)
					{
						func(static_cast<PPUThread&>(ppu), fd, available);
					});
				}
			}

			file->cv.wait_for(lock, std::chrono::milliseconds(1));
		}

		file->st_status.compare_and_swap(SSS_STOPPED, SSS_INITIALIZED);
		file->st_read_size = 0;
		file->st_total_read = 0;
		file->st_copied = 0;
		file->st_callback.data = {};
	});

	return CELL_OK;
}
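// Informal overview (inferred from the code above, not part of the original source):
// the worker thread started by cellFsStReadStart() is the producer side of the stream.
// It keeps reading st_block_size-sized chunks from the file into the ring buffer while
//     st_total_read - st_copied <= st_ringbuf_size - st_block_size
// i.e. while at least one more block fits into the unconsumed portion of the ring.
// cellFsStRead() and cellFsStReadGetCurrentAddr() are the consumer side: they advance
// st_copied and notify the condition variable so the producer can refill the freed space.
// The registered callback, if any, fires once the buffered amount reaches the requested size.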
void SPUThread::process_mfc_cmd(u32 cmd)
{
	if (Ini.HLELogging.GetValue())
	{
		LOG_NOTICE(SPU, "DMA %s: cmd=0x%x, lsa=0x%x, ea=0x%llx, tag=0x%x, size=0x%x", get_mfc_cmd_name(cmd), cmd, ch_mfc_args.lsa, ch_mfc_args.ea, ch_mfc_args.tag, ch_mfc_args.size);
	}

	switch (cmd)
	{
	case MFC_PUT_CMD:
	case MFC_PUTB_CMD:
	case MFC_PUTF_CMD:
	case MFC_PUTR_CMD:
	case MFC_PUTRB_CMD:
	case MFC_PUTRF_CMD:
	case MFC_GET_CMD:
	case MFC_GETB_CMD:
	case MFC_GETF_CMD:
	{
		return do_dma_transfer(cmd, ch_mfc_args);
	}

	case MFC_PUTL_CMD:
	case MFC_PUTLB_CMD:
	case MFC_PUTLF_CMD:
	case MFC_PUTRL_CMD:
	case MFC_PUTRLB_CMD:
	case MFC_PUTRLF_CMD:
	case MFC_GETL_CMD:
	case MFC_GETLB_CMD:
	case MFC_GETLF_CMD:
	{
		return do_dma_list_cmd(cmd, ch_mfc_args);
	}

	case MFC_GETLLAR_CMD: // acquire reservation
	{
		if (ch_mfc_args.size != 128)
		{
			break;
		}

		const u32 raddr = VM_CAST(ch_mfc_args.ea);

		vm::reservation_acquire(vm::get_ptr(offset + ch_mfc_args.lsa), raddr, 128);

		if (last_raddr)
		{
			ch_event_stat |= SPU_EVENT_LR;
		}

		last_raddr = raddr;

		return ch_atomic_stat.set_value(MFC_GETLLAR_SUCCESS);
	}

	case MFC_PUTLLC_CMD: // store conditionally
	{
		if (ch_mfc_args.size != 128)
		{
			break;
		}

		if (vm::reservation_update(VM_CAST(ch_mfc_args.ea), vm::get_ptr(offset + ch_mfc_args.lsa), 128))
		{
			if (last_raddr == 0)
			{
				throw EXCEPTION("Unexpected: PUTLLC command succeeded, but GETLLAR command not detected");
			}

			last_raddr = 0;

			return ch_atomic_stat.set_value(MFC_PUTLLC_SUCCESS);
		}
		else
		{
			if (last_raddr != 0)
			{
				ch_event_stat |= SPU_EVENT_LR;

				last_raddr = 0;
			}

			return ch_atomic_stat.set_value(MFC_PUTLLC_FAILURE);
		}
	}

	case MFC_PUTLLUC_CMD: // store unconditionally
	case MFC_PUTQLLUC_CMD:
	{
		if (ch_mfc_args.size != 128)
		{
			break;
		}

		vm::reservation_op(VM_CAST(ch_mfc_args.ea), 128, [this]()
		{
			std::memcpy(vm::priv_ptr(VM_CAST(ch_mfc_args.ea)), vm::get_ptr(offset + ch_mfc_args.lsa), 128);
		});

		if (last_raddr != 0 && vm::g_tls_did_break_reservation)
		{
			ch_event_stat |= SPU_EVENT_LR;

			last_raddr = 0;
		}

		if (cmd == MFC_PUTLLUC_CMD)
		{
			ch_atomic_stat.set_value(MFC_PUTLLUC_SUCCESS);
		}

		return;
	}
	}

	throw EXCEPTION("Unknown command %s (cmd=0x%x, lsa=0x%x, ea=0x%llx, tag=0x%x, size=0x%x)", get_mfc_cmd_name(cmd), cmd, ch_mfc_args.lsa, ch_mfc_args.ea, ch_mfc_args.tag, ch_mfc_args.size);
}
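// Informal note (not part of the original source): GETLLAR/PUTLLC model the MFC's
// 128-byte "get lock-line and reserve" / "put lock-line conditional" pair, the SPU
// counterpart of lwarx/stwcx. on the PPU. PUTLLC succeeds only if the reservation
// taken by the most recent GETLLAR is still intact; PUTLLUC and PUTQLLUC store the
// line unconditionally, and a lost reservation raises the SPU_EVENT_LR (lock-line
// reservation lost) event, as handled above.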
u64 PPUThread::get_stack_arg(s32 i)
{
	return vm::read64(VM_CAST(GPR[1] + 0x70 + 0x8 * (i - 9)));
}
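// Informal note (inferred from the expression above, not part of the original source):
// the first eight integer arguments of a PPU call are passed in GPR[3..10], so argument
// i >= 9 is fetched from the caller's stack frame at GPR[1] + 0x70 + 8 * (i - 9).
// For example, a hypothetical 10-argument call would read its last two arguments as
// get_stack_arg(9) and get_stack_arg(10).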