// Send a UIC (UFS Interconnect layer) command to the host controller and wait
// for it to complete.
//
// regs:    base of the memory-mapped UFS HC register window.
// command: UIC command opcode (only the low 8 bits are written).
// arg1:    value written to the UIC COMMAND ARG1 register.
// arg3:    value written to the UIC COMMAND ARG3 register (ARG2 is always 0).
//
// Returns ZX_OK on success; ZX_ERR_TIMED_OUT if the controller never becomes
// ready (100ms budget) or never signals completion (500ms budget);
// ZX_ERR_BAD_STATE if the response code in ARG2 is non-zero or the UE (UIC
// error) interrupt bit is set afterwards.
zx_status_t ufshc_send_uic_command(volatile void* regs, uint32_t command,
                                   uint32_t arg1, uint32_t arg3) {
    uint32_t reg_val;
    // Poll (1ms period, 100ms budget) until HCS.UCRDY indicates the
    // controller can accept a new UIC command.
    zx_time_t deadline = zx_clock_get_monotonic() + ZX_MSEC(100);
    while (true) {
        if (readl(regs + REG_CONTROLLER_STATUS) & UFS_HCS_UCRDY)
            break;
        if (zx_clock_get_monotonic() > deadline) {
            UFS_ERROR("UFS HC not ready!\n");
            return ZX_ERR_TIMED_OUT;
        }
        zx_nanosleep(zx_deadline_after(ZX_MSEC(1)));
    }

    // Clear any stale completion/error status before issuing the command.
    writel(UFS_IS_UCCS_BIT | UFS_IS_UE_BIT, regs + REG_INTERRUPT_STATUS);

    // Program the arguments first; writing REG_UIC_COMMAND last is what
    // actually kicks off the command.
    writel(arg1, regs + REG_UIC_COMMAND_ARG_1);
    writel(0x0, regs + REG_UIC_COMMAND_ARG_2);
    writel(arg3, regs + REG_UIC_COMMAND_ARG_3);
    writel(command & 0xFF, regs + REG_UIC_COMMAND);

    // Poll (1ms period, 500ms budget) for UIC Command Completion Status.
    deadline = zx_clock_get_monotonic() + ZX_MSEC(500);
    while (true) {
        if (readl(regs + REG_INTERRUPT_STATUS) & UFS_IS_UCCS_BIT)
            break;
        if (zx_clock_get_monotonic() > deadline) {
            UFS_ERROR("UFS_IS_UCCS_BIT not ready!\n");
            return ZX_ERR_TIMED_OUT;
        }
        zx_nanosleep(zx_deadline_after(ZX_MSEC(1)));
    }

    // clear interrupt status
    writel(UFS_IS_UCCS_BIT, regs + REG_INTERRUPT_STATUS);

    // The low byte of ARG2 holds the command result code; non-zero is failure.
    reg_val = readl(regs + REG_UIC_COMMAND_ARG_2) & 0xFF;
    if (reg_val) {
        UFS_ERROR("Response ERROR!\n");
        return ZX_ERR_BAD_STATE;
    }

    // Also fail if the controller latched a UIC error interrupt meanwhile.
    reg_val = readl(regs + REG_INTERRUPT_STATUS) & UFS_IS_UE_BIT;
    if (reg_val) {
        UFS_ERROR("UFS_IS_UE_BIT ERROR!\n");
        return ZX_ERR_BAD_STATE;
    }

    return ZX_OK;
}
// Worker loop for a load-generator thread: alternate between a "work" phase
// (spinning on pointless double-precision math) and a "sleep" phase, with
// both durations drawn uniformly at random from the configured [min, max]
// millisecond ranges.  Long sleeps are broken into <=10ms naps so the thread
// notices quit_ promptly.  Returns 0 once quit_ becomes true.
int LoadGeneratorThread::Run() {
    constexpr double kMinNum = 1.0;
    constexpr double kMaxNum = 100000000.0;
    uint32_t ticks_per_msec = static_cast<uint32_t>(zx_ticks_per_second() / 1000);
    accumulator_ = MakeRandomDouble(kMinNum, kMaxNum);

    // While it is not time to quit, waste time performing pointless double
    // precision floating point math.
    while (!quit_) {
        // Work phase: spin on FP math until the randomly chosen tick deadline.
        double work_delay = MakeRandomDouble(min_work_msec(), max_work_msec());
        zx_ticks_t work_deadline_ticks =
            zx_ticks_get() + static_cast<zx_ticks_t>(work_delay * ticks_per_msec);

        while (!quit_ && (zx_ticks_get() < work_deadline_ticks)) {
            accumulator_ += MakeRandomDouble(kMinNum, kMaxNum);
            accumulator_ *= MakeRandomDouble(kMinNum, kMaxNum);
            accumulator_ -= MakeRandomDouble(kMinNum, kMaxNum);
            accumulator_ /= MakeRandomDouble(kMinNum, kMaxNum);

            // Clamp so the accumulator cannot drift off to +/-inf over time.
            double tmp = accumulator_;
            accumulator_ = fbl::clamp<double>(tmp, 0.0, kMaxNum);
        }
        if (quit_)
            break;

        // Sleep phase: sleep_delay is in msec; converted to nsec here.
        double sleep_delay = MakeRandomDouble(min_sleep_msec(), max_sleep_msec());
        zx_time_t sleep_deadline =
            zx_deadline_after(static_cast<zx_duration_t>(sleep_delay * 1000000.0));

        do {
            // Cap each nap at 10ms so quit_ is observed with low latency.
            static constexpr zx_duration_t max_sleep = ZX_MSEC(10);
            zx_time_t now = zx_clock_get_monotonic();

            if (now >= sleep_deadline)
                break;

            if (zx_time_sub_time(sleep_deadline, now) > max_sleep) {
                zx_nanosleep(zx_time_add_duration(now, max_sleep));
            } else {
                // Final nap: sleep exactly to the deadline and stop.
                zx_nanosleep(sleep_deadline);
                break;
            }
        } while (!quit_);
    }

    return 0;
}
// Send NOP OUT UPIUs to the device until a valid NOP IN response arrives,
// retrying up to NOP_RETRY_COUNT times with a 10ms pause between attempts.
//
// hba:  host bus adapter state (slot bookkeeping, descriptors).
// regs: base of the memory-mapped UFS HC register window.
//
// Returns ZX_OK on a valid response, ZX_ERR_NO_RESOURCES if no transfer slot
// is free, or the last completion/response error after all retries fail.
//
// Fix: error message typo "resposne" -> "response"; free_slot is initialized
// so the failure log cannot read an indeterminate value.
static zx_status_t ufshc_send_nop_out_cmd(ufs_hba_t* hba, volatile void* regs) {
    uint32_t i;
    uint8_t free_slot = 0;
    zx_status_t status = ZX_OK;

    for (i = 0; i < NOP_RETRY_COUNT; i++) {
        free_slot = ufshc_get_xfer_free_slot(hba);
        if (BAD_SLOT == free_slot)
            return ZX_ERR_NO_RESOURCES;

        ufs_create_nop_out_upiu(hba, free_slot);
        // Flush and invalidate cache before we start transfer
        ufshc_flush_and_invalidate_descs(hba);

        status = ufshc_wait_for_cmd_completion(hba, (1 << free_slot), regs);
        if (status == ZX_OK) {
            if ((status = ufshc_read_nop_resp(hba, free_slot)) == ZX_OK)
                break;
        }
        zx_nanosleep(zx_deadline_after(ZX_MSEC(10)));
    }

    // i only reaches NOP_RETRY_COUNT when every attempt failed.
    if (i == NOP_RETRY_COUNT)
        UFS_ERROR("UFS NOP response FAIL! slot=0x%x status=%d.\n",
                  free_slot, status);

    return status;
}
void ufshc_check_h8(volatile void* regs) { uint32_t tx_fsm_val_0; uint32_t tx_fsm_val_1; uint32_t i; // Unipro VS_mphy_disable tx_fsm_val_0 = ufshc_uic_cmd_read(regs, DME_GET, UPRO_MPHY_CTRL); if (tx_fsm_val_0 != 0x1) UFS_WARN("Unipro VS_mphy_disable is 0x%x!\n", tx_fsm_val_0); ufshc_send_uic_command(regs, DME_SET, UPRO_MPHY_CTRL, 0x0); for (i = 0; i < MPHY_TX_FSM_RETRY_COUNT; i++) { // MPHY TX_FSM_State TX0 tx_fsm_val_0 = ufshc_uic_cmd_read(regs, DME_GET, UPRO_MPHY_FSM_TX0); // MPHY TX_FSM_State TX1 tx_fsm_val_1 = ufshc_uic_cmd_read(regs, DME_GET, UPRO_MPHY_FSM_TX1); if ((tx_fsm_val_0 == 0x1) && (tx_fsm_val_1 == 0x1)) { UFS_DBG("tx_fsm_val_0=0x%x tx_fsm_val_1=0x%x.\n", tx_fsm_val_0, tx_fsm_val_1); break; } zx_nanosleep(zx_deadline_after(ZX_MSEC(2))); } if (i == MPHY_TX_FSM_RETRY_COUNT) UFS_WARN("MPHY TX_FSM state wait H8 timeout!\n"); }
// Issue an SDMMC request, retrying on failure up to |retries| extra times
// with a |wait_time|-millisecond pause between attempts.  Returns the status
// of the final attempt.
zx_status_t sdmmc_request_helper(sdmmc_device_t* dev, sdmmc_req_t* req,
                                 uint8_t retries, uint32_t wait_time) {
    zx_status_t st = sdmmc_request(&dev->host, req);

    while (st != ZX_OK && retries > 0) {
        --retries;
        zx_nanosleep(zx_deadline_after(ZX_MSEC(wait_time)));
        st = sdmmc_request(&dev->host, req);
    }

    return st;
}
// Entry point for each test thread: stash the thread number in errno, sleep
// 100ms, then verify errno survived (errno must be thread-local), mark this
// thread done, and return the thread number (checked by thrd_join callers).
static int thread_entry(void* arg) {
    int num = (int)(intptr_t)arg;
    errno = num;
    unittest_printf("thread %d sleeping for .1 seconds\n", num);
    zx_nanosleep(zx_deadline_after(ZX_MSEC(100)));
    EXPECT_EQ(errno, num, "errno changed by someone!");
    threads_done[num] = 1;
    return num;
}
// Ask the kernel to power off or reboot (per |reason|), logging progress to
// |log|.  Reboots are delayed 3 seconds first.  Never returns: if the
// powerctl syscall comes back, trap forever.
static noreturn void do_powerctl(zx_handle_t log, zx_handle_t rroot, uint32_t reason) {
    const char* r_str;
    if (reason == ZX_SYSTEM_POWERCTL_SHUTDOWN) {
        r_str = "poweroff";
    } else {
        r_str = "reboot";
    }

    if (reason == ZX_SYSTEM_POWERCTL_REBOOT) {
        printl(log, "Waiting 3 seconds...");
        zx_nanosleep(zx_deadline_after(ZX_SEC(3u)));
    }

    printl(log, "Process exited. Executing \"%s\".", r_str);
    zx_system_powerctl(rroot, reason, NULL);
    printl(log, "still here after %s!", r_str);

    for (;;)
        __builtin_trap();
}
// Program a region into the outbound ATU (Address Translation Unit).
//
// The ATU supports 16 regions that can be programmed independently.
//
// index:    which iATU region to program (must be < kAtuRegionCount).
// type:     type of PCIe transaction generated on the PCIe bus.
// cpu_addr: physical source address to translate, in the CPU's address space.
// pci_addr: destination address in the PCIe address space.
// size:     size of the aperture being translated.
//
// Returns ZX_OK once the region-enable bit reads back set, or
// ZX_ERR_TIMED_OUT after kAtuProgramRetries polls.
zx_status_t DwPcie::ProgramOutboundAtu(const uint32_t index,
                                       const uint32_t type,
                                       const zx_paddr_t cpu_addr,
                                       const uintptr_t pci_addr,
                                       const size_t size) {
    // The ATU supports a limited number of regions.
    ZX_DEBUG_ASSERT(index < kAtuRegionCount);

    // Each ATU region has its own bank of registers at this offset from the
    // DBI base
    const size_t bank_offset = (0x3 << 20) | (index << 9);
    volatile uint8_t* atu_base =
        reinterpret_cast<volatile uint8_t*>(dbi_.get()) + bank_offset;

    volatile atu_ctrl_regs_t* regs =
        reinterpret_cast<volatile atu_ctrl_regs_t*>(atu_base);

    // Memory transactions that are in the following range will get translated
    // to PCI bus transactions:
    //
    // [cpu_addr, cpu_addr + size - 1]
    regs->unroll_lower_base = lo32(cpu_addr);
    regs->unroll_upper_base = hi32(cpu_addr);
    regs->unroll_limit = lo32(cpu_addr + size - 1);

    // Target of the transactions above.
    regs->unroll_lower_target = lo32(pci_addr);
    regs->unroll_upper_target = hi32(pci_addr);

    // Region Ctrl 1 contains a number of fields. The Low 5 bits of the field
    // indicate the type of transaction to dispatch onto the PCIe bus.
    regs->region_ctrl1 = type;

    // Each region can individually be marked as Enabled or Disabled.
    regs->region_ctrl2 |= kAtuRegionCtrlEnable;
    // NOTE(review): shift mode is enabled unconditionally for every region —
    // confirm this is intended for non-config regions as well.
    regs->region_ctrl2 |= kAtuCfgShiftMode;

    // Wait for the enable to take effect by reading the bit back.
    for (unsigned int i = 0; i < kAtuProgramRetries; ++i) {
        if (regs->region_ctrl2 & kAtuRegionCtrlEnable) {
            return ZX_OK;
        }

        // Wait a little bit before trying again.
        zx_nanosleep(zx_deadline_after(ZX_USEC(kAtuWaitEnableTimeoutUs)));
    }

    return ZX_ERR_TIMED_OUT;
}
bool c11_thread_test(void) { BEGIN_TEST; thrd_t thread; int return_value = 99; unittest_printf("Welcome to thread test!\n"); memset((void*)threads_done, 0, sizeof(threads_done)); for (int i = 0; i != 4; ++i) { int return_value = 99; int ret = thrd_create_with_name(&thread, thread_entry, (void*)(intptr_t)i, "c11 thread test"); ASSERT_EQ(ret, thrd_success, "Error while creating thread"); ret = thrd_join(thread, &return_value); ASSERT_EQ(ret, thrd_success, "Error while thread join"); ASSERT_EQ(return_value, i, "Incorrect return from thread"); } unittest_printf("Attempting to create thread with a null name. This should succeed\n"); int ret = thrd_create_with_name(&thread, thread_entry, (void*)(intptr_t)4, NULL); ASSERT_EQ(ret, thrd_success, "Error returned from thread creation"); zx_handle_t handle = thrd_get_zx_handle(thread); ASSERT_NE(handle, ZX_HANDLE_INVALID, "got invalid thread handle"); // Prove this is a valid handle by duplicating it. zx_handle_t dup_handle; zx_status_t status = zx_handle_duplicate(handle, ZX_RIGHT_SAME_RIGHTS, &dup_handle); ASSERT_EQ(status, 0, "failed to duplicate thread handle"); ret = thrd_join(thread, &return_value); ASSERT_EQ(ret, thrd_success, "Error while thread join"); ASSERT_EQ(zx_handle_close(dup_handle), ZX_OK, "failed to close duplicate handle"); ASSERT_EQ(return_value, 4, "Incorrect return from thread"); ret = thrd_create_with_name(&thread, thread_entry, (void*)(intptr_t)5, NULL); ASSERT_EQ(ret, thrd_success, "Error returned from thread creation"); ret = thrd_detach(thread); ASSERT_EQ(ret, thrd_success, "Error while thread detach"); while (!threads_done[5]) zx_nanosleep(zx_deadline_after(ZX_MSEC(100))); thread_entry((void*)(intptr_t)6); ASSERT_TRUE(threads_done[6], "All threads should have completed"); END_TEST; }
// Enable the UFS host controller, retrying the enable sequence up to three
// times.  Each attempt writes the enable bit, sleeps 5ms, then waits (up to
// 1s) for the controller to report active.  Logs and returns the last
// failure status if every attempt fails.
static zx_status_t ufshc_enable(ufshc_dev_t* dev) {
    volatile void* regs = dev->ufshc_mmio.vaddr;
    zx_status_t status = ZX_OK;

    for (int32_t attempt = 0; attempt < 3; attempt++) {
        writel(CONTROLLER_ENABLE, regs + REG_CONTROLLER_ENABLE);
        zx_nanosleep(zx_deadline_after(ZX_MSEC(5)));

        // wait for the host controller to complete initialization
        status = ufshc_wait_for_active(regs, CONTROLLER_ENABLE, ZX_SEC(1));
        if (status == ZX_OK)
            return status;
    }

    UFS_ERROR("Controller not active status=%d.\n", status);
    return status;
}
// Stage a flip to canvas index |idx|: grab a free RDMA channel (polling up
// to kMaxRetries times, 1ms apart), fill its table with the new OSD1 block
// configuration, and arm the RDMA engine to apply it on the next VSync.
void Osd::FlipOnVsync(uint8_t idx) {
    // Get the first available channel
    int rdma_channel = GetNextAvailableRdmaChannel();
    uint8_t retry_count = 0;
    while (rdma_channel == -1 && retry_count++ < kMaxRetries) {
        zx_nanosleep(zx_deadline_after(ZX_MSEC(1)));
        rdma_channel = GetNextAvailableRdmaChannel();
    }

    if (rdma_channel < 0) {
        // Every channel stayed busy through all retries; drop this flip.
        // (ZX_DEBUG_ASSERT compiles out in release builds, so this is a
        // silent drop there.)
        ZX_DEBUG_ASSERT(false);
        return;
    }

    DISP_SPEW("Channel used is %d\n", rdma_channel);

    // Update CFG_W0 with correct Canvas Index
    uint32_t cfg_w0 =
        (idx << VpuViuOsd1BlkCfgTblAddrShift) | VpuViuOsd1BlkCfgLittleEndian |
        (VpuViuOsd1BlkCfgOsdBlkMode32Bit << VpuViuOsd1BlkCfgOsdBlkModeShift) |
        (VpuViuOsd1BlkCfgColorMatrixArgb << VpuViuOsd1BlkCfgColorMatrixShift);
    SetRdmaTableValue(rdma_channel, IDX_CFG_W0, cfg_w0);
    // OR bit 0 into the current CTRL_STAT value read from the VPU.
    SetRdmaTableValue(rdma_channel, IDX_CTRL_STAT,
                      vpu_mmio_->Read32(VPU_VIU_OSD1_CTRL_STAT) | (1 << 0));
    FlushRdmaTable(rdma_channel);

    // Write the start and end address of the table. End address is the last address that the
    // RDMA engine reads from.
    vpu_mmio_->Write32(static_cast<uint32_t>(rdma_chnl_container_[rdma_channel].phys_offset),
                       VPU_RDMA_AHB_START_ADDR(rdma_channel));
    vpu_mmio_->Write32(static_cast<uint32_t>(rdma_chnl_container_[rdma_channel].phys_offset +
                                             (sizeof(RdmaTable) * kRdmaTableMaxSize) - 4),
                       VPU_RDMA_AHB_END_ADDR(rdma_channel));

    // Enable Auto mode: Non-Increment, VSync Interrupt Driven, Write
    uint32_t regVal = vpu_mmio_->Read32(VPU_RDMA_ACCESS_AUTO);
    regVal |= RDMA_ACCESS_AUTO_INT_EN(rdma_channel); // VSYNC interrupt source
    regVal |= RDMA_ACCESS_AUTO_WRITE(rdma_channel);  // Write
    vpu_mmio_->Write32(regVal, VPU_RDMA_ACCESS_AUTO);
}
// Poll REG_CONTROLLER_ENABLE until every bit in |mask| reads back set, or
// |timeout| elapses (5us between polls).
//
// Fix: the readiness test previously compared (reg_value & mask) against the
// literal 1, which is only correct when |mask| is bit 0 (as it happens to be
// for CONTROLLER_ENABLE).  Comparing against |mask| makes the helper work
// for any enable mask without changing behavior for the existing caller.
//
// Returns ZX_OK when active, ZX_ERR_TIMED_OUT on deadline expiry.
static zx_status_t ufshc_wait_for_active(volatile void* regs, const uint32_t mask,
                                         zx_time_t timeout) {
    zx_time_t deadline = zx_clock_get_monotonic() + timeout;

    while (true) {
        uint32_t reg_value = readl(regs + REG_CONTROLLER_ENABLE);
        if ((reg_value & mask) == mask) {
            UFS_DBG("UFS HC controller is active.\n");
            break;
        }
        UFS_DBG("UFS HC CTRL_EN=0x%x.\n", reg_value);

        if (zx_clock_get_monotonic() > deadline) {
            UFS_ERROR("UFS HC: timed out while waiting for reset!\n");
            return ZX_ERR_TIMED_OUT;
        }
        // Brief nap between polls to avoid hammering the register bus.
        zx_nanosleep(zx_deadline_after(ZX_USEC(5)));
    }

    return ZX_OK;
}
// Bring up the UFS link: issue the UIC link-startup command up to 4 times,
// waiting for the command-completion interrupt after each attempt, and
// succeed once the Device Present bit appears in the controller status.
// Returns ZX_OK on link-up, ZX_ERR_TIMED_OUT after all retries fail.
static zx_status_t ufshc_link_startup(volatile void* regs) {
    int32_t retry = 4;
    uint32_t i;

    // Clear every pending interrupt before starting.
    writel(0xFFFFFFFF, regs + REG_INTERRUPT_STATUS);

    while (retry-- > 0) {
        // Clear any stale UIC-command-completion bit from a prior attempt.
        if (readl(regs + REG_INTERRUPT_STATUS) & UFS_IS_UCCS_BIT)
            writel(UFS_IS_UCCS_BIT, regs + REG_INTERRUPT_STATUS);

        // UFS link startup begin
        writel(0, regs + REG_UIC_COMMAND_ARG_1);
        writel(0, regs + REG_UIC_COMMAND_ARG_2);
        writel(0, regs + REG_UIC_COMMAND_ARG_3);
        writel(UIC_LINK_STARTUP_CMD & 0xFF, regs + REG_UIC_COMMAND);

        // Poll (2ms period) for the UIC command-completion interrupt.
        for (i = 0; i <= LINK_STARTUP_UCCS_RETRY_COUNT; i++) {
            if (readl(regs + REG_INTERRUPT_STATUS) & UFS_IS_UCCS_BIT) {
                writel(UFS_IS_UCCS_BIT, regs + REG_INTERRUPT_STATUS);
                UFS_DBG("UFS HC Link INT status OK.\n");
                break;
            }
            zx_nanosleep(zx_deadline_after(ZX_MSEC(2)));
        }

        // Device Present in the controller status means the link came up.
        if (readl(regs + REG_CONTROLLER_STATUS) & UFS_HCS_DP_BIT) {
            writel(UFS_IS_UE_BIT, regs + REG_INTERRUPT_STATUS);
            // NOTE(review): UFS_IS_ULSS_BIT is an interrupt-status mask but is
            // tested against REG_CONTROLLER_STATUS here — confirm this should
            // not be reading REG_INTERRUPT_STATUS instead.
            if (readl(regs + REG_CONTROLLER_STATUS) & UFS_IS_ULSS_BIT)
                writel(UFS_IS_ULSS_BIT, regs + REG_INTERRUPT_STATUS);
            UFS_DBG("UFS HC link_startup startup OK.\n");
            ufshc_reg_read_clear(regs);
            return ZX_OK;
        }
    }

    UFS_ERROR("UFS HC link_startup startup Error!\n");
    return ZX_ERR_TIMED_OUT;
}
// One-time host-controller initialization: read capabilities and the UFS
// version, run the pre-link-startup sequence, bring up the link (after a
// 50ms pause), then finish driver init.  Returns the first failing status.
static zx_status_t ufshc_host_init(ufshc_dev_t* dev) {
    ufs_hba_t* hba = &dev->ufs_hba;
    volatile void* regs = dev->ufshc_mmio.vaddr;

    // Read capabilities registers
    ufshc_read_capabilities(hba, regs);
    // Get UFS version supported by the controller
    ufshc_get_ufs_version(hba, regs);

    zx_status_t status = ufshc_pre_link_startup(hba, regs);
    if (status != ZX_OK)
        return status;

    // 50ms pause between pre-link setup and link startup.
    zx_nanosleep(zx_deadline_after(ZX_MSEC(50)));

    status = ufshc_link_startup(regs);
    if (status != ZX_OK)
        return status;

    return ufshc_drv_init(dev);
}
int main(int argc, char** argv) { bool cpu_stats = false; bool mem_stats = false; zx_duration_t delay = ZX_SEC(1); int num_loops = -1; bool timestamp = false; int c; while ((c = getopt(argc, argv, "cd:n:hmt")) > 0) { switch (c) { case 'c': cpu_stats = true; break; case 'd': delay = ZX_SEC(atoi(optarg)); if (delay == 0) { fprintf(stderr, "Bad -d value '%s'\n", optarg); print_help(stderr); return 1; } break; case 'n': num_loops = atoi(optarg); if (num_loops == 0) { fprintf(stderr, "Bad -n value '%s'\n", optarg); print_help(stderr); return 1; } break; case 'h': print_help(stdout); return 0; case 'm': mem_stats = true; break; case 't': timestamp = true; break; default: fprintf(stderr, "Unknown option\n"); print_help(stderr); return 1; } } if (!cpu_stats && !mem_stats) { fprintf(stderr, "No statistics selected\n"); print_help(stderr); return 1; } zx_handle_t root_resource; zx_status_t ret = get_root_resource(&root_resource); if (ret != ZX_OK) { return ret; } // set stdin to non blocking so we can intercept ctrl-c. // TODO: remove once ctrl-c works in the shell fcntl(STDIN_FILENO, F_SETFL, O_NONBLOCK); for (;;) { zx_time_t next_deadline = zx_deadline_after(delay); // Print the current UTC time with milliseconds as // an ISO 8601 string. if (timestamp) { struct timespec now; timespec_get(&now, TIME_UTC); struct tm nowtm; gmtime_r(&now.tv_sec, &nowtm); char tbuf[40]; strftime(tbuf, sizeof(tbuf), "%FT%T", &nowtm); printf("\n--- %s.%03ldZ ---\n", tbuf, now.tv_nsec / (1000 * 1000)); } if (cpu_stats) { ret |= cpustats(root_resource, delay); } if (mem_stats) { ret |= memstats(root_resource); } if (ret != ZX_OK) break; if (num_loops > 0) { if (--num_loops == 0) { break; } } else { // TODO: replace once ctrl-c works in the shell char c; int err; while ((err = read(STDIN_FILENO, &c, 1)) > 0) { if (c == 0x3) return 0; } } zx_nanosleep(next_deadline); } zx_handle_close(root_resource); return ret; }
void AmlEthernet::EthBoardResetPhy() { gpios_[PHY_RESET].Write(0); zx_nanosleep(zx_deadline_after(ZX_MSEC(100))); gpios_[PHY_RESET].Write(1); zx_nanosleep(zx_deadline_after(ZX_MSEC(100))); }
// Spin forever issuing zx_nanosleep with a deadline of 1 (already in the
// past) — a tight syscall-spam loop.  Never returns.
int nanospam(void) {
    while (1) {
        zx_nanosleep(1);
    }
}
// Yield the CPU by sleeping until deadline 0, which has already passed.
WEAK void halide_thread_yield() {
    zx_nanosleep(0);
}
// Play |source| to completion through the device ring buffer.
//
// Negotiates the source's format with the device, allocates a ring buffer,
// then loops: top up the ring (with source frames, or silence once the
// source is finished), start playback on the first pass, and block on the
// ring-buffer channel for position notifications to learn how far the
// hardware read pointer has advanced.  Once the source is finished and the
// read pointer has covered the remaining queued audio ("playout" distance),
// playback is done.  The ring buffer is always stopped before returning.
//
// Returns ZX_OK on a complete playout, otherwise the first error hit.
zx_status_t AudioOutput::Play(AudioSource& source) {
    zx_status_t res;

    if (source.finished())
        return ZX_OK;

    AudioSource::Format format;
    res = source.GetFormat(&format);
    if (res != ZX_OK) {
        printf("Failed to get source's format (res %d)\n", res);
        return res;
    }

    res = SetFormat(format.frame_rate, format.channels, format.sample_format);
    if (res != ZX_OK) {
        printf("Failed to set source format [%u Hz, %hu Chan, %08x fmt] (res %d)\n",
               format.frame_rate, format.channels, format.sample_format, res);
        return res;
    }

    // ALSA under QEMU required huge buffers.
    //
    // TODO(johngro) : Add the ability to determine what type of read-ahead the
    // HW is going to require so we can adjust our buffer size to what the HW
    // requires, not what ALSA under QEMU requires.
    res = GetBuffer(480 * 20 * 3, 3);
    if (res != ZX_OK) {
        printf("Failed to set output format (res %d)\n", res);
        return res;
    }

    memset(rb_virt_, 0, rb_sz_);

    auto buf = reinterpret_cast<uint8_t*>(rb_virt_);
    // rd/wr are byte offsets into the ring; playout_rd/playout_amt track how
    // much queued audio remains to be consumed after the source finishes.
    uint32_t rd, wr;
    uint32_t playout_rd, playout_amt;
    bool started = false;
    rd = wr = 0;
    playout_rd = playout_amt = 0;

    while (true) {
        uint32_t bytes_read, junk;
        audio_rb_position_notify_t pos_notif;
        zx_signals_t sigs;

        // Top up the buffer. In theory, we should only need to loop 2 times in
        // order to handle a ring discontinuity
        for (uint32_t i = 0; i < 2; ++i) {
            // One byte is always left unused so rd == wr means "empty".
            uint32_t space = (rb_sz_ + rd - wr - 1) % rb_sz_;
            // Cap this pass at the contiguous run up to the end of the ring.
            uint32_t todo = fbl::min(space, rb_sz_ - wr);
            ZX_DEBUG_ASSERT(space < rb_sz_);

            if (!todo)
                break;

            if (source.finished()) {
                // Source exhausted: pad the remaining space with silence.
                memset(buf + wr, 0, todo);
                zx_cache_flush(buf + wr, todo, ZX_CACHE_FLUSH_DATA);
                wr += todo;
            } else {
                uint32_t done;
                res = source.GetFrames(buf + wr, fbl::min(space, rb_sz_ - wr), &done);
                if (res != ZX_OK) {
                    printf("Error packing frames (res %d)\n", res);
                    break;
                }
                zx_cache_flush(buf + wr, done, ZX_CACHE_FLUSH_DATA);
                wr += done;

                if (source.finished()) {
                    // Record where the hardware read pointer is now and how
                    // many bytes it must still consume for full playout.
                    playout_rd = rd;
                    playout_amt = (rb_sz_ + wr - rd) % rb_sz_;

                    // We have just become finished.  Reset the loop counter
                    // and start over, this time filling with as much silence
                    // as we can.
                    i = 0;
                }
            }

            if (wr < rb_sz_)
                break;

            // Wrapped exactly to the end of the ring; continue from offset 0.
            ZX_DEBUG_ASSERT(wr == rb_sz_);
            wr = 0;
        }

        if (res != ZX_OK)
            break;

        // If we have not started yet, do so.
        if (!started) {
            res = StartRingBuffer();
            if (res != ZX_OK) {
                printf("Failed to start ring buffer!\n");
                break;
            }
            started = true;
        }

        // Block until the driver sends a position notification (or hangs up).
        res = rb_ch_.wait_one(ZX_CHANNEL_READABLE | ZX_CHANNEL_PEER_CLOSED,
                              zx::time::infinite(), &sigs);

        if (res != ZX_OK) {
            printf("Failed to wait for notificiation (res %d)\n", res);
            break;
        }

        if (sigs & ZX_CHANNEL_PEER_CLOSED) {
            printf("Peer closed connection during playback!\n");
            break;
        }

        res = rb_ch_.read(0, &pos_notif, sizeof(pos_notif), &bytes_read, nullptr, 0, &junk);
        if (res != ZX_OK) {
            printf("Failed to read notification from ring buffer channel (res %d)\n", res);
            break;
        }

        if (bytes_read != sizeof(pos_notif)) {
            printf("Bad size when reading notification from ring buffer channel (%u != %zu)\n",
                   bytes_read, sizeof(pos_notif));
            res = ZX_ERR_INTERNAL;
            break;
        }

        if (pos_notif.hdr.cmd != AUDIO_RB_POSITION_NOTIFY) {
            printf("Unexpected command type when reading notification from ring "
                   "buffer channel (cmd %04x)\n",
                   pos_notif.hdr.cmd);
            res = ZX_ERR_INTERNAL;
            break;
        }

        rd = pos_notif.ring_buffer_pos;

        // rd has moved.  If the source has finished and rd has moved at least
        // the playout distance, we are finished.
        if (source.finished()) {
            uint32_t dist = (rb_sz_ + rd - playout_rd) % rb_sz_;
            if (dist >= playout_amt)
                break;

            playout_amt -= dist;
            playout_rd = rd;
        }
    }

    if (res == ZX_OK) {
        // We have already let the DMA engine catch up, but we still need to
        // wait for the fifo to play out.  For now, just hard code this as
        // 30uSec.
        //
        // NOTE(review): the comment above says 30uSec, but the sleep below is
        // ZX_MSEC(30) (30 milliseconds) — confirm which is intended.
        //
        // TODO: base this on the start time and the number of frames queued
        // instead of just making a number up.
        zx_nanosleep(zx_deadline_after(ZX_MSEC(30)));
    }

    // Always stop the ring buffer; preserve the first error, if any.
    zx_status_t stop_res = StopRingBuffer();
    if (res == ZX_OK)
        res = stop_res;

    return res;
}