/**
 * Stop and close the playback stream, if one is open.
 *
 * Errors from both the stop and close calls are logged but otherwise
 * ignored (there is nothing further to do during teardown). The stream
 * handle is cleared afterwards so the nullptr guards elsewhere in this
 * class keep working and the stream is never stopped/closed twice.
 */
void PlayAudioEngine::closeOutputStream(){

  if (playStream_ != nullptr){
    aaudio_result_t result = AAudioStream_requestStop(playStream_);
    if (result != AAUDIO_OK){
      LOGE("Error stopping output stream. %s", AAudio_convertResultToText(result));
    }

    result = AAudioStream_close(playStream_);
    if (result != AAUDIO_OK){
      LOGE("Error closing output stream. %s", AAudio_convertResultToText(result));
    }

    // Clear the dangling handle so a repeated close (or any later call that
    // checks playStream_ != nullptr) does not touch a closed stream.
    playStream_ = nullptr;
  }
}
/**
 * Estimate the latency between writing a frame into the output stream and
 * that frame reaching the audio hardware.
 *
 * Method:
 *  1) Obtain a (frameIndex, presentationTime) pair for a frame already
 *     presented to the hardware (@see AAudioStream_getTimestamp).
 *  2) Extrapolate the presentation time of the *next* frame to be written.
 *  3) Treat "now" as the moment that next frame is written.
 *  4) latency = extrapolated presentation time - now.
 *
 * @param stream the stream being written to
 * @param latencyMillis receives the latency in milliseconds (only written
 *        on success)
 * @return AAUDIO_OK or a negative error. Errors shortly after stream start
 *         are normal because timestamps are not yet available.
 */
aaudio_result_t
PlayAudioEngine::calculateCurrentOutputLatencyMillis(AAudioStream *stream, double *latencyMillis) {

  // A frame index that has already been presented, and when it was presented.
  int64_t knownFrameIndex;
  int64_t knownFramePresentedAt;
  aaudio_result_t result = AAudioStream_getTimestamp(stream,
                                                     CLOCK_MONOTONIC,
                                                     &knownFrameIndex,
                                                     &knownFramePresentedAt);
  if (result != AAUDIO_OK) {
    LOGE("Error calculating latency: %s", AAudio_convertResultToText(result));
    return result;
  }

  // Index of the next frame the application will write.
  const int64_t nextWriteIndex = AAudioStream_getFramesWritten(stream);

  // Convert the frame distance into nanoseconds and extrapolate forward
  // from the known presentation time.
  const int64_t framesAhead = nextWriteIndex - knownFrameIndex;
  const int64_t nanosAhead = (framesAhead * NANOS_PER_SECOND) / sampleRate_;
  const int64_t nextFramePresentedAt = knownFramePresentedAt + nanosAhead;

  // Assume the next frame is being written right now.
  const int64_t nextFrameWrittenAt = get_time_nanoseconds(CLOCK_MONOTONIC);

  *latencyMillis = (double) (nextFramePresentedAt - nextFrameWrittenAt)
                         / NANOS_PER_MILLISECOND;

  return result;
}
/**
 * Creates an audio stream for playback. The audio device used will depend on playbackDeviceId_.
 */
void PlayAudioEngine::createPlaybackStream(){

  AAudioStreamBuilder* builder = createStreamBuilder();

  if (builder != nullptr){

    setupPlaybackStreamParameters(builder);

    aaudio_result_t result = AAudioStreamBuilder_openStream(builder, &playStream_);

    if (result == AAUDIO_OK && playStream_ != nullptr){

      // check that we got PCM_FLOAT format
      if (sampleFormat_ != AAudioStream_getFormat(playStream_)) {
        LOGW("Sample format is not PCM_FLOAT");
      }

      sampleRate_ = AAudioStream_getSampleRate(playStream_);
      framesPerBurst_ = AAudioStream_getFramesPerBurst(playStream_);

      // Set the buffer size to the burst size - this will give us the minimum possible latency
      AAudioStream_setBufferSizeInFrames(playStream_, framesPerBurst_);
      bufSizeInFrames_ = framesPerBurst_;

      PrintAudioStreamInfo(playStream_);
      prepareOscillators();

      // Start the stream - the dataCallback function will start being called
      result = AAudioStream_requestStart(playStream_);
      if (result != AAUDIO_OK) {
        LOGE("Error starting stream. %s", AAudio_convertResultToText(result));
      }

      // Store the underrun count so we can tune the latency in the dataCallback
      playStreamUnderrunCount_ = AAudioStream_getXRunCount(playStream_);

    } else {
      LOGE("Failed to create stream. Error: %s", AAudio_convertResultToText(result));
    }

  AAudioStreamBuilder_delete(builder);

  } else {
    LOGE("Unable to obtain an AAudioStreamBuilder object");
  }
}
/**
 * Creates a stream builder which can be used to construct streams.
 *
 * @return a new stream builder object, or nullptr if creation failed
 *         (the failure is logged).
 */
AAudioStreamBuilder* PlayAudioEngine::createStreamBuilder() {

  AAudioStreamBuilder *streamBuilder = nullptr;
  const aaudio_result_t creationResult = AAudio_createStreamBuilder(&streamBuilder);
  if (creationResult != AAUDIO_OK) {
    LOGE("Error creating stream builder: %s", AAudio_convertResultToText(creationResult));
  }
  // On failure streamBuilder remains nullptr; callers check for that.
  return streamBuilder;
}
/**
 * @see errorCallback function at top of this file
 *
 * Invoked by AAudio when the stream hits an error. If the stream has moved
 * to the DISCONNECTED state (e.g. the audio device changed), the stream is
 * restarted from a freshly spawned thread rather than from this callback
 * thread.
 *
 * NOTE(review): streamRestartThread_ is overwritten with a raw `new` on
 * every disconnect; the previous std::thread object (if any) is neither
 * joined nor deleted here, which leaks it. Whether it is cleaned up
 * elsewhere is not visible in this file - confirm, or consider detaching /
 * owning it via std::unique_ptr.
 */
void PlayAudioEngine::errorCallback(AAudioStream *stream,
                   aaudio_result_t error){

  // This callback should only ever fire for our playback stream.
  assert(stream == playStream_);
  LOGD("errorCallback result: %s", AAudio_convertResultToText(error));

  aaudio_stream_state_t streamState = AAudioStream_getState(playStream_);
  if (streamState == AAUDIO_STREAM_STATE_DISCONNECTED){

    // Handle stream restart on a separate thread
    std::function<void(void)> restartStream = std::bind(&PlayAudioEngine::restartStream, this);
    streamRestartThread_ = new std::thread(restartStream);
  }
}
/**
 * @see dataCallback function at top of this file
 *
 * Renders the next numFrames of audio into audioData and, as a side effect,
 * tunes the stream's buffer size based on the underrun count. Runs on the
 * AAudio callback thread, so it traces instead of logging (tracing does not
 * block).
 */
aaudio_data_callback_result_t PlayAudioEngine::dataCallback(AAudioStream *stream,
                                                        void *audioData,
                                                        int32_t numFrames) {
  // This callback should only ever fire for our playback stream.
  assert(stream == playStream_);

  int32_t underrunCount = AAudioStream_getXRunCount(playStream_);
  // NOTE(review): bufferSize holds a frame count here, and later the return
  // of AAudioStream_setBufferSizeInFrames (frame count or negative error);
  // int32_t would express that intent more directly than aaudio_result_t.
  aaudio_result_t bufferSize = AAudioStream_getBufferSizeInFrames(playStream_);
  bool hasUnderrunCountIncreased = false;
  bool shouldChangeBufferSize = false;

  // The XRun count is cumulative; compare against the value cached on the
  // previous callback (or at stream creation) to detect *new* underruns.
  if (underrunCount > playStreamUnderrunCount_){
    playStreamUnderrunCount_ = underrunCount;
    hasUnderrunCountIncreased = true;
  }

  if (hasUnderrunCountIncreased && bufferSizeSelection_ == BUFFER_SIZE_AUTOMATIC){

    /**
     * This is a buffer size tuning algorithm. If the number of underruns (i.e. instances where
     * we were unable to supply sufficient data to the stream) has increased since the last callback
     * we will try to increase the buffer size by the burst size, which will give us more protection
     * against underruns in future, at the cost of additional latency.
     */
    bufferSize += framesPerBurst_; // Increase buffer size by one burst
    shouldChangeBufferSize = true;
  } else if (bufferSizeSelection_ > 0 && (bufferSizeSelection_ * framesPerBurst_) != bufferSize){

    // If the buffer size selection has changed then update it here
    // (manual mode: selection is expressed as a multiple of the burst size).
    bufferSize = bufferSizeSelection_ * framesPerBurst_;
    shouldChangeBufferSize = true;
  }

  if (shouldChangeBufferSize){
    LOGD("Setting buffer size to %d", bufferSize);
    // The set call returns the size actually applied, or a negative error.
    bufferSize = AAudioStream_setBufferSizeInFrames(stream, bufferSize);
    if (bufferSize > 0) {
      bufSizeInFrames_ = bufferSize;
    } else {
      LOGE("Error setting buffer size: %s", AAudio_convertResultToText(bufferSize));
    }
  }

  /**
   * The following output can be seen by running a systrace. Tracing is preferable to logging
   * inside the callback since tracing does not block.
   *
   * See https://developer.android.com/studio/profile/systrace-commandline.html
   */
  Trace::beginSection("numFrames %d, Underruns %d, buffer size %d",
                      numFrames, underrunCount, bufferSize);

  int32_t samplesPerFrame = sampleChannels_;

  // If the tone is on we need to use our synthesizer to render the audio data for the sine waves.
  // The buffer is interleaved: one oscillator writes starting at sample 0, the
  // other (stereo only) at sample 1, each stepping by samplesPerFrame.
  // NOTE(review): sineOscRight_ writes offset 0 and sineOscLeft_ offset 1 -
  // looks like the left/right naming may be swapped; confirm against the
  // intended channel order.
  if (isToneOn_) {
    sineOscRight_->render(static_cast<float *>(audioData),
                                      samplesPerFrame, numFrames);
    if (sampleChannels_ == 2) {
      sineOscLeft_->render(static_cast<float *>(audioData) + 1,
                                       samplesPerFrame, numFrames);
    }
  } else {
    // Tone off: output silence (float zeros) for every sample in the buffer.
    memset(static_cast<uint8_t *>(audioData), 0,
           sizeof(float) * samplesPerFrame * numFrames);
  }

  // Refresh the latency estimate; errors inside are logged and ignored here.
  calculateCurrentOutputLatencyMillis(stream, &currentOutputLatencyMillis_);

  Trace::endSection();
  return AAUDIO_CALLBACK_RESULT_CONTINUE;
}
/**
 * Open a stream with the requested parameters and verify that every
 * parameter which was explicitly specified is honored by the stream we
 * actually got (unspecified values may be chosen freely by the framework).
 * A failure to open at all is tolerated - some combinations are legitimately
 * unsupported on a given device.
 *
 * @param direction AAUDIO_DIRECTION_INPUT or AAUDIO_DIRECTION_OUTPUT
 * @param channelCount requested channel count, or AAUDIO_UNSPECIFIED
 * @param sampleRate requested rate in Hz, or AAUDIO_UNSPECIFIED
 * @param format requested sample format, or AAUDIO_FORMAT_UNSPECIFIED
 */
static void testOpenOptions(aaudio_direction_t direction,
                            int32_t channelCount,
                            int32_t sampleRate,
                            aaudio_format_t format) {

    aaudio_result_t result = AAUDIO_OK;

    int32_t bufferCapacity;
    int32_t framesPerBurst = 0;

    int32_t actualChannelCount = 0;
    int32_t actualSampleRate = 0;
    aaudio_format_t actualDataFormat = AAUDIO_FORMAT_UNSPECIFIED;
    aaudio_sharing_mode_t actualSharingMode = AAUDIO_SHARING_MODE_SHARED;
    aaudio_direction_t actualDirection = direction;

    AAudioStreamBuilder *aaudioBuilder = nullptr;
    AAudioStream *aaudioStream = nullptr;

    printf("TestOpen: dir = %d, chans = %3d, rate = %6d format = %d\n",
           direction, channelCount, sampleRate, format);

    // Use an AAudioStreamBuilder to contain requested parameters.
    ASSERT_EQ(AAUDIO_OK, AAudio_createStreamBuilder(&aaudioBuilder));

    // Request stream properties.
    AAudioStreamBuilder_setDirection(aaudioBuilder, direction);
    AAudioStreamBuilder_setSampleRate(aaudioBuilder, sampleRate);
    AAudioStreamBuilder_setChannelCount(aaudioBuilder, channelCount);
    AAudioStreamBuilder_setFormat(aaudioBuilder, format);
    AAudioStreamBuilder_setDataCallback(aaudioBuilder, MyDataCallbackProc, nullptr);

    //AAudioStreamBuilder_setPerformanceMode(aaudioBuilder, AAUDIO_PERFORMANCE_MODE_NONE);
    AAudioStreamBuilder_setPerformanceMode(aaudioBuilder, AAUDIO_PERFORMANCE_MODE_LOW_LATENCY);
    //AAudioStreamBuilder_setPerformanceMode(aaudioBuilder, AAUDIO_PERFORMANCE_MODE_POWER_SAVING);

    // Create an AAudioStream using the Builder.
    result = AAudioStreamBuilder_openStream(aaudioBuilder, &aaudioStream);
    if (result != AAUDIO_OK) {
        printf("Stream not opened! That may be OK.\n");
        goto finish;
    }

    // Check to see what kind of stream we actually got.
    actualSampleRate = AAudioStream_getSampleRate(aaudioStream);
    actualChannelCount = AAudioStream_getChannelCount(aaudioStream);
    actualDataFormat = AAudioStream_getFormat(aaudioStream);
    actualDirection = AAudioStream_getDirection(aaudioStream);

    // Print the *actual* properties (previously this echoed the requested
    // direction, which hid any mismatch).
    printf("          dir = %d, chans = %3d, rate = %6d format = %d\n",
           actualDirection, actualChannelCount, actualSampleRate, actualDataFormat);

    // If we ask for something specific then we should get that.
    if (channelCount != AAUDIO_UNSPECIFIED) {
        EXPECT_EQ(channelCount, actualChannelCount);
    }
    if (sampleRate != AAUDIO_UNSPECIFIED) {
        EXPECT_EQ(sampleRate, actualSampleRate);
    }
    if (format != AAUDIO_FORMAT_UNSPECIFIED) {
        EXPECT_EQ(format, actualDataFormat);
    }
    EXPECT_EQ(direction, actualDirection);

    // This is the number of frames that are read in one chunk by a DMA controller
    // or a DSP or a mixer.
    framesPerBurst = AAudioStream_getFramesPerBurst(aaudioStream);
    bufferCapacity = AAudioStream_getBufferCapacityInFrames(aaudioStream);
    printf("          bufferCapacity = %d, remainder = %d\n",
           bufferCapacity, bufferCapacity % framesPerBurst);

finish:
    // The open-failure path jumps here with aaudioStream == nullptr; only
    // close a stream we actually obtained.
    if (aaudioStream != nullptr) {
        AAudioStream_close(aaudioStream);
    }
    AAudioStreamBuilder_delete(aaudioBuilder);
    printf("          result = %d = %s\n", result, AAudio_convertResultToText(result));
}
/**
 * Play a sine sweep through an AAudio callback for NUM_SECONDS, polling the
 * stream state once per second, then stop and close the stream cleanly.
 * Returns EXIT_SUCCESS, or EXIT_FAILURE after closing the player on error.
 */
int main(int argc, char **argv)
{
    (void)argc; // unused
    AAudioSimplePlayer player;
    SineThreadedData_t myData;
    aaudio_result_t result;

    // Make printf print immediately so that debug info is not stuck
    // in a buffer if we hang or crash.
    setvbuf(stdout, nullptr, _IONBF, (size_t) 0);
    printf("%s - Play a sine sweep using an AAudio callback\n", argv[0]);

    myData.schedulerChecked = false;

    result = player.open(2, 44100, AAUDIO_FORMAT_PCM_FLOAT,
                         SimplePlayerDataCallbackProc, SimplePlayerErrorCallbackProc, &myData);
    if (result != AAUDIO_OK) {
        fprintf(stderr, "ERROR -  player.open() returned %d\n", result);
        goto error;
    }
    printf("player.getFramesPerSecond() = %d\n", player.getFramesPerSecond());
    printf("player.getChannelCount() = %d\n", player.getChannelCount());

    // Configure the oscillators with the stream's *actual* frame rate.
    // (Previously this was hard-coded to 48000 while the stream was opened
    // at 44100, which skewed the rendered frequencies and sweep timing.)
    myData.sineOsc1.setup(440.0, player.getFramesPerSecond());
    myData.sineOsc1.setSweep(300.0, 600.0, 5.0);
    myData.sineOsc2.setup(660.0, player.getFramesPerSecond());
    myData.sineOsc2.setSweep(350.0, 900.0, 7.0);

#if 0
    result = player.prime(); // FIXME crashes AudioTrack.cpp
    if (result != AAUDIO_OK) {
        fprintf(stderr, "ERROR - player.prime() returned %d\n", result);
        goto error;
    }
#endif

    result = player.start();
    if (result != AAUDIO_OK) {
        fprintf(stderr, "ERROR - player.start() returned %d\n", result);
        goto error;
    }

    printf("Sleep for %d seconds while audio plays in a callback thread.\n", NUM_SECONDS);
    for (int second = 0; second < NUM_SECONDS; second++)
    {
        // Sleep one second between state checks.
        const struct timespec request = { .tv_sec = 1, .tv_nsec = 0 };
        (void) clock_nanosleep(CLOCK_MONOTONIC, 0 /*flags*/, &request, NULL /*remain*/);

        // Poll the current state: waiting for a change away from CLOSED with
        // a zero timeout just reads the state without blocking.
        aaudio_stream_state_t state;
        result = AAudioStream_waitForStateChange(player.getStream(),
                                                 AAUDIO_STREAM_STATE_CLOSED,
                                                 &state,
                                                 0);
        if (result != AAUDIO_OK) {
            fprintf(stderr, "ERROR - AAudioStream_waitForStateChange() returned %d\n", result);
            goto error;
        }
        if (state != AAUDIO_STREAM_STATE_STARTING && state != AAUDIO_STREAM_STATE_STARTED) {
            printf("Stream state is %d %s!\n", state, AAudio_convertStreamStateToText(state));
            break;
        }
        printf("framesWritten = %d\n", (int) AAudioStream_getFramesWritten(player.getStream()));
    }
    printf("Woke up now.\n");

    printf("call stop()\n");
    result = player.stop();
    if (result != AAUDIO_OK) {
        goto error;
    }
    printf("call close()\n");
    result = player.close();
    if (result != AAUDIO_OK) {
        goto error;
    }

    if (myData.schedulerChecked) {
        printf("scheduler = 0x%08x, SCHED_FIFO = 0x%08X\n",
               myData.scheduler,
               SCHED_FIFO);
    }

    printf("SUCCESS\n");
    return EXIT_SUCCESS;
error:
    player.close();
    printf("exiting - AAudio result = %d = %s\n", result, AAudio_convertResultToText(result));
    return EXIT_FAILURE;
}