/**
 * Calculate the current latency between writing a frame to the output stream and
 * the same frame being presented to the audio hardware.
 *
 * Here's how the calculation works:
 *
 * 1) Get the time a particular frame was presented to the audio hardware
 * @see AAudioStream_getTimestamp
 * 2) From this, extrapolate the time at which the *next* audio frame written to the stream
 * will be presented
 * 3) Assume that the next audio frame is written at the current time
 * 4) currentLatency = nextFramePresentationTime - nextFrameWriteTime
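 *
 * Worked example (illustrative numbers): if frame 48000 was presented at t = 1.000 s,
 * 48480 frames have been written so far, and the sample rate is 48000 Hz, then the next
 * frame written will be presented at about 1.000 s + 480/48000 s = 1.010 s. If the
 * current time is 1.002 s, the latency is roughly 8 ms.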
 *
 * @param stream The stream being written to
 * @param latencyMillis Pointer to a variable that receives the latency in milliseconds
 * between writing a frame to the stream and that frame being presented to the audio hardware.
 * @return AAUDIO_OK or a negative error. It is normal to receive an error soon after a stream
 * has started because the timestamps are not yet available.
 */
aaudio_result_t
PlayAudioEngine::calculateCurrentOutputLatencyMillis(AAudioStream *stream, double *latencyMillis) {

  // Get the time that a known audio frame was presented for playing
  int64_t existingFrameIndex;
  int64_t existingFramePresentationTime;
  aaudio_result_t result = AAudioStream_getTimestamp(stream,
                                                     CLOCK_MONOTONIC,
                                                     &existingFrameIndex,
                                                     &existingFramePresentationTime);

  if (result == AAUDIO_OK) {

    // Get the write index for the next audio frame
    int64_t writeIndex = AAudioStream_getFramesWritten(stream);

    // Calculate the number of frames between our known frame and the write index
    int64_t frameIndexDelta = writeIndex - existingFrameIndex;

    // Calculate the time at which the next frame will be presented
    int64_t frameTimeDelta = (frameIndexDelta * NANOS_PER_SECOND) / sampleRate_;
    int64_t nextFramePresentationTime = existingFramePresentationTime + frameTimeDelta;

    // Assume that the next frame will be written at the current time
    int64_t nextFrameWriteTime = get_time_nanoseconds(CLOCK_MONOTONIC);

    // Calculate the latency
    *latencyMillis = (double) (nextFramePresentationTime - nextFrameWriteTime)
                           / NANOS_PER_MILLISECOND;
  } else {
    LOGE("Error calculating latency: %s", AAudio_convertResultToText(result));
  }

  return result;
}
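
// Sketch (not part of the original sample): an illustrative polling loop that reports the
// output latency every half second while a stream is playing. It assumes that
// calculateCurrentOutputLatencyMillis() is callable from here (in the real engine it may be
// a private member), and it uses printf plus the clock_nanosleep() pattern already used
// below instead of the logging macros above.
static void pollOutputLatency(PlayAudioEngine &engine, AAudioStream *stream, int iterations) {

  for (int i = 0; i < iterations; i++) {
    double latencyMillis = 0.0;
    aaudio_result_t result = engine.calculateCurrentOutputLatencyMillis(stream, &latencyMillis);
    if (result == AAUDIO_OK) {
      printf("output latency = %.2f ms\n", latencyMillis);
    } // errors are normal shortly after start(), before timestamps become available

    const struct timespec request = { .tv_sec = 0, .tv_nsec = 500 * 1000 * 1000 };
    (void) clock_nanosleep(CLOCK_MONOTONIC, 0 /*flags*/, &request, NULL /*remain*/);
  }
}
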
int main(int argc, char **argv)
{
    (void)argc; // unused
    AAudioSimplePlayer player;
    SineThreadedData_t myData;
    aaudio_result_t result;

    // Make printf print immediately so that debug info is not stuck
    // in a buffer if we hang or crash.
    setvbuf(stdout, nullptr, _IONBF, (size_t) 0);
    printf("%s - Play a sine sweep using an AAudio callback\n", argv[0]);

    myData.schedulerChecked = false;

    result = player.open(2, 44100, AAUDIO_FORMAT_PCM_FLOAT,
                         SimplePlayerDataCallbackProc, SimplePlayerErrorCallbackProc, &myData);
    if (result != AAUDIO_OK) {
        fprintf(stderr, "ERROR -  player.open() returned %d\n", result);
        goto error;
    }
    printf("player.getFramesPerSecond() = %d\n", player.getFramesPerSecond());
    printf("player.getChannelCount() = %d\n", player.getChannelCount());
    // Set up the oscillators using the stream's actual sample rate; the requested
    // 44100 Hz may not be what the device grants.
    myData.sineOsc1.setup(440.0, player.getFramesPerSecond());
    myData.sineOsc1.setSweep(300.0, 600.0, 5.0);
    myData.sineOsc2.setup(660.0, player.getFramesPerSecond());
    myData.sineOsc2.setSweep(350.0, 900.0, 7.0);

#if 0
    result = player.prime(); // FIXME crashes AudioTrack.cpp
    if (result != AAUDIO_OK) {
        fprintf(stderr, "ERROR - player.prime() returned %d\n", result);
        goto error;
    }
#endif

    result = player.start();
    if (result != AAUDIO_OK) {
        fprintf(stderr, "ERROR - player.start() returned %d\n", result);
        goto error;
    }

    printf("Sleep for %d seconds while audio plays in a callback thread.\n", NUM_SECONDS);
    for (int second = 0; second < NUM_SECONDS; second++)
    {
        const struct timespec request = { .tv_sec = 1, .tv_nsec = 0 };
        (void) clock_nanosleep(CLOCK_MONOTONIC, 0 /*flags*/, &request, NULL /*remain*/);

        aaudio_stream_state_t state;
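        // Passing AAUDIO_STREAM_STATE_CLOSED as the state to wait for, with a zero
        // timeout, makes this call return immediately with the stream's current state,
        // i.e. it is used here as a non-blocking state query.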
        result = AAudioStream_waitForStateChange(player.getStream(),
                                                 AAUDIO_STREAM_STATE_CLOSED,
                                                 &state,
                                                 0);
        if (result != AAUDIO_OK) {
            fprintf(stderr, "ERROR - AAudioStream_waitForStateChange() returned %d\n", result);
            goto error;
        }
        if (state != AAUDIO_STREAM_STATE_STARTING && state != AAUDIO_STREAM_STATE_STARTED) {
            printf("Stream state is %d %s!\n", state, AAudio_convertStreamStateToText(state));
            break;
        }
        printf("framesWritten = %d\n", (int) AAudioStream_getFramesWritten(player.getStream()));
    }
    printf("Woke up now.\n");

    printf("call stop()\n");
    result = player.stop();
    if (result != AAUDIO_OK) {
        goto error;
    }
    printf("call close()\n");
    result = player.close();
    if (result != AAUDIO_OK) {
        goto error;
    }

    if (myData.schedulerChecked) {
        printf("scheduler = 0x%08x, SCHED_FIFO = 0x%08X\n",
               myData.scheduler,
               SCHED_FIFO);
    }

    printf("SUCCESS\n");
    return EXIT_SUCCESS;
error:
    player.close();
    printf("exiting - AAudio result = %d = %s\n", result, AAudio_convertResultToText(result));
    return EXIT_FAILURE;
}
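
// Sketch (illustrative only, not the sample's real callback): the general shape of an AAudio
// data callback like the SimplePlayerDataCallbackProc passed to player.open() above. The real
// callback lives in the sample's AAudioSimplePlayer helpers and renders the sweeping
// SineGenerators; this hypothetical version renders a plain 440 Hz tone, so only the callback
// signature and return code come from the AAudio API. Requires <cmath> for M_PI and sinf().
static aaudio_data_callback_result_t exampleDataCallbackProc(AAudioStream *stream,
                                                             void *userData,
                                                             void *audioData,
                                                             int32_t numFrames) {
    (void) userData; // a real callback would read per-stream state (e.g. SineThreadedData_t) here
    static float phase = 0.0f;
    const int32_t channelCount = AAudioStream_getChannelCount(stream);
    const float phaseIncrement =
            2.0f * (float) M_PI * 440.0f / (float) AAudioStream_getSampleRate(stream);
    float *out = (float *) audioData; // the stream was opened with AAUDIO_FORMAT_PCM_FLOAT

    for (int32_t i = 0; i < numFrames; i++) {
        const float sample = 0.2f * sinf(phase);
        for (int32_t ch = 0; ch < channelCount; ch++) {
            out[i * channelCount + ch] = sample; // write the same sample to every channel
        }
        phase += phaseIncrement;
        if (phase >= 2.0f * (float) M_PI) {
            phase -= 2.0f * (float) M_PI;
        }
    }
    return AAUDIO_CALLBACK_RESULT_CONTINUE;
}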