static void owner_death_callback(ErlNifEnv* env, void* obj, ErlNifPid* pid, ErlNifMonitor* mon) {
    efile_data_t *d = (efile_data_t*)obj;

    (void)env;
    (void)pid;
    (void)mon;

    for(;;) {
        enum efile_state_t previous_state;

        previous_state = erts_atomic32_cmpxchg_acqb(&d->state,
            EFILE_STATE_CLOSED, EFILE_STATE_IDLE);

        switch(previous_state) {
        case EFILE_STATE_IDLE:
            efile_close(d);
            return;
        case EFILE_STATE_CLOSE_PENDING:
        case EFILE_STATE_CLOSED:
            /* We're either already closed or managed to mark ourselves for
             * closure in the previous iteration. */
            return;
        case EFILE_STATE_BUSY:
            /* Schedule ourselves to be closed once the current operation
             * finishes, retrying the [IDLE -> CLOSED] transition in case we
             * narrowly passed the [BUSY -> IDLE] one. */
            erts_atomic32_cmpxchg_nob(&d->state,
                EFILE_STATE_CLOSE_PENDING, EFILE_STATE_BUSY);
            break;
        }
    }
}
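/* The callbacks in this section drive a small atomic state machine on
 * d->state. A minimal sketch of what the efile_state_t states could look
 * like; the concrete definition lives elsewhere in the NIF, and the exact
 * shape shown here is an assumption for illustration only. */
enum efile_state_t {
    EFILE_STATE_IDLE,          /* Handle is open and no operation is running. */
    EFILE_STATE_BUSY,          /* A NIF operation currently owns the handle. */
    EFILE_STATE_CLOSE_PENDING, /* Close requested while BUSY; the operation
                                * holding the handle closes it when it is done. */
    EFILE_STATE_CLOSED         /* The underlying handle has been closed. */
};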
static ERL_NIF_TERM close_nif(ErlNifEnv *env, int argc, const ERL_NIF_TERM argv[]) {
    enum efile_state_t previous_state;
    efile_data_t *d;

    ASSERT(argc == 1);
    if(!get_file_data(env, argv[0], &d)) {
        return enif_make_badarg(env);
    }

    previous_state = erts_atomic32_cmpxchg_acqb(&d->state,
        EFILE_STATE_CLOSED, EFILE_STATE_IDLE);

    if(previous_state == EFILE_STATE_IDLE) {
        posix_errno_t error;

        enif_demonitor_process(env, d, &d->monitor);

        if(!efile_close(d, &error)) {
            return posix_error_to_tuple(env, error);
        }

        return am_ok;
    } else {
        /* CLOSE_PENDING should be impossible at this point since it requires
         * a transition from BUSY; the only valid state here is CLOSED. */
        ASSERT(previous_state == EFILE_STATE_CLOSED);

        return posix_error_to_tuple(env, EINVAL);
    }
}
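/* close_nif reports failures through posix_error_to_tuple. A sketch of what
 * such a helper could look like: it wraps the POSIX error in an
 * {error, Reason} tuple. The posix_errno_to_name helper and am_error atom
 * are assumptions for illustration, not necessarily the names used by OTP. */
static ERL_NIF_TERM posix_error_to_tuple(ErlNifEnv *env, posix_errno_t posix_errno) {
    ERL_NIF_TERM reason = posix_errno_to_name(env, posix_errno);
    return enif_make_tuple2(env, am_error, reason);
}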
static void owner_death_callback(ErlNifEnv* env, void* obj, ErlNifPid* pid, ErlNifMonitor* mon) {
    efile_data_t *d = (efile_data_t*)obj;

    (void)env;
    (void)pid;
    (void)mon;

    for(;;) {
        enum efile_state_t previous_state;

        previous_state = erts_atomic32_cmpxchg_acqb(&d->state,
            EFILE_STATE_CLOSED, EFILE_STATE_IDLE);

        switch(previous_state) {
        case EFILE_STATE_IDLE:
            {
                /* We cannot close the file here as that could block a normal
                 * scheduler, so we tell erts_prim_file to do it for us.
                 *
                 * This can in turn become a bottleneck (especially in cases
                 * like NFS failure), but it's less problematic than blocking
                 * thread progress. */
                ERL_NIF_TERM message, file_ref;

                file_ref = enif_make_resource(env, d);
                message = enif_make_tuple2(env, am_close, file_ref);

                if(!enif_send(env, &erts_prim_file_pid, NULL, message)) {
                    ERTS_INTERNAL_ERROR("Failed to defer prim_file close.");
                }

                return;
            }
        case EFILE_STATE_CLOSE_PENDING:
        case EFILE_STATE_CLOSED:
            /* We're either already closed or managed to mark ourselves for
             * closure in the previous iteration. */
            return;
        case EFILE_STATE_BUSY:
            /* Schedule ourselves to be closed once the current operation
             * finishes, retrying the [IDLE -> CLOSED] transition in case we
             * narrowly passed the [BUSY -> IDLE] one. */
            erts_atomic32_cmpxchg_nob(&d->state,
                EFILE_STATE_CLOSE_PENDING, EFILE_STATE_BUSY);
            break;
        }
    }
}
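/* The deferred close above relies on erts_prim_file_pid identifying the
 * erts_prim_file process. A sketch of how it could be captured at load time,
 * assuming the pid is handed to the NIF as its load argument; the load-info
 * shape shown here is an assumption for illustration. */
static ErlNifPid erts_prim_file_pid;

static int load(ErlNifEnv *env, void **priv_data, ERL_NIF_TERM prim_file_pid) {
    (void)priv_data;

    if(!enif_get_local_pid(env, prim_file_pid, &erts_prim_file_pid)) {
        /* Refuse to load without a process to defer closes to. */
        return -1;
    }

    return 0;
}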
static void gc_callback(ErlNifEnv *env, void* data) {
    efile_data_t *d = (efile_data_t*)data;

    enum efile_state_t previous_state;

    (void)env;

    previous_state = erts_atomic32_cmpxchg_acqb(&d->state,
        EFILE_STATE_CLOSED, EFILE_STATE_IDLE);

    ASSERT(previous_state != EFILE_STATE_CLOSE_PENDING &&
           previous_state != EFILE_STATE_BUSY);

    if(previous_state == EFILE_STATE_IDLE) {
        efile_close(d);
    }
}
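/* gc_callback and owner_death_callback only take effect if they are wired up
 * as the destructor and 'down' callback of the file resource type. A sketch
 * of how that registration could look with enif_open_resource_type_x; the
 * "efile" type name, the efile_resource_type variable, and the helper name
 * are assumptions for illustration. */
static ErlNifResourceType *efile_resource_type;

static int init_resource_type(ErlNifEnv *env) {
    ErlNifResourceTypeInit callbacks = {0};

    callbacks.dtor = gc_callback;          /* Runs when the term is GC'd. */
    callbacks.down = owner_death_callback; /* Runs when the owning process dies. */
    callbacks.stop = NULL;                 /* No enif_select use here. */

    efile_resource_type = enif_open_resource_type_x(env, "efile", &callbacks,
        ERL_NIF_RT_CREATE, NULL);

    return efile_resource_type != NULL;
}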
static ERL_NIF_TERM file_handle_wrapper(file_op_impl_t operation, ErlNifEnv *env,
                                        int argc, const ERL_NIF_TERM argv[]) {
    efile_data_t *d;

    enum efile_state_t previous_state;
    ERL_NIF_TERM result;

    if(argc < 1 || !get_file_data(env, argv[0], &d)) {
        return enif_make_badarg(env);
    }

    previous_state = erts_atomic32_cmpxchg_acqb(&d->state,
        EFILE_STATE_BUSY, EFILE_STATE_IDLE);

    if(previous_state == EFILE_STATE_IDLE) {
        result = operation(d, env, argc - 1, &argv[1]);

        previous_state = erts_atomic32_cmpxchg_relb(&d->state,
            EFILE_STATE_IDLE, EFILE_STATE_BUSY);

        ASSERT(previous_state != EFILE_STATE_IDLE);

        if(previous_state == EFILE_STATE_CLOSE_PENDING) {
            /* This is the only point where a change from CLOSE_PENDING is
             * possible, and we're running synchronously, so we can't race with
             * anything else here. */
            posix_errno_t ignored;

            erts_atomic32_set_acqb(&d->state, EFILE_STATE_CLOSED);
            efile_close(d, &ignored);
        }
    } else {
        /* CLOSE_PENDING should be impossible at this point since it requires
         * a transition from BUSY; the only valid state here is CLOSED. */
        ASSERT(previous_state == EFILE_STATE_CLOSED);

        result = posix_error_to_tuple(env, EINVAL);
    }

    return result;
}
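/* file_handle_wrapper is meant to be the single entry point that every
 * file-handle NIF goes through, so each operation only has to provide a
 * *_impl function (such as close_nif_impl below). A sketch of how the
 * exported NIF could be generated from the wrapper; the macro name is an
 * assumption for illustration. */
#define WRAP_FILE_HANDLE_EXPORT(name)                                       \
    static ERL_NIF_TERM name ## _impl(efile_data_t *d, ErlNifEnv *env,      \
                                      int argc, const ERL_NIF_TERM argv[]); \
    static ERL_NIF_TERM name(ErlNifEnv *env, int argc,                      \
                             const ERL_NIF_TERM argv[]) {                   \
        return file_handle_wrapper(name ## _impl, env, argc, argv);         \
    }

/* Pairs the exported close_nif with the close_nif_impl defined below. */
WRAP_FILE_HANDLE_EXPORT(close_nif)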
static ERL_NIF_TERM close_nif_impl(efile_data_t *d, ErlNifEnv *env,
                                   int argc, const ERL_NIF_TERM argv[]) {
    enum efile_state_t previous_state;

    ASSERT(argc == 0);

    previous_state = erts_atomic32_cmpxchg_acqb(&d->state,
        EFILE_STATE_CLOSED, EFILE_STATE_BUSY);

    ASSERT(previous_state == EFILE_STATE_CLOSE_PENDING ||
           previous_state == EFILE_STATE_BUSY);

    if(previous_state == EFILE_STATE_BUSY) {
        enif_demonitor_process(env, d, &d->monitor);

        if(!efile_close(d)) {
            return posix_error_to_tuple(env, d->posix_errno);
        }
    }

    return am_ok;
}