/*
 * Thread-safe entry point: acquire id->lock around a call to
 * krb5_rc_dfl_expunge_locked() and hand back its result.
 */
krb5_error_code KRB5_CALLCONV
krb5_rc_dfl_expunge(krb5_context context, krb5_rcache id)
{
    krb5_error_code retval;

    k5_mutex_lock(&id->lock);
    retval = krb5_rc_dfl_expunge_locked(context, id);
    k5_mutex_unlock(&id->lock);
    return retval;
}
/*
 * Store replay record REP in cache ID: insert it into the in-memory
 * hash table and (unless built with NOIOSTUFF) append it to the cache
 * file.  May trigger an automatic expunge when misses greatly exceed
 * hits (EXCESSREPS).
 *
 * Returns 0 on success, KRB5KRB_AP_ERR_REPEAT if REP is a replay,
 * KRB5_RC_MALLOC on allocation failure, KRB5_RC_IO on sync failure,
 * or another error code from the time/lock/io helpers.
 *
 * All exit paths after the lock is taken funnel through a single
 * cleanup label so the unlock cannot be missed (previously the unlock
 * was duplicated at six separate return sites).
 */
krb5_error_code KRB5_CALLCONV
krb5_rc_dfl_store(krb5_context context, krb5_rcache id, krb5_donot_replay *rep)
{
    krb5_error_code ret;
    struct dfl_data *t;
    krb5_int32 now;

    ret = krb5_timeofday(context, &now);
    if (ret)
        return ret;
    ret = k5_mutex_lock(&id->lock);
    if (ret)
        return ret;

    switch (rc_store(context, id, rep, now, FALSE)) {
    case CMP_MALLOC:
        ret = KRB5_RC_MALLOC;
        goto cleanup;
    case CMP_REPLAY:
        ret = KRB5KRB_AP_ERR_REPEAT;
        goto cleanup;
    case 0:
        break;
    default:
        /* Unexpected rc_store() result; treat it as a successful store. */
        break;
    }
    t = (struct dfl_data *)id->data;
#ifndef NOIOSTUFF
    ret = krb5_rc_io_store(context, t, rep);
    if (ret)
        goto cleanup;
#endif
    /* Shall we automatically expunge? */
    if (t->nummisses > t->numhits + EXCESSREPS) {
        ret = krb5_rc_dfl_expunge_locked(context, id);
        goto cleanup;
    }
#ifndef NOIOSTUFF
    if (krb5_rc_io_sync(context, &t->d)) {
        ret = KRB5_RC_IO;
        goto cleanup;
    }
#endif
    ret = 0;

cleanup:
    k5_mutex_unlock(&id->lock);
    return ret;
}
/*
 * Load the on-disk replay cache file for ID into the in-memory table.
 *
 * Opens the file named in the dfl_data, reads the lifespan header, then
 * fetches each stored entry: still-live entries (per alive()) are
 * inserted into the table, expired ones are counted.  If more than
 * EXCESSREPS expired entries were skipped, the file is expunged at the
 * end.  On failure the file is closed again before returning.
 *
 * Caller must hold id->lock.  Returns 0 on success, KRB5_RC_NOIO when
 * compiled with NOIOSTUFF, or an I/O/allocation error code.
 */
static krb5_error_code
krb5_rc_dfl_recover_locked(krb5_context context, krb5_rcache id)
{
#ifdef NOIOSTUFF
    return KRB5_RC_NOIO;
#else
    struct dfl_data *t = (struct dfl_data *)id->data;
    krb5_donot_replay *rep = NULL;
    krb5_error_code retval;
    long max_size;
    int expired_entries = 0;
    krb5_int32 now;

    if ((retval = krb5_rc_io_open(context, &t->d, t->name))) {
        return retval;
    }

    t->recovering = 1;

    max_size = krb5_rc_io_size(context, &t->d);

    if (krb5_rc_io_read(context, &t->d, (krb5_pointer) &t->lifespan,
                        sizeof(t->lifespan))) {
        retval = KRB5_RC_IO;
        goto io_fail;
    }

    if (!(rep = malloc(sizeof(*rep)))) {
        retval = KRB5_RC_MALLOC;
        goto io_fail;
    }
    rep->client = rep->server = rep->msghash = NULL;

    /* NOTE(review): on clock failure we proceed with now == 0, presumably
     * so alive() keeps all entries -- confirm intent. */
    if (krb5_timeofday(context, &now))
        now = 0;

    /* now read in each auth_replay and insert into table */
    for (;;) {
        if (krb5_rc_io_mark(context, &t->d)) {
            retval = KRB5_RC_IO;
            goto io_fail;
        }
        retval = krb5_rc_io_fetch(context, t, rep, (int) max_size);
        if (retval == KRB5_RC_IO_EOF)
            break;
        else if (retval != 0)
            goto io_fail;

        if (alive(now, rep, t->lifespan) != CMP_EXPIRED) {
            if (rc_store(context, id, rep, now, TRUE) == CMP_MALLOC) {
                retval = KRB5_RC_MALLOC;
                goto io_fail;
            }
        } else {
            expired_entries++;
        }

        /* Free the fields allocated by krb5_rc_io_fetch; free(NULL) is a
         * no-op, so no NULL guards are needed. */
        free(rep->server);
        free(rep->client);
        free(rep->msghash);
        rep->client = rep->server = rep->msghash = NULL;
    }
    retval = 0;
    krb5_rc_io_unmark(context, &t->d);
    /*
     * An automatic expunge here could remove the need for
     * mark/unmark but that would be inefficient.
     */
io_fail:
    krb5_rc_free_entry(context, &rep);
    if (retval)
        krb5_rc_io_close(context, &t->d);
    else if (expired_entries > EXCESSREPS)
        retval = krb5_rc_dfl_expunge_locked(context, id);
    t->recovering = 0;
    return retval;
#endif
}