/* Drop the ref count.  If it hits zero, remove the entry from the fcc_set list
 * and free it. */
static krb5_error_code dereference(krb5_context context, fcc_data *data)
{
    struct fcc_set **fccsp, *temp;

    /* Find the list entry owning data; it must be present, or refcounting has
     * already gone wrong elsewhere. */
    k5_cc_mutex_lock(context, &krb5int_cc_file_mutex);
    for (fccsp = &fccs; *fccsp != NULL; fccsp = &(*fccsp)->next) {
        if ((*fccsp)->data == data)
            break;
    }
    assert(*fccsp != NULL);
    assert((*fccsp)->data == data);
    (*fccsp)->refcount--;
    if ((*fccsp)->refcount == 0) {
        /* Last reference: unlink the set entry, then release the global
         * mutex before tearing down data itself. */
        data = (*fccsp)->data;
        temp = *fccsp;
        *fccsp = (*fccsp)->next;
        free(temp);
        k5_cc_mutex_unlock(context, &krb5int_cc_file_mutex);
        /* No other thread can hold data->lock now; verify that. */
        k5_cc_mutex_assert_unlocked(context, &data->lock);
        free(data->filename);
        /* Scrub the I/O buffer in case it held credential bytes. */
        zap(data->buf, sizeof(data->buf));
        if (data->fd >= 0) {
            /* close_cache_file presumably requires the per-cache lock held
             * -- taken here solely for that call. */
            k5_cc_mutex_lock(context, &data->lock);
            close_cache_file(context, data);
            k5_cc_mutex_unlock(context, &data->lock);
        }
        k5_cc_mutex_destroy(&data->lock);
        free(data);
    } else {
        k5_cc_mutex_unlock(context, &krb5int_cc_file_mutex);
    }
    return 0;
}
/*
 * Effects:
 * Destroys the contents of id. id is invalid after call.
 *
 * Errors:
 * system errors (locks related)
 */
krb5_error_code KRB5_CALLCONV
krb5_mcc_destroy(krb5_context context, krb5_ccache id)
{
    krb5_mcc_list_node **curr, *node;
    krb5_mcc_data *d;

    /* Unlink this cache from the global list, if it is still there, while
     * holding the global mutex. */
    k5_cc_mutex_lock(context, &krb5int_mcc_mutex);
    d = (krb5_mcc_data *)id->data;
    for (curr = &mcc_head; *curr; curr = &(*curr)->next) {
        if ((*curr)->cache == d) {
            node = *curr;
            *curr = node->next;
            free(node);
            break;
        }
    }
    k5_cc_mutex_unlock(context, &krb5int_mcc_mutex);

    /* Release the cache contents under the per-cache lock, then destroy the
     * lock itself and free the remaining storage. */
    k5_cc_mutex_lock(context, &d->lock);
    krb5_mcc_free(context, id);
    free(d->name);
    k5_cc_mutex_unlock(context, &d->lock);
    k5_cc_mutex_destroy(&d->lock);
    free(d);
    free(id);
    krb5_change_cache ();
    return KRB5_OK;
}
/*
 * Effects:
 * Destroys the contents of id. id is invalid after call.
 */
krb5_error_code KRB5_CALLCONV
krb5_mcc_destroy(krb5_context context, krb5_ccache id)
{
    krb5_mcc_data *d = id->data;
    krb5_boolean removed_from_table = FALSE;

    /* Remove this node from the table if it is still present. */
    k5_cc_mutex_lock(context, &krb5int_mcc_mutex);
    if (k5_hashtab_remove(mcc_hashtab, d->name, strlen(d->name)))
        removed_from_table = TRUE;
    k5_cc_mutex_unlock(context, &krb5int_mcc_mutex);

    /* Empty the cache and remove the reference for the table slot.  There
     * will always be at least one reference left for the handle being
     * destroyed. */
    k5_cc_mutex_lock(context, &d->lock);
    empty_mcc_cache(context, d);
    if (removed_from_table)
        d->refcount--;
    k5_cc_mutex_unlock(context, &d->lock);

    /* Invalidate the handle, possibly removing the last reference to d and
     * freeing it. */
    krb5_mcc_close(context, id);

    krb5_change_cache ();
    return KRB5_OK;
}
/* Create or overwrite the cache file with a header and default principal. */
static krb5_error_code KRB5_CALLCONV
fcc_initialize(krb5_context context, krb5_ccache id, krb5_principal princ)
{
    krb5_error_code ret;
    fcc_data *data = id->data;
    int st = 0;

    k5_cc_mutex_lock(context, &data->lock);

    /* NOTE(review): MAYBE_OPEN is a macro that can return from this function
     * on open failure; presumably it releases data->lock first -- confirm
     * against the macro definition. */
    MAYBE_OPEN(context, id, FCC_OPEN_AND_ERASE);

#if defined(HAVE_FCHMOD) || defined(HAVE_CHMOD)
#ifdef HAVE_FCHMOD
    /* Restrict the new cache file to owner read/write. */
    st = fchmod(data->fd, S_IRUSR | S_IWUSR);
#else
    st = chmod(data->filename, S_IRUSR | S_IWUSR);
#endif
    if (st == -1) {
        ret = interpret_errno(context, errno);
        MAYBE_CLOSE(context, id, ret);
        k5_cc_mutex_unlock(context, &data->lock);
        return ret;
    }
#endif
    /* Write the header and default principal. */
    ret = store_principal(context, id, princ);

    MAYBE_CLOSE(context, id, ret);
    k5_cc_mutex_unlock(context, &data->lock);
    krb5_change_cache();
    return ret;
}
/* Resolve residual to a memory cache handle, creating the backing cache
 * entry if no cache with that name exists yet. */
krb5_error_code KRB5_CALLCONV
krb5_mcc_resolve (krb5_context context, krb5_ccache *id, const char *residual)
{
    krb5_error_code ret;
    krb5_mcc_list_node *node;
    krb5_mcc_data *cache_data;
    krb5_ccache handle;

    ret = k5_cc_mutex_lock(context, &krb5int_mcc_mutex);
    if (ret)
        return ret;

    /* Look for an existing cache with this name. */
    node = mcc_head;
    while (node != NULL && strcmp(node->cache->name, residual) != 0)
        node = node->next;

    if (node != NULL) {
        cache_data = node->cache;
    } else {
        /* Not found; create a new entry while the mutex is still held. */
        ret = new_mcc_data(residual, &cache_data);
        if (ret) {
            k5_cc_mutex_unlock(context, &krb5int_mcc_mutex);
            return ret;
        }
    }
    k5_cc_mutex_unlock(context, &krb5int_mcc_mutex);

    handle = (krb5_ccache) malloc(sizeof(struct _krb5_ccache));
    if (handle == NULL)
        return KRB5_CC_NOMEM;
    handle->ops = &krb5_mcc_ops;
    handle->data = cache_data;
    *id = handle;
    return KRB5_OK;
}
/* Create a new memory cache with a random, unique name and return a handle
 * for it in *id. */
krb5_error_code KRB5_CALLCONV
krb5_mcc_generate_new (krb5_context context, krb5_ccache *id)
{
    krb5_ccache lid;
    char uniquename[8];
    krb5_error_code err;
    krb5_mcc_data *d;

    /* Allocate memory */
    lid = (krb5_ccache) malloc(sizeof(struct _krb5_ccache));
    if (lid == NULL)
        return KRB5_CC_NOMEM;

    lid->ops = &krb5_mcc_ops;

    err = k5_cc_mutex_lock(context, &krb5int_mcc_mutex);
    if (err) {
        free(lid);
        return err;
    }

    /* Check for uniqueness with mutex locked to avoid race conditions */
    while (1) {
        krb5_mcc_list_node *ptr;
        /* NOTE(review): presumably krb5int_random_string produces a
         * NUL-terminated printable name in uniquename -- confirm. */
        err = krb5int_random_string (context, uniquename, sizeof (uniquename));
        if (err) {
            k5_cc_mutex_unlock(context, &krb5int_mcc_mutex);
            free(lid);
            return err;
        }

        for (ptr = mcc_head; ptr; ptr=ptr->next) {
            if (!strcmp(ptr->cache->name, uniquename)) {
                break;  /* got a match, loop again */
            }
        }
        if (!ptr)
            break;      /* got to the end without finding a match */
    }

    /* Name is unique; create the cache entry under the same lock. */
    err = new_mcc_data(uniquename, &d);
    k5_cc_mutex_unlock(context, &krb5int_mcc_mutex);
    if (err) {
        free(lid);
        return err;
    }
    lid->data = d;
    *id = lid;
    krb5_change_cache ();
    return KRB5_OK;
}
/* Create a per-type cursor positioned at the head of the global memory
 * cache list. */
static krb5_error_code KRB5_CALLCONV
krb5_mcc_ptcursor_new(
    krb5_context context,
    krb5_cc_ptcursor *cursor)
{
    krb5_cc_ptcursor cur;
    struct krb5_mcc_ptcursor_data *pos;

    *cursor = NULL;

    cur = malloc(sizeof(*cur));
    if (cur == NULL)
        return ENOMEM;
    pos = malloc(sizeof(struct krb5_mcc_ptcursor_data));
    if (pos == NULL) {
        free(cur);
        return ENOMEM;
    }
    cur->ops = &krb5_mcc_ops;
    cur->data = pos;

    /* Snapshot the list head under the global mutex. */
    k5_cc_mutex_lock(context, &krb5int_mcc_mutex);
    pos->cur = mcc_head;
    k5_cc_mutex_unlock(context, &krb5int_mcc_mutex);

    *cursor = cur;
    return 0;
}
/*
 * Modifies:
 * the memory cache
 *
 * Effects:
 * Save away creds in the ccache.
 *
 * Errors:
 * system errors (mutex locking)
 * ENOMEM
 */
krb5_error_code KRB5_CALLCONV
krb5_mcc_store(krb5_context ctx, krb5_ccache id, krb5_creds *creds)
{
    krb5_error_code err;
    krb5_mcc_link *new_node;
    krb5_mcc_data *mptr = (krb5_mcc_data *)id->data;

    new_node = malloc(sizeof(krb5_mcc_link));
    if (new_node == NULL)
        return ENOMEM;
    err = krb5_copy_creds(ctx, creds, &new_node->creds);
    if (err) {
        free(new_node);
        return err;
    }
    err = k5_cc_mutex_lock(ctx, &mptr->lock);
    if (err) {
        /* Fix: previously only new_node was freed here, leaking the creds
         * copy made by krb5_copy_creds above. */
        krb5_free_creds(ctx, new_node->creds);
        free(new_node);
        return err;
    }
    /* Prepend the new credential to the cache's linked list. */
    new_node->next = mptr->link;
    mptr->link = new_node;
    update_mcc_change_time(mptr);
    k5_cc_mutex_unlock(ctx, &mptr->lock);
    return 0;
}
/* Return the next memory cache in the per-type iteration, or a NULL *ccache
 * when the list is exhausted. */
static krb5_error_code KRB5_CALLCONV
krb5_mcc_ptcursor_next(
    krb5_context context,
    krb5_cc_ptcursor cursor,
    krb5_ccache *ccache)
{
    krb5_error_code ret = 0;
    struct krb5_mcc_ptcursor_data *cdata = NULL;

    *ccache = NULL;
    cdata = cursor->data;
    /* A null position means iteration has already completed. */
    if (cdata->cur == NULL)
        return 0;

    *ccache = malloc(sizeof(**ccache));
    if (*ccache == NULL)
        return ENOMEM;

    (*ccache)->ops = &krb5_mcc_ops;
    (*ccache)->data = cdata->cur->cache;

    /* Advance the cursor under the global mutex; other threads may be
     * modifying the list. */
    ret = k5_cc_mutex_lock(context, &krb5int_mcc_mutex);
    if (ret)
        goto errout;
    cdata->cur = cdata->cur->next;
    ret = k5_cc_mutex_unlock(context, &krb5int_mcc_mutex);
    if (ret)
        goto errout;
errout:
    /* On any mutex error, release the handle and report no result. */
    if (ret && *ccache != NULL) {
        free(*ccache);
        *ccache = NULL;
    }
    return ret;
}
/*
 * Modifies:
 * id
 *
 * Effects:
 * Creates/refreshes the memory cred cache id.  If the cache exists, its
 * contents are destroyed.
 *
 * Errors:
 * system errors
 */
krb5_error_code KRB5_CALLCONV
krb5_mcc_initialize(krb5_context context, krb5_ccache id, krb5_principal princ)
{
    krb5_os_context os_ctx = &context->os_context;
    krb5_error_code ret;
    krb5_mcc_data *d;

    d = (krb5_mcc_data *)id->data;
    k5_cc_mutex_lock(context, &d->lock);

    /* Discard any existing contents, then install the new default
     * principal. */
    krb5_mcc_free(context, id);
    d = (krb5_mcc_data *)id->data;
    ret = krb5_copy_principal(context, princ, &d->prin);
    update_mcc_change_time(d);

    if (os_ctx->os_flags & KRB5_OS_TOFFSET_VALID) {
        /* Store client time offsets in the cache */
        d->time_offset = os_ctx->time_offset;
        d->usec_offset = os_ctx->usec_offset;
    }

    k5_cc_mutex_unlock(context, &d->lock);
    if (ret == KRB5_OK)
        krb5_change_cache();
    return ret;
}
/* Release the per-cache lock taken by the matching lock operation. */
static krb5_error_code KRB5_CALLCONV
krb5_mcc_unlock(krb5_context context, krb5_ccache id)
{
    krb5_mcc_data *d = (krb5_mcc_data *) id->data;

    k5_cc_mutex_unlock(context, &d->lock);
    return 0;
}
/* Create a new memory cache with a random, unique name and return a handle
 * for it in *id. */
krb5_error_code KRB5_CALLCONV
krb5_mcc_generate_new (krb5_context context, krb5_ccache *id)
{
    krb5_ccache lid;
    char uniquename[8];
    krb5_error_code err;
    krb5_mcc_data *d;

    /* Allocate memory */
    lid = (krb5_ccache) malloc(sizeof(struct _krb5_ccache));
    if (lid == NULL)
        return KRB5_CC_NOMEM;

    lid->ops = &krb5_mcc_ops;

    k5_cc_mutex_lock(context, &krb5int_mcc_mutex);
    /* Ensure the global hash table exists before we consult it. */
    init_table(context);

    /* Check for uniqueness with mutex locked to avoid race conditions */
    while (1) {
        err = krb5int_random_string (context, uniquename, sizeof (uniquename));
        if (err) {
            k5_cc_mutex_unlock(context, &krb5int_mcc_mutex);
            free(lid);
            return err;
        }

        /* Stop once the random name is not already in use. */
        if (k5_hashtab_get(mcc_hashtab, uniquename,
                           strlen(uniquename)) == NULL)
            break;
    }

    err = new_mcc_data(uniquename, &d);
    k5_cc_mutex_unlock(context, &krb5int_mcc_mutex);
    if (err) {
        free(lid);
        return err;
    }
    lid->data = d;
    *id = lid;
    krb5_change_cache ();
    return KRB5_OK;
}
/* Prepare for a sequential iteration over the cache file. */
static krb5_error_code KRB5_CALLCONV
fcc_start_seq_get(krb5_context context, krb5_ccache id, krb5_cc_cursor *cursor)
{
    krb5_fcc_cursor *fcursor;
    krb5_error_code ret;
    fcc_data *data = id->data;

    k5_cc_mutex_lock(context, &data->lock);

    fcursor = malloc(sizeof(krb5_fcc_cursor));
    if (fcursor == NULL) {
        k5_cc_mutex_unlock(context, &data->lock);
        return KRB5_CC_NOMEM;
    }

    /* Open the file ourselves when the handle is in open/close mode. */
    if (OPENCLOSE(id)) {
        ret = open_cache_file(context, id, FCC_OPEN_RDONLY);
        if (ret) {
            free(fcursor);
            k5_cc_mutex_unlock(context, &data->lock);
            return ret;
        }
    }

    /* Make sure we start reading right after the primary principal */
    ret = skip_header(context, id);
    if (ret) {
        free(fcursor);
        goto done;
    }
    ret = skip_principal(context, id);
    if (ret) {
        free(fcursor);
        goto done;
    }

    /* Record the current file offset as the cursor position. */
    fcursor->pos = fcc_lseek(data, 0, SEEK_CUR);
    *cursor = (krb5_cc_cursor)fcursor;

done:
    MAYBE_CLOSE(context, id, ret);
    k5_cc_mutex_unlock(context, &data->lock);
    return ret;
}
/* Resolve residual to a memory cache handle, creating the backing cache
 * entry if none exists, and optionally import its stored KDC time offset
 * into the context. */
krb5_error_code KRB5_CALLCONV
krb5_mcc_resolve (krb5_context context, krb5_ccache *id, const char *residual)
{
    krb5_os_context os_ctx = &context->os_context;
    krb5_ccache lid;
    krb5_mcc_list_node *ptr;
    krb5_error_code err;
    krb5_mcc_data *d;

    k5_cc_mutex_lock(context, &krb5int_mcc_mutex);
    /* Search the global list for an existing cache with this name. */
    for (ptr = mcc_head; ptr; ptr=ptr->next)
        if (!strcmp(ptr->cache->name, residual))
            break;
    if (ptr)
        d = ptr->cache;
    else {
        err = new_mcc_data(residual, &d);
        if (err) {
            k5_cc_mutex_unlock(context, &krb5int_mcc_mutex);
            return err;
        }
    }
    k5_cc_mutex_unlock(context, &krb5int_mcc_mutex);

    lid = (krb5_ccache) malloc(sizeof(struct _krb5_ccache));
    if (lid == NULL)
        return KRB5_CC_NOMEM;

    if ((context->library_options & KRB5_LIBOPT_SYNC_KDCTIME) &&
        !(os_ctx->os_flags & KRB5_OS_TOFFSET_VALID)) {
        /* Use the time offset from the cache entry */
        os_ctx->time_offset = d->time_offset;
        os_ctx->usec_offset = d->usec_offset;
        os_ctx->os_flags = ((os_ctx->os_flags & ~KRB5_OS_TOFFSET_TIME) |
                            KRB5_OS_TOFFSET_VALID);
    }

    lid->ops = &krb5_mcc_ops;
    lid->data = d;
    *id = lid;
    return KRB5_OK;
}
/* Resolve residual to a memory cache handle, taking a new reference on an
 * existing cache or creating a new one. */
krb5_error_code KRB5_CALLCONV
krb5_mcc_resolve (krb5_context context, krb5_ccache *id, const char *residual)
{
    krb5_os_context os_ctx = &context->os_context;
    krb5_ccache lid;
    krb5_error_code err;
    krb5_mcc_data *d;

    k5_cc_mutex_lock(context, &krb5int_mcc_mutex);
    /* Ensure the global hash table exists before we consult it. */
    init_table(context);
    d = k5_hashtab_get(mcc_hashtab, residual, strlen(residual));
    if (d != NULL) {
        /* Found: take a reference for the handle we are about to create. */
        k5_cc_mutex_lock(context, &d->lock);
        d->refcount++;
        k5_cc_mutex_unlock(context, &d->lock);
    } else {
        err = new_mcc_data(residual, &d);
        if (err) {
            k5_cc_mutex_unlock(context, &krb5int_mcc_mutex);
            return err;
        }
    }
    k5_cc_mutex_unlock(context, &krb5int_mcc_mutex);

    lid = (krb5_ccache) malloc(sizeof(struct _krb5_ccache));
    if (lid == NULL)
        return KRB5_CC_NOMEM;
    /* NOTE(review): if this malloc fails, the reference taken above is not
     * released, leaking one refcount on d -- confirm and fix upstream. */

    if ((context->library_options & KRB5_LIBOPT_SYNC_KDCTIME) &&
        !(os_ctx->os_flags & KRB5_OS_TOFFSET_VALID)) {
        /* Use the time offset from the cache entry */
        os_ctx->time_offset = d->time_offset;
        os_ctx->usec_offset = d->usec_offset;
        os_ctx->os_flags = ((os_ctx->os_flags & ~KRB5_OS_TOFFSET_TIME) |
                            KRB5_OS_TOFFSET_VALID);
    }

    lid->ops = &krb5_mcc_ops;
    lid->data = d;
    *id = lid;
    return KRB5_OK;
}
/* Report the last-modification timestamp of the cache in *change_time. */
static krb5_error_code KRB5_CALLCONV
krb5_mcc_last_change_time(
    krb5_context context,
    krb5_ccache id,
    krb5_timestamp *change_time)
{
    krb5_mcc_data *d = (krb5_mcc_data *) id->data;

    /* Read the stamp under the cache lock so we see a consistent value. */
    k5_cc_mutex_lock(context, &d->lock);
    *change_time = d->changetime;
    k5_cc_mutex_unlock(context, &d->lock);
    return 0;
}
/*
 * Begin a sequential traversal of the credentials in cache id.  The returned
 * cursor is simply the current head of the cache's cred list; it is consumed
 * by krb5_mcc_next_cred and krb5_mcc_end_seq_get.  Results are undefined if
 * the cache is modified during the traversal.
 *
 * Errors: system errors (mutex).
 */
krb5_error_code KRB5_CALLCONV
krb5_mcc_start_seq_get(krb5_context context, krb5_ccache id,
                       krb5_cc_cursor *cursor)
{
    krb5_mcc_data *cache = id->data;
    krb5_mcc_cursor head;

    /* Snapshot the list head under the cache lock. */
    k5_cc_mutex_lock(context, &cache->lock);
    head = cache->link;
    k5_cc_mutex_unlock(context, &cache->lock);

    *cursor = (krb5_cc_cursor) head;
    return KRB5_OK;
}
/*
 * Begin a sequential traversal of the credentials in cache id.  The cursor
 * records the cache generation so that krb5_mcc_next_cred can detect a
 * reinitialized or destroyed cache.  Results are undefined if the cache is
 * modified between this call and krb5_mcc_end_seq_get.
 *
 * Errors: KRB5_CC_NOMEM, system errors.
 */
krb5_error_code KRB5_CALLCONV
krb5_mcc_start_seq_get(krb5_context context, krb5_ccache id,
                       krb5_cc_cursor *cursor)
{
    krb5_mcc_data *cache = id->data;
    struct mcc_cursor *cur;

    cur = malloc(sizeof(*cur));
    if (cur == NULL)
        return KRB5_CC_NOMEM;

    /* Capture generation and list head together under the cache lock. */
    k5_cc_mutex_lock(context, &cache->lock);
    cur->generation = cache->generation;
    cur->next_link = cache->link;
    k5_cc_mutex_unlock(context, &cache->lock);

    *cursor = cur;
    return KRB5_OK;
}
/* Get the next credential from the cache file. */
static krb5_error_code KRB5_CALLCONV
fcc_next_cred(krb5_context context, krb5_ccache id, krb5_cc_cursor *cursor,
              krb5_creds *creds)
{
    krb5_error_code ret;
    krb5_fcc_cursor *fcursor = *cursor;
    fcc_data *data = id->data;
    struct k5buf buf;
    size_t maxsize;
    unsigned char *bytes;

    memset(creds, 0, sizeof(*creds));
    k5_cc_mutex_lock(context, &data->lock);
    MAYBE_OPEN(context, id, FCC_OPEN_RDONLY);
    k5_buf_init_dynamic(&buf);

    /* Seek to the cursor's saved position. */
    if (fcc_lseek(data, fcursor->pos, SEEK_SET) == -1) {
        ret = interpret_errno(context, errno);
        goto cleanup;
    }

    /* Load a marshalled cred into memory. */
    ret = get_size(context, id, &maxsize);
    if (ret)
        goto cleanup;   /* Fix: was "return ret", which leaked buf and left
                         * data->lock held and the file possibly open. */
    ret = load_cred(context, id, maxsize, &buf);
    if (ret)
        goto cleanup;
    bytes = (unsigned char *)k5_buf_data(&buf);
    if (bytes == NULL) {
        ret = ENOMEM;
        goto cleanup;
    }

    /* Advance the cursor past the cred we read, then unmarshal it from buf
     * into creds. */
    fcursor->pos = fcc_lseek(data, 0, SEEK_CUR);
    ret = k5_unmarshal_cred(bytes, k5_buf_len(&buf), version(id), creds);

cleanup:
    k5_free_buf(&buf);
    MAYBE_CLOSE(context, id, ret);
    k5_cc_mutex_unlock(context, &data->lock);
    return ret;
}
/*
 * Requires:
 * cursor is a krb5_cc_cursor originally obtained from
 * krb5_mcc_start_seq_get.
 *
 * Modifes:
 * cursor, creds
 *
 * Effects:
 * Fills in creds with the "next" credentals structure from the cache
 * id.  The actual order the creds are returned in is arbitrary.
 * Space is allocated for the variable length fields in the
 * credentials structure, so the object returned must be passed to
 * krb5_destroy_credential.
 *
 * The cursor is updated for the next call to krb5_mcc_next_cred.
 *
 * Errors:
 * system errors
 */
krb5_error_code KRB5_CALLCONV
krb5_mcc_next_cred(krb5_context context, krb5_ccache id,
                   krb5_cc_cursor *cursor, krb5_creds *creds)
{
    struct mcc_cursor *mcursor;
    krb5_error_code retval;
    krb5_mcc_data *d = id->data;

    memset(creds, 0, sizeof(krb5_creds));
    mcursor = *cursor;
    if (mcursor->next_link == NULL)
        return KRB5_CC_END;

    /*
     * Check the cursor generation against the cache generation in case the
     * cache has been reinitialized or destroyed, freeing the pointer in the
     * cursor.  Keep the cache locked while we copy the creds and advance the
     * pointer, in case another thread reinitializes the cache after we check
     * the generation.
     */
    k5_cc_mutex_lock(context, &d->lock);
    if (mcursor->generation != d->generation) {
        retval = KRB5_CC_END;
        goto done;
    }

    /* Skip over removed creds. */
    while (mcursor->next_link != NULL && mcursor->next_link->creds == NULL)
        mcursor->next_link = mcursor->next_link->next;
    if (mcursor->next_link == NULL) {
        retval = KRB5_CC_END;
        goto done;
    }

    /* Copy out the cred and advance only if the copy succeeded, so a failed
     * call can be retried. */
    retval = k5_copy_creds_contents(context, mcursor->next_link->creds, creds);
    if (retval == 0)
        mcursor->next_link = mcursor->next_link->next;

done:
    k5_cc_mutex_unlock(context, &d->lock);
    return retval;
}
/*
 * Modifies:
 * the memory cache
 *
 * Effects:
 * Remove the given creds from the ccache.  Matching entries are freed and
 * their list slots marked empty (NULL) rather than unlinked, so that active
 * cursors remain valid.
 */
static krb5_error_code KRB5_CALLCONV
krb5_mcc_remove_cred(krb5_context context, krb5_ccache cache,
                     krb5_flags flags, krb5_creds *creds)
{
    krb5_mcc_data *d = (krb5_mcc_data *)cache->data;
    krb5_mcc_link *node;

    k5_cc_mutex_lock(context, &d->lock);
    for (node = d->link; node != NULL; node = node->next) {
        if (node->creds == NULL)
            continue;
        if (!krb5int_cc_creds_match_request(context, flags, creds,
                                            node->creds))
            continue;
        krb5_free_creds(context, node->creds);
        node->creds = NULL;
    }
    k5_cc_mutex_unlock(context, &d->lock);
    return 0;
}
/*
 * Modifies:
 * id
 *
 * Effects:
 * Invalidates the id and drops its reference on the cache data, freeing
 * the data when this was the last reference.
 */
krb5_error_code KRB5_CALLCONV
krb5_mcc_close(krb5_context context, krb5_ccache id)
{
    krb5_mcc_data *d = id->data;
    int remaining;

    free(id);

    k5_cc_mutex_lock(context, &d->lock);
    remaining = --d->refcount;
    k5_cc_mutex_unlock(context, &d->lock);

    if (remaining != 0)
        return KRB5_OK;

    /* This was the last active handle referencing d, and d has already been
     * removed from the table, so release everything. */
    empty_mcc_cache(context, d);
    free(d->name);
    k5_cc_mutex_destroy(&d->lock);
    free(d);
    return KRB5_OK;
}
/*
 * Modifies:
 * id
 *
 * Effects:
 * Creates/refreshes the memory cred cache id.  If the cache exists, its
 * contents are destroyed.
 *
 * Errors:
 * system errors
 */
krb5_error_code KRB5_CALLCONV
krb5_mcc_initialize(krb5_context context, krb5_ccache id, krb5_principal princ)
{
    krb5_error_code ret;
    krb5_mcc_data *d;

    d = (krb5_mcc_data *)id->data;
    ret = k5_cc_mutex_lock(context, &d->lock);
    if (ret)
        return ret;

    /* Discard any existing contents, then install the new default
     * principal. */
    krb5_mcc_free(context, id);
    d = (krb5_mcc_data *)id->data;
    ret = krb5_copy_principal(context, princ, &d->prin);
    update_mcc_change_time(d);
    k5_cc_mutex_unlock(context, &d->lock);
    if (ret == KRB5_OK)
        krb5_change_cache();
    return ret;
}
/* Return the next memory cache in the per-type iteration, or a NULL *ccache
 * when the list is exhausted. */
static krb5_error_code KRB5_CALLCONV
krb5_mcc_ptcursor_next(
    krb5_context context,
    krb5_cc_ptcursor cursor,
    krb5_ccache *ccache)
{
    struct krb5_mcc_ptcursor_data *pos = cursor->data;
    krb5_ccache out;

    *ccache = NULL;
    if (pos->cur == NULL)       /* iteration already finished */
        return 0;

    out = malloc(sizeof(*out));
    if (out == NULL)
        return ENOMEM;
    out->ops = &krb5_mcc_ops;
    out->data = pos->cur->cache;

    /* Advance under the global mutex; other threads may alter the list. */
    k5_cc_mutex_lock(context, &krb5int_mcc_mutex);
    pos->cur = pos->cur->next;
    k5_cc_mutex_unlock(context, &krb5int_mcc_mutex);

    *ccache = out;
    return 0;
}
/* Create a per-type cursor positioned at the head of the global memory
 * cache list. */
static krb5_error_code KRB5_CALLCONV
krb5_mcc_ptcursor_new(
    krb5_context context,
    krb5_cc_ptcursor *cursor)
{
    krb5_error_code ret = 0;
    krb5_cc_ptcursor n = NULL;
    struct krb5_mcc_ptcursor_data *cdata = NULL;

    *cursor = NULL;
    n = malloc(sizeof(*n));
    if (n == NULL)
        return ENOMEM;
    n->ops = &krb5_mcc_ops;
    cdata = malloc(sizeof(struct krb5_mcc_ptcursor_data));
    if (cdata == NULL) {
        ret = ENOMEM;
        goto errout;
    }
    n->data = cdata;

    /* Snapshot the list head under the global mutex. */
    ret = k5_cc_mutex_lock(context, &krb5int_mcc_mutex);
    if (ret)
        goto errout;
    cdata->cur = mcc_head;
    ret = k5_cc_mutex_unlock(context, &krb5int_mcc_mutex);
    if (ret)
        goto errout;

errout:
    if (ret) {
        /* NOTE(review): presumably ptcursor_free releases n and sets it to
         * NULL through the pointer, so *cursor becomes NULL -- confirm. */
        krb5_mcc_ptcursor_free(context, &n);
    }
    *cursor = n;
    return ret;
}
/* Destroy the cache file and release the handle.  The file contents are
 * overwritten with zeros before unlinking, to scrub credential bytes. */
static krb5_error_code KRB5_CALLCONV
fcc_destroy(krb5_context context, krb5_ccache id)
{
    krb5_error_code ret = 0;
    fcc_data *data = id->data;
    int st, fd;
    struct stat buf;
    unsigned long i, size;
    unsigned int wlen;
    char zeros[BUFSIZ];
#ifdef MSDOS_FILESYSTEM
    ssize_t nwritten;
#endif

    k5_cc_mutex_lock(context, &data->lock);

    if (OPENCLOSE(id)) {
        invalidate_cache(data);
        fd = THREEPARAMOPEN(data->filename, O_RDWR | O_BINARY, 0);
        if (fd < 0) {
            ret = interpret_errno(context, errno);
            goto cleanup;
        }
        set_cloexec_fd(fd);
        data->fd = fd;
    } else {
        fcc_lseek(data, 0, SEEK_SET);
    }

#ifdef MSDOS_FILESYSTEM
    /*
     * "Disgusting bit of UNIX trivia" - that's how the writers of NFS describe
     * the ability of UNIX to still write to a file which has been unlinked.
     * Naturally, the PC can't do this.  As a result, we have to delete the
     * file after we wipe it clean, but that throws off all the error handling
     * code.  So we have do the work ourselves.
     */
    st = fstat(data->fd, &buf);
    if (st == -1) {
        ret = interpret_errno(context, errno);
        size = 0;               /* Nothing to wipe clean */
    } else {
        size = (unsigned long)buf.st_size;
    }

    memset(zeros, 0, BUFSIZ);
    while (size > 0) {
        wlen = (int)((size > BUFSIZ) ? BUFSIZ : size); /* How much to write */
        /* Fix: the write result was previously stored in the unsigned
         * variable i, so the error check (i < 0) could never fire and a
         * failed write both went undetected and underflowed size. */
        nwritten = write(data->fd, zeros, wlen);
        if (nwritten < 0) {
            ret = interpret_errno(context, errno);
            /* Don't jump to cleanup--we still want to delete the file. */
            break;
        }
        size -= nwritten;
    }

    if (OPENCLOSE(id)) {
        (void)close(((fcc_data *)id->data)->fd);
        data->fd = -1;
    }

    st = unlink(data->filename);
    if (st < 0) {
        ret = interpret_errno(context, errno);
        goto cleanup;
    }

#else /* MSDOS_FILESYSTEM */

    /* Unlink first; POSIX lets us keep writing through the open fd. */
    st = unlink(data->filename);
    if (st < 0) {
        ret = interpret_errno(context, errno);
        if (OPENCLOSE(id)) {
            (void)close(data->fd);
            data->fd = -1;
        }
        goto cleanup;
    }

    st = fstat(data->fd, &buf);
    if (st < 0) {
        ret = interpret_errno(context, errno);
        if (OPENCLOSE(id)) {
            (void)close(data->fd);
            data->fd = -1;
        }
        goto cleanup;
    }

    /* XXX This may not be legal XXX */
    size = (unsigned long)buf.st_size;
    memset(zeros, 0, BUFSIZ);
    for (i = 0; i < size / BUFSIZ; i++) {
        if (write(data->fd, zeros, BUFSIZ) < 0) {
            ret = interpret_errno(context, errno);
            if (OPENCLOSE(id)) {
                (void)close(data->fd);
                data->fd = -1;
            }
            goto cleanup;
        }
    }

    wlen = size % BUFSIZ;
    if (write(data->fd, zeros, wlen) < 0) {
        ret = interpret_errno(context, errno);
        if (OPENCLOSE(id)) {
            (void)close(data->fd);
            data->fd = -1;
        }
        goto cleanup;
    }

    st = close(data->fd);
    data->fd = -1;

    if (st)
        ret = interpret_errno(context, errno);

#endif /* MSDOS_FILESYSTEM */

cleanup:
    k5_cc_mutex_unlock(context, &data->lock);
    dereference(context, data);
    free(id);

    krb5_change_cache();
    return ret;
}
/* Create a file ccache handle for the pathname given by residual. */
static krb5_error_code KRB5_CALLCONV
fcc_resolve(krb5_context context, krb5_ccache *id, const char *residual)
{
    krb5_ccache lid;
    krb5_error_code ret;
    fcc_data *data;
    struct fcc_set *setptr;

    /* Look for an existing set entry for this filename under the global
     * mutex. */
    k5_cc_mutex_lock(context, &krb5int_cc_file_mutex);
    for (setptr = fccs; setptr; setptr = setptr->next) {
        if (!strcmp(setptr->data->filename, residual))
            break;
    }
    if (setptr) {
        data = setptr->data;
        /* Guard against refcount overflow/underflow around the increment. */
        assert(setptr->refcount != 0);
        setptr->refcount++;
        assert(setptr->refcount != 0);
        /* Take the per-cache lock before dropping the global one, so the
         * entry cannot be torn down underneath us. */
        k5_cc_mutex_lock(context, &data->lock);
        k5_cc_mutex_unlock(context, &krb5int_cc_file_mutex);
    } else {
        /* Not found; build a new fcc_data and set entry from scratch,
         * unwinding partial allocations on each failure. */
        data = malloc(sizeof(fcc_data));
        if (data == NULL) {
            k5_cc_mutex_unlock(context, &krb5int_cc_file_mutex);
            return KRB5_CC_NOMEM;
        }
        data->filename = strdup(residual);
        if (data->filename == NULL) {
            k5_cc_mutex_unlock(context, &krb5int_cc_file_mutex);
            free(data);
            return KRB5_CC_NOMEM;
        }
        ret = k5_cc_mutex_init(&data->lock);
        if (ret) {
            k5_cc_mutex_unlock(context, &krb5int_cc_file_mutex);
            free(data->filename);
            free(data);
            return ret;
        }
        k5_cc_mutex_lock(context, &data->lock);
        /* data->version,mode filled in for real later */
        data->version = data->mode = 0;
        data->flags = KRB5_TC_OPENCLOSE;
        data->fd = -1;
        data->valid_bytes = 0;
        setptr = malloc(sizeof(struct fcc_set));
        if (setptr == NULL) {
            k5_cc_mutex_unlock(context, &krb5int_cc_file_mutex);
            k5_cc_mutex_unlock(context, &data->lock);
            k5_cc_mutex_destroy(&data->lock);
            free(data->filename);
            free(data);
            return KRB5_CC_NOMEM;
        }
        setptr->refcount = 1;
        setptr->data = data;
        setptr->next = fccs;
        fccs = setptr;
        k5_cc_mutex_unlock(context, &krb5int_cc_file_mutex);
    }

    k5_cc_mutex_assert_locked(context, &data->lock);
    k5_cc_mutex_unlock(context, &data->lock);
    lid = malloc(sizeof(struct _krb5_ccache));
    if (lid == NULL) {
        /* Drop the reference we took (or the initial one we created). */
        dereference(context, data);
        return KRB5_CC_NOMEM;
    }

    lid->ops = &krb5_fcc_ops;
    lid->data = data;
    lid->magic = KV5M_CCACHE;

    /* Other routines will get errors on open, and callers must expect them,
     * if cache is non-existent/unusable. */
    *id = lid;
    return 0;
}