/*
 * Delete or update a particular child node
 *
 * ADL - 2/23/99, rewritten TYT 2/25/99
 */
/*
 * Replace the value of relation names[] (whose current value must equal
 * old_value) with new_value, or delete the relation if new_value is NULL.
 * Returns PROF_BAD_NAMESET unless at least a section and a relation name
 * are given, and PROF_EINVAL if old_value is NULL or empty.
 */
errcode_t KRB5_CALLCONV
profile_update_relation(profile_t profile, const char **names,
                        const char *old_value, const char *new_value)
{
    errcode_t retval;
    struct profile_node *section, *node;
    void *state;
    const char **cpp;

    /* Vtable-backed profiles delegate the operation to the module. */
    if (profile->vt) {
        if (!profile->vt->update_relation)
            return PROF_UNSUPPORTED;
        return profile->vt->update_relation(profile->cbdata, names,
                                            old_value, new_value);
    }

    retval = rw_setup(profile);
    if (retval)
        return retval;

    if (names == 0 || names[0] == 0 || names[1] == 0)
        return PROF_BAD_NAMESET;

    if (!old_value || !*old_value)
        return PROF_EINVAL;

    /* Check the lock result, as profile_rename_section() does. */
    retval = k5_mutex_lock(&profile->first_file->data->lock);
    if (retval)
        return retval;
    section = profile->first_file->data->root;
    /* Descend through all but the last name to find the parent section. */
    for (cpp = names; cpp[1]; cpp++) {
        state = 0;
        retval = profile_find_node(section, *cpp, 0, 1, &state, &section);
        if (retval) {
            k5_mutex_unlock(&profile->first_file->data->lock);
            return retval;
        }
    }

    state = 0;
    retval = profile_find_node(section, *cpp, old_value, 0, &state, &node);
    if (retval == 0) {
        if (new_value)
            retval = profile_set_relation_value(node, new_value);
        else
            retval = profile_remove_node(node);
    }
    if (retval == 0)
        profile->first_file->data->flags |= PROFILE_FILE_DIRTY;
    k5_mutex_unlock(&profile->first_file->data->lock);
    return retval;
}
/*
 * Iterate over every principal entry in the DB2 database, invoking func on
 * each decoded entry.  The global DB mutex is dropped around each callback
 * (so func may re-enter the DB layer) and re-acquired before the scan
 * continues.  A shared lock on the database is held for the whole scan.
 */
static krb5_error_code
ctx_iterate(krb5_context context, krb5_db2_context *dbc,
            krb5_error_code (*func)(krb5_pointer, krb5_db_entry *),
            krb5_pointer func_arg)
{
    DB *db;
    DBT key, contents;
    krb5_data contdata;
    krb5_db_entry *entry;
    krb5_error_code retval, retval2;
    int dbret;

    retval = ctx_lock(context, dbc, KRB5_LOCKMODE_SHARED);
    if (retval)
        return retval;

    db = dbc->db;
    dbret = db->seq(db, &key, &contents, R_FIRST);
    while (dbret == 0) {
        contdata.data = contents.data;
        contdata.length = contents.size;
        retval = krb5_decode_princ_entry(context, &contdata, &entry);
        if (retval)
            break;
        /* Release the global mutex while the callback runs. */
        retval = k5_mutex_unlock(krb5_db2_mutex);
        if (retval)
            break;
        retval = (*func)(func_arg, entry);
        krb5_dbe_free(context, entry);
        retval2 = k5_mutex_lock(krb5_db2_mutex);
        /* Note: If re-locking fails, the wrapper in db2_exp.c will
           still try to unlock it again.  That would be a bug.  Fix
           when integrating the locking better.  */
        /* A callback failure takes precedence over a re-lock failure. */
        if (retval)
            break;
        if (retval2) {
            retval = retval2;
            break;
        }
        dbret = db->seq(db, &key, &contents, R_NEXT);
    }
    /* seq() returns 1 at end-of-database (success), -1 on error (errno). */
    switch (dbret) {
    case 1:
    case 0:
        break;
    case -1:
    default:
        retval = errno;
    }
    (void) ctx_unlock(context, dbc);
    return retval;
}
/*
 * Store replay record rep in replay cache id and persist it.  Returns
 * KRB5KRB_AP_ERR_REPEAT if the record is already present (a replay),
 * KRB5_RC_MALLOC on allocation failure.  May trigger an automatic
 * expunge when misses sufficiently outnumber hits.
 */
krb5_error_code KRB5_CALLCONV
krb5_rc_dfl_store(krb5_context context, krb5_rcache id, krb5_donot_replay *rep)
{
    krb5_error_code ret;
    struct dfl_data *t;
    krb5_int32 now;

    ret = krb5_timeofday(context, &now);
    if (ret)
        return ret;

    ret = k5_mutex_lock(&id->lock);
    if (ret)
        return ret;

    switch(rc_store(context, id, rep, now, FALSE)) {
    case CMP_MALLOC:
        k5_mutex_unlock(&id->lock);
        return KRB5_RC_MALLOC;
    case CMP_REPLAY:
        k5_mutex_unlock(&id->lock);
        return KRB5KRB_AP_ERR_REPEAT;
    case 0: break;
    default: /* wtf? */ ;
    }
    t = (struct dfl_data *)id->data;
#ifndef NOIOSTUFF
    /* Append the new record to the on-disk cache file. */
    ret = krb5_rc_io_store(context, t, rep);
    if (ret) {
        k5_mutex_unlock(&id->lock);
        return ret;
    }
#endif
    /* Shall we automatically expunge? */
    if (t->nummisses > t->numhits + EXCESSREPS) {
        ret = krb5_rc_dfl_expunge_locked(context, id);
        k5_mutex_unlock(&id->lock);
        return ret;
    }
#ifndef NOIOSTUFF
    else {
        /* No expunge needed; just flush the record to disk. */
        if (krb5_rc_io_sync(context, &t->d)) {
            k5_mutex_unlock(&id->lock);
            return KRB5_RC_IO;
        }
    }
#endif
    k5_mutex_unlock(&id->lock);
    return 0;
}
/*
 * Close and free replay cache id: release its internal state, destroy
 * its lock, and free the handle itself.  The handle is invalid after
 * this call.
 */
krb5_error_code KRB5_CALLCONV
krb5_rc_dfl_close(krb5_context context, krb5_rcache id)
{
    krb5_error_code ret = k5_mutex_lock(&id->lock);

    if (ret)
        return ret;
    krb5_rc_dfl_close_no_free(context, id);
    k5_mutex_unlock(&id->lock);
    k5_mutex_destroy(&id->lock);
    free(id);
    return 0;
}
/*
 * Return the current logical change time (base time plus local offset),
 * read under the change-time mutex.  Returns ccNoError on success or the
 * mutex lock error.
 */
cc_int32 cci_context_change_time_get (cc_time_t *out_change_time)
{
    cc_int32 err = k5_mutex_lock (&g_change_time_mutex);

    if (!err) {
        *out_change_time = g_change_time + g_change_time_offset;
        k5_mutex_unlock (&g_change_time_mutex);
    }

    return err;
}
/*
 * Recover an existing replay cache, or initialize a fresh one with the
 * given lifespan if recovery fails.
 */
krb5_error_code KRB5_CALLCONV
krb5_rc_dfl_recover_or_init(krb5_context context, krb5_rcache id,
                            krb5_deltat lifespan)
{
    krb5_error_code ret;

    k5_mutex_lock(&id->lock);
    ret = krb5_rc_dfl_recover_locked(context, id);
    if (ret != 0) {
        /* Recovery failed; fall back to creating a new cache. */
        ret = krb5_rc_dfl_init_locked(context, id, lifespan);
    }
    k5_mutex_unlock(&id->lock);
    return ret;
}
/*
 * Write the profile data tree to outfile (unconditionally, regardless of
 * the dirty flag), holding the data lock for the duration of the write.
 */
errcode_t profile_flush_file_data_to_file(prf_data_t data, const char *outfile)
{
    errcode_t err;

    if (data == NULL || data->magic != PROF_MAGIC_FILE_DATA)
        return PROF_MAGIC_FILE_DATA;

    err = k5_mutex_lock(&data->lock);
    if (err)
        return err;
    err = write_data_to_file(data, outfile, 1);
    k5_mutex_unlock(&data->lock);
    return err;
}
/*
 * Remove the authorization-data attribute named by attr from the GSS name.
 * Initializes the name's authdata context on first use.  Returns a GSS
 * major status; the krb5 error code is mapped into *minor_status.
 */
OM_uint32 KRB5_CALLCONV
krb5_gss_delete_name_attribute(OM_uint32 *minor_status,
                               gss_name_t name,
                               gss_buffer_t attr)
{
    krb5_context context;
    krb5_error_code code;
    krb5_gss_name_t kname;
    krb5_data kattr;

    if (minor_status != NULL)
        *minor_status = 0;

    code = krb5_gss_init_context(&context);
    if (code != 0) {
        *minor_status = code;
        return GSS_S_FAILURE;
    }

    kname = (krb5_gss_name_t)name;

    code = k5_mutex_lock(&kname->lock);
    if (code != 0) {
        *minor_status = code;
        /* Fix: the context was leaked on this path (compare
         * krb5_gss_get_name_attribute, which frees it here). */
        krb5_free_context(context);
        return GSS_S_FAILURE;
    }

    if (kname->ad_context == NULL) {
        code = krb5_authdata_context_init(context, &kname->ad_context);
        if (code != 0) {
            *minor_status = code;
            k5_mutex_unlock(&kname->lock);
            krb5_free_context(context);
            return GSS_S_UNAVAILABLE;
        }
    }

    kattr.data = (char *)attr->value;
    kattr.length = attr->length;

    code = krb5_authdata_delete_attribute(context, kname->ad_context, &kattr);

    k5_mutex_unlock(&kname->lock);
    krb5_free_context(context);

    return kg_map_name_error(minor_status, code);
}
/*
 * Report the configured lifespan of replay cache id into *lifespan,
 * reading the cache data under its lock.
 */
krb5_error_code KRB5_CALLCONV
krb5_rc_dfl_get_span(krb5_context context, krb5_rcache id,
                     krb5_deltat *lifespan)
{
    krb5_error_code ret;
    struct dfl_data *data;

    ret = k5_mutex_lock(&id->lock);
    if (ret)
        return ret;
    data = (struct dfl_data *) id->data;
    *lifespan = data->lifespan;
    k5_mutex_unlock(&id->lock);
    return 0;
}
/*
 * Synchronize the logical change time with the server identified by
 * in_new_identifier.  If the server identity changed (e.g. the server
 * restarted), bump the local change-time offset so callers re-read state;
 * if the server died, fold the offset into the base time so the change
 * time keeps increasing monotonically.
 */
cc_int32 cci_context_change_time_sync (cci_identifier_t in_new_identifier)
{
    cc_int32 err = ccNoError;
    cc_int32 lock_err = err = k5_mutex_lock (&g_change_time_mutex);
    cc_uint32 server_ids_match = 0;
    cc_uint32 server_was_running = 0;
    cc_uint32 server_is_running = 0;

    if (!err) {
        if (!in_new_identifier) {
            /* Fix: previously passed err (which is 0 here) to
             * cci_check_error, silently accepting a NULL identifier. */
            err = cci_check_error (ccErrBadParam);
        }
    }

    if (!err) {
        err = cci_context_change_time_update_identifier (in_new_identifier,
                                                         &server_ids_match,
                                                         &server_was_running,
                                                         &server_is_running);
    }

    if (!err && !server_ids_match) {
        /* Increment the change time so callers re-read */
        g_change_time_offset++;

        /* If the server died, absorb the offset */
        if (server_was_running && !server_is_running) {
            cc_time_t now = time (NULL);

            g_change_time += g_change_time_offset;
            g_change_time_offset = 0;

            /* Make sure the change time increases, ideally with the
             * current time */
            g_change_time = (g_change_time < now) ? now : g_change_time;
        }

        cci_debug_printf ("%s noticed server changed ("
                          "server_was_running = %d; server_is_running = %d; "
                          "g_change_time = %d; g_change_time_offset = %d",
                          __FUNCTION__, server_was_running, server_is_running,
                          g_change_time, g_change_time_offset);
    }

    if (!lock_err) {
        k5_mutex_unlock (&g_change_time_mutex);
    }

    return err;
}
/*
 * Unregister thread-specific key keynum and clear its destructor.  The key
 * must currently be registered (asserted).  Three variants: no-threads
 * (run the destructor on the single stored value), Windows critical
 * section, and POSIX mutex.  Always returns 0.
 */
int k5_key_delete (k5_key_t keynum)
{
    assert(keynum >= 0 && keynum < K5_KEY_MAX);
#ifndef ENABLE_THREADS
    /* Single-threaded: invoke the destructor on any stored value now. */
    assert(destructors_set[keynum] == 1);
    if (destructors[keynum] && tsd_no_threads.values[keynum])
        (*destructors[keynum])(tsd_no_threads.values[keynum]);
    destructors[keynum] = 0;
    tsd_no_threads.values[keynum] = 0;
    destructors_set[keynum] = 0;
#elif defined(_WIN32)
    /* XXX: This can raise EXCEPTION_POSSIBLE_DEADLOCK.  */
    EnterCriticalSection(&key_lock);
    /* XXX Memory leak here!  Need to destroy the associated data for
       all threads.  But watch for race conditions in case threads are
       going away too.  */
    assert(destructors_set[keynum] == 1);
    destructors_set[keynum] = 0;
    destructors[keynum] = 0;
    LeaveCriticalSection(&key_lock);
#else /* POSIX */
    {
        int err;
        /* XXX RESOURCE LEAK:  Need to destroy the allocated objects
           first!  */
        err = k5_mutex_lock(&key_lock);
        if (err == 0) {
            assert(destructors_set[keynum] == 1);
            destructors_set[keynum] = 0;
            destructors[keynum] = NULL;
            k5_mutex_unlock(&key_lock);
        }
    }
#endif
    return 0;
}
/*
 * Write the profile data tree back to its own file, but only if the
 * DIRTY flag is set; a clean tree is a no-op success.
 */
errcode_t profile_flush_file_data(prf_data_t data)
{
    errcode_t retval = 0;

    if (!data || data->magic != PROF_MAGIC_FILE_DATA)
        return PROF_MAGIC_FILE_DATA;

    /* Fix: check the lock result, as profile_flush_file_data_to_file()
     * and profile_update_file_data() do. */
    retval = k5_mutex_lock(&data->lock);
    if (retval)
        return retval;
    if ((data->flags & PROFILE_FILE_DIRTY) == 0) {
        k5_mutex_unlock(&data->lock);
        return 0;
    }
    retval = write_data_to_file(data, data->filespec, 0);
    k5_mutex_unlock(&data->lock);
    return retval;
}
/*
 * Deep-copy GSS name src into a freshly allocated name in *dst.  The
 * source is locked while its fields are read so a concurrent attribute
 * update cannot race the copy.
 */
krb5_error_code
kg_duplicate_name(krb5_context context,
                  const krb5_gss_name_t src,
                  krb5_gss_name_t *dst)
{
    krb5_error_code ret;

    ret = k5_mutex_lock(&src->lock);
    if (ret != 0)
        return ret;

    ret = kg_init_name(context, src->princ, src->service, src->host,
                       src->ad_context, 0, dst);

    k5_mutex_unlock(&src->lock);
    return ret;
}
/*ARGSUSED*/
/*
 * Look up the replay-cache type string in the registered type list and
 * install its ops vector into *id (the caller has already allocated *id).
 * Returns KRB5_RC_TYPE_NOTFOUND if no such type is registered; otherwise
 * returns the result of initializing the rcache's lock.
 */
krb5_error_code
krb5_rc_resolve_type(krb5_context context, krb5_rcache *id, char *type)
{
    struct krb5_rc_typelist *entry;
    krb5_error_code err;

    err = k5_mutex_lock(&rc_typelist_lock);
    if (err)
        return err;
    for (entry = typehead; entry != NULL; entry = entry->next) {
        if (strcmp(entry->ops->type, type) == 0)
            break;
    }
    if (entry == NULL) {
        k5_mutex_unlock(&rc_typelist_lock);
        return KRB5_RC_TYPE_NOTFOUND;
    }
    /* allocate *id?  nah */
    (*id)->ops = entry->ops;
    k5_mutex_unlock(&rc_typelist_lock);
    return k5_mutex_init(&(*id)->lock);
}
/*
 * Return the saved per-thread message for in_error if the last recorded
 * error matches, otherwise fall back to the generic com_err string for the
 * remapped code.  If the lock cannot be taken, only the fallback is used.
 */
kim_string kim_error_message (kim_error in_error)
{
    int lock_err = k5_mutex_lock (&kim_error_lock);
    kim_string message = NULL;

    if (!lock_err) {
        kim_last_error last = k5_getspecific (K5_KEY_KIM_ERROR_MESSAGE);

        /* Use the cached message only if it was recorded for this code. */
        if (last && last->code == in_error) {
            message = last->message;
        }
        k5_mutex_unlock (&kim_error_lock);
    }

    return message ? message : error_message (kim_error_remap (in_error));
}
/*
 * Rename a particular section; if the new_section name is NULL,
 * delete it.
 *
 * ADL - 2/23/99, rewritten TYT 2/25/99
 */
errcode_t KRB5_CALLCONV
profile_rename_section(profile_t profile, const char **names,
                       const char *new_name)
{
    errcode_t err;
    struct profile_node *sect, *target;
    void *iter;
    const char **name;

    err = rw_setup(profile);
    if (err)
        return err;

    /* At least two name components are required. */
    if (names == 0 || names[0] == 0 || names[1] == 0)
        return PROF_BAD_NAMESET;

    err = k5_mutex_lock(&profile->first_file->data->lock);
    if (err)
        return err;
    sect = profile->first_file->data->root;
    /* Walk down to the parent of the section being renamed. */
    for (name = names; name[1]; name++) {
        iter = 0;
        err = profile_find_node(sect, *name, 0, 1, &iter, &sect);
        if (err) {
            k5_mutex_unlock(&profile->first_file->data->lock);
            return err;
        }
    }

    iter = 0;
    err = profile_find_node(sect, *name, 0, 1, &iter, &target);
    if (err == 0) {
        err = new_name ? profile_rename_node(target, new_name)
                       : profile_remove_node(target);
    }
    if (err == 0)
        profile->first_file->data->flags |= PROFILE_FILE_DIRTY;
    k5_mutex_unlock(&profile->first_file->data->lock);
    return err;
}
/* Copy a vtable profile. */
/*
 * Create a new profile handle sharing or duplicating the module state of
 * an existing vtable profile.  If the module supplies a copy method, the
 * new handle gets its own cbdata; otherwise both handles share cbdata.
 * The library handle's refcount is bumped so unloading is deferred until
 * both profiles are released.
 */
static errcode_t
copy_vtable_profile(profile_t profile, profile_t *ret_new_profile)
{
    errcode_t err;
    void *cbdata;
    profile_t new_profile;

    *ret_new_profile = NULL;

    if (profile->vt->copy) {
        /* Make a copy of profile's cbdata for the new profile. */
        err = profile->vt->copy(profile->cbdata, &cbdata);
        if (err)
            return err;
        err = init_module(profile->vt, cbdata, profile->lib_handle,
                          &new_profile);
        /* On failure, release the cbdata we just copied. */
        if (err && profile->vt->cleanup)
            profile->vt->cleanup(cbdata);
    } else {
        /* Use the same cbdata as the old profile. */
        err = init_module(profile->vt, profile->cbdata, profile->lib_handle,
                          &new_profile);
    }
    if (err)
        return err;

    /* Increment the refcount on the library handle if there is one. */
    if (profile->lib_handle) {
        err = k5_mutex_lock(&profile->lib_handle->lock);
        if (err) {
            /* Don't decrement the refcount we failed to increment. */
            new_profile->lib_handle = NULL;
            profile_abandon(new_profile);
            return err;
        }
        profile->lib_handle->refcount++;
        k5_mutex_unlock(&profile->lib_handle->lock);
    }

    *ret_new_profile = new_profile;
    return 0;
}
/*
 * Register thread-specific key keynum with an optional destructor, which
 * will be run on each thread's stored value at thread exit or key
 * deletion.  The key must not already be registered (asserted).  Returns
 * 0 on success or an error from initialization or locking.
 */
int
k5_key_register (k5_key_t keynum, void (*destructor)(void *))
{
    int err;

    err = CALL_INIT_FUNCTION(krb5int_thread_support_init);
    if (err)
        return err;

    assert(keynum >= 0 && keynum < K5_KEY_MAX);

#ifndef ENABLE_THREADS
    assert(destructors_set[keynum] == 0);
    destructors[keynum] = destructor;
    destructors_set[keynum] = 1;
    err = 0;
#elif defined(_WIN32)
    /* XXX: This can raise EXCEPTION_POSSIBLE_DEADLOCK.  */
    EnterCriticalSection(&key_lock);
    assert(destructors_set[keynum] == 0);
    destructors_set[keynum] = 1;
    destructors[keynum] = destructor;
    LeaveCriticalSection(&key_lock);
    err = 0;
#else /* POSIX */
    err = k5_mutex_lock(&key_lock);
    if (err == 0) {
        assert(destructors_set[keynum] == 0);
        destructors_set[keynum] = 1;
        destructors[keynum] = destructor;
        err = k5_mutex_unlock(&key_lock);
    }
#endif
    /* Fix: previously returned 0 unconditionally, discarding any lock or
     * unlock failure captured in err. */
    return err;
}
/*
 * Process/library teardown for the client IPC layer: destroy all cached
 * Mach service ports (under the ports mutex), then release the per-thread
 * connection-info key and the mutex itself.  After this runs, no further
 * IPC calls may be made.
 */
static void k5_cli_ipc_thread_fini (void)
{
    int err = 0;

    err = k5_mutex_lock (&g_service_ports_mutex);

    if (!err) {
        int i;

        for (i = 0; i < KIPC_SERVICE_COUNT; i++) {
            if (MACH_PORT_VALID (g_service_ports[i].service_port)) {
                mach_port_destroy (mach_task_self (),
                                   g_service_ports[i].service_port);
                g_service_ports[i].service_port = MACH_PORT_NULL;
            }
        }

        k5_mutex_unlock (&g_service_ports_mutex);
    }

    /* Tear down the thread-specific data and the mutex last; only safe
     * because nothing else can be using them at fini time. */
    k5_key_delete (K5_KEY_IPC_CONNECTION_INFO);
    k5_mutex_destroy (&g_service_ports_mutex);
}
/*
 * Unlink and free the et_list entry registered for error table et.
 * Returns 0 on success (or if com_err never initialized), ENOENT if the
 * table was not found in the list.
 */
errcode_t KRB5_CALLCONV
remove_error_table(const struct error_table *et)
{
    struct et_list **prev, *cur;

    if (CALL_INIT_FUNCTION(com_err_initialize))
        return 0;
    k5_mutex_lock(&et_list_lock);

    /* Remove the entry that matches the error table instance. */
    for (prev = &et_list; (cur = *prev) != NULL; prev = &cur->next) {
        if (cur->table == et) {
            *prev = cur->next;
            free(cur);
            k5_mutex_unlock(&et_list_lock);
            return 0;
        }
    }
    k5_mutex_unlock(&et_list_lock);
    return ENOENT;
}
/*
 * Library finalizer for com_err: release the per-thread error key, the
 * hook lock, and every registered error-table list entry, then destroy
 * the list lock.  Skipped entirely if initialization never ran or the
 * program is exiting (the OS reclaims everything anyway).
 */
void com_err_terminate(void)
{
    struct et_list *e, *enext;
    if (! INITIALIZER_RAN(com_err_initialize) || PROGRAM_EXITING()) {
#ifdef SHOW_INITFINI_FUNCS
        printf("com_err_terminate: skipping\n");
#endif
        return;
    }
#ifdef SHOW_INITFINI_FUNCS
    printf("com_err_terminate\n");
#endif
    k5_key_delete(K5_KEY_COM_ERR);
    k5_mutex_destroy(&com_err_hook_lock);
    k5_mutex_lock(&et_list_lock);
    /* Free the list nodes only; the error tables themselves are owned by
     * their registrants. */
    for (e = et_list; e; e = enext) {
        enext = e->next;
        free(e);
    }
    k5_mutex_unlock(&et_list_lock);
    k5_mutex_destroy(&et_list_lock);
    terminated = 1;
}
/*
 * Validate a GSS credential handle: check the handle itself, then (if it
 * has a ccache) verify that the ccache's default principal still matches
 * the credential's principal.
 *
 * NOTE: on success (GSS_S_COMPLETE) this function returns with cred->lock
 * still HELD -- the caller is responsible for unlocking.  Error paths
 * unlock before returning.  This asymmetry is intentional; do not "fix"
 * it without auditing all callers.
 */
OM_uint32
krb5_gss_validate_cred_1(OM_uint32 *minor_status, gss_cred_id_t cred_handle,
                         krb5_context context)
{
    krb5_gss_cred_id_t cred;
    krb5_error_code code;
    krb5_principal princ;

    if (!kg_validate_cred_id(cred_handle)) {
        *minor_status = (OM_uint32) G_VALIDATE_FAILED;
        return(GSS_S_CALL_BAD_STRUCTURE|GSS_S_DEFECTIVE_CREDENTIAL);
    }

    cred = (krb5_gss_cred_id_t) cred_handle;

    code = k5_mutex_lock(&cred->lock);
    if (code) {
        *minor_status = code;
        return GSS_S_FAILURE;
    }
    if (cred->ccache) {
        if ((code = krb5_cc_get_principal(context, cred->ccache, &princ))) {
            k5_mutex_unlock(&cred->lock);
            *minor_status = code;
            return(GSS_S_DEFECTIVE_CREDENTIAL);
        }
        /* Credential and ccache must name the same principal. */
        if (!krb5_principal_compare(context, princ, cred->princ)) {
            k5_mutex_unlock(&cred->lock);
            *minor_status = KG_CCACHE_NOMATCH;
            return(GSS_S_DEFECTIVE_CREDENTIAL);
        }
        (void)krb5_free_principal(context, princ);
    }
    /* Success: lock deliberately left held for the caller. */
    *minor_status = 0;
    return GSS_S_COMPLETE;
}
/*
 * Iterate over all principal entries, invoking func on each.  backwards
 * selects reverse scan order; recursive uses bt_rseq() cursors (B-tree
 * databases only, and only when HAVE_BT_RSEQ is available).  The global
 * DB mutex is dropped around each callback and re-acquired afterward.
 */
krb5_error_code
krb5_db2_iterate_ext(krb5_context context,
                     krb5_error_code(*func) (krb5_pointer, krb5_db_entry *),
                     krb5_pointer func_arg, int backwards, int recursive)
{
    krb5_db2_context *db_ctx;
    DB *db;
    DBT key, contents;
    krb5_data contdata;
    krb5_db_entry *entry;
    krb5_error_code retval;
    int dbret;
    void *cookie;

    cookie = NULL;
    if (!k5db2_inited(context))
        return KRB5_KDB_DBNOTINITED;

    db_ctx = context->dal_handle->db_context;
    retval = krb5_db2_lock(context, KRB5_LOCKMODE_SHARED);

    if (retval)
        return retval;

    db = db_ctx->db;
    /* Recursive scans rely on bt_rseq, which only B-trees support. */
    if (recursive && db->type != DB_BTREE) {
        (void) krb5_db2_unlock(context);
        return KRB5_KDB_UK_RERROR;      /* Not optimal, but close enough. */
    }

    if (!recursive) {
        dbret = (*db->seq) (db, &key, &contents,
                            backwards ? R_LAST : R_FIRST);
    } else {
#ifdef HAVE_BT_RSEQ
        dbret = bt_rseq(db, &key, &contents, &cookie,
                        backwards ? R_LAST : R_FIRST);
#else
        (void) krb5_db2_unlock(context);
        return KRB5_KDB_UK_RERROR;      /* Not optimal, but close enough. */
#endif
    }
    while (dbret == 0) {
        krb5_error_code retval2;

        contdata.data = contents.data;
        contdata.length = contents.size;
        retval = krb5_decode_princ_entry(context, &contdata, &entry);
        if (retval)
            break;
        /* Drop the global mutex while the callback runs. */
        retval = k5_mutex_unlock(krb5_db2_mutex);
        if (retval)
            break;
        retval = (*func)(func_arg, entry);
        krb5_dbe_free(context, entry);
        retval2 = k5_mutex_lock(krb5_db2_mutex);
        /* Note: If re-locking fails, the wrapper in db2_exp.c will
           still try to unlock it again.  That would be a bug.  Fix
           when integrating the locking better.  */
        if (retval)
            break;
        if (retval2) {
            retval = retval2;
            break;
        }
        if (!recursive) {
            dbret = (*db->seq) (db, &key, &contents,
                                backwards ? R_PREV : R_NEXT);
        } else {
#ifdef HAVE_BT_RSEQ
            dbret = bt_rseq(db, &key, &contents, &cookie,
                            backwards ? R_PREV : R_NEXT);
#else
            (void) krb5_db2_unlock(context);
            return KRB5_KDB_UK_RERROR;  /* Not optimal, but close enough. */
#endif
        }
    }
    /* seq returns 1 at end-of-database (success), -1 on error (errno). */
    switch (dbret) {
    case 1:
    case 0:
        break;
    case -1:
    default:
        retval = errno;
    }
    (void) krb5_db2_unlock(context);
    return retval;
}
/* Callable versions of the various macros.  */

/* Function wrapper so code outside this translation unit can take a
 * k5_mutex without access to the k5_mutex_lock macro definition. */
void KRB5_CALLCONV
krb5int_mutex_lock (k5_mutex_t *m)
{
    k5_mutex_lock (m);
}
/*
 * S4U2Self: acquire credentials for user using the impersonator's TGT,
 * then compose a delegated credential from the result.  The user's name
 * lock is held only while exporting its authdata into the TGS request.
 * On success *output_cred holds the new credential; the krb5 code goes
 * into *minor_status on failure.
 */
static OM_uint32
kg_impersonate_name(OM_uint32 *minor_status,
                    const krb5_gss_cred_id_t impersonator_cred,
                    const krb5_gss_name_t user,
                    OM_uint32 time_req,
                    krb5_gss_cred_id_t *output_cred,
                    OM_uint32 *time_rec,
                    krb5_context context)
{
    OM_uint32 major_status;
    krb5_error_code code;
    krb5_creds in_creds, *out_creds = NULL;

    *output_cred = NULL;
    memset(&in_creds, 0, sizeof(in_creds));

    in_creds.client = user->princ;
    in_creds.server = impersonator_cred->name->princ;

    /* Constrain the request to the impersonator's first preferred enctype,
     * if any were configured. */
    if (impersonator_cred->req_enctypes != NULL)
        in_creds.keyblock.enctype = impersonator_cred->req_enctypes[0];

    code = k5_mutex_lock(&user->lock);
    if (code != 0) {
        *minor_status = code;
        return GSS_S_FAILURE;
    }

    if (user->ad_context != NULL) {
        /* Carry the user's authorization data into the TGS request;
         * in_creds.authdata is owned by us and freed below. */
        code = krb5_authdata_export_authdata(context,
                                             user->ad_context,
                                             AD_USAGE_TGS_REQ,
                                             &in_creds.authdata);
        if (code != 0) {
            k5_mutex_unlock(&user->lock);
            *minor_status = code;
            return GSS_S_FAILURE;
        }
    }

    k5_mutex_unlock(&user->lock);

    code = krb5_get_credentials_for_user(context,
                                         KRB5_GC_CANONICALIZE |
                                         KRB5_GC_NO_STORE,
                                         impersonator_cred->ccache,
                                         &in_creds,
                                         NULL, &out_creds);
    if (code != 0) {
        krb5_free_authdata(context, in_creds.authdata);
        *minor_status = code;
        return GSS_S_FAILURE;
    }

    major_status = kg_compose_deleg_cred(minor_status,
                                         impersonator_cred,
                                         out_creds,
                                         time_req,
                                         output_cred,
                                         time_rec,
                                         context);

    krb5_free_authdata(context, in_creds.authdata);
    krb5_free_creds(context, out_creds);

    return major_status;
}
/*
 * Copy every credential from the GSS credential's ccache into the ccache
 * passed in value->value.  Only valid for initiator credentials; accept-
 * only credentials are rejected with G_BAD_USAGE.  The credential lock is
 * held across the whole ccache iteration.
 */
OM_uint32 KRB5_CALLCONV
gss_krb5int_copy_ccache(OM_uint32 *minor_status,
                        gss_cred_id_t cred_handle,
                        const gss_OID desired_object,
                        const gss_buffer_t value)
{
    krb5_gss_cred_id_t k5creds;
    krb5_cc_cursor cursor;
    krb5_creds creds;
    krb5_error_code code;
    krb5_context context;
    krb5_ccache out_ccache;

    /* The caller must pass exactly a krb5_ccache pointer in value. */
    assert(value->length == sizeof(out_ccache));

    if (value->length != sizeof(out_ccache))
        return GSS_S_FAILURE;

    out_ccache = (krb5_ccache)value->value;

    /* cred handle will have been validated by gssspi_set_cred_option() */

    k5creds = (krb5_gss_cred_id_t) cred_handle;
    code = k5_mutex_lock(&k5creds->lock);
    if (code) {
        *minor_status = code;
        return GSS_S_FAILURE;
    }

    if (k5creds->usage == GSS_C_ACCEPT) {
        k5_mutex_unlock(&k5creds->lock);
        *minor_status = (OM_uint32) G_BAD_USAGE;
        return(GSS_S_FAILURE);
    }

    code = krb5_gss_init_context(&context);
    if (code) {
        k5_mutex_unlock(&k5creds->lock);
        *minor_status = code;
        return GSS_S_FAILURE;
    }

    code = krb5_cc_start_seq_get(context, k5creds->ccache, &cursor);
    if (code) {
        k5_mutex_unlock(&k5creds->lock);
        *minor_status = code;
        save_error_info(*minor_status, context);
        krb5_free_context(context);
        return(GSS_S_FAILURE);
    }
    /* Copy creds one at a time; stop on the first store failure. */
    while (!code && !krb5_cc_next_cred(context, k5creds->ccache,
                                       &cursor, &creds)) {
        code = krb5_cc_store_cred(context, out_ccache, &creds);
        krb5_free_cred_contents(context, &creds);
    }
    krb5_cc_end_seq_get(context, k5creds->ccache, &cursor);
    k5_mutex_unlock(&k5creds->lock);
    *minor_status = code;
    if (code)
        save_error_info(*minor_status, context);
    krb5_free_context(context);
    return code ? GSS_S_FAILURE : GSS_S_COMPLETE;
}
/*
 * Export a GSS name, including any authorization-data attributes, into a
 * self-describing composite token:
 *   0x04 0x02|0x01  len16  0x06 oid-len OID  len32 principal  [len32 attrs]
 * (tag byte 0x02 when attributes are present, 0x01 otherwise).  The
 * caller owns exp_composite_name->value on success.
 */
OM_uint32 KRB5_CALLCONV
krb5_gss_export_name_composite(OM_uint32 *minor_status,
                               gss_name_t name,
                               gss_buffer_t exp_composite_name)
{
    krb5_context context;
    krb5_error_code code;
    krb5_gss_name_t kname;
    krb5_data *attrs = NULL;
    char *princstr = NULL;
    unsigned char *cp;
    size_t princlen;

    if (minor_status != NULL)
        *minor_status = 0;

    code = krb5_gss_init_context(&context);
    if (code != 0) {
        *minor_status = code;
        return GSS_S_FAILURE;
    }

    kname = (krb5_gss_name_t)name;

    code = k5_mutex_lock(&kname->lock);
    if (code != 0) {
        *minor_status = code;
        /* Fix: the context was leaked on this path. */
        krb5_free_context(context);
        return GSS_S_FAILURE;
    }

    code = krb5_unparse_name(context, kname->princ, &princstr);
    if (code != 0)
        goto cleanup;

    princlen = strlen(princstr);

    if (kname->ad_context != NULL) {
        code = krb5_authdata_export_attributes(context,
                                               kname->ad_context,
                                               AD_USAGE_MASK,
                                               &attrs);
        if (code != 0)
            goto cleanup;
    }

    /* 04 02 OID Name AuthData */

    exp_composite_name->length = 10 + gss_mech_krb5->length + princlen;
    if (attrs != NULL)
        exp_composite_name->length += 4 + attrs->length;
    exp_composite_name->value = malloc(exp_composite_name->length);
    if (exp_composite_name->value == NULL) {
        code = ENOMEM;
        goto cleanup;
    }

    cp = exp_composite_name->value;

    /* Note: we assume the OID will be less than 128 bytes... */
    *cp++ = 0x04;
    if (attrs != NULL)
        *cp++ = 0x02;
    else
        *cp++ = 0x01;

    store_16_be(gss_mech_krb5->length + 2, cp);
    cp += 2;
    *cp++ = 0x06;
    *cp++ = (gss_mech_krb5->length) & 0xFF;
    memcpy(cp, gss_mech_krb5->elements, gss_mech_krb5->length);
    cp += gss_mech_krb5->length;

    store_32_be(princlen, cp);
    cp += 4;
    memcpy(cp, princstr, princlen);
    cp += princlen;

    if (attrs != NULL) {
        store_32_be(attrs->length, cp);
        cp += 4;
        memcpy(cp, attrs->data, attrs->length);
        cp += attrs->length;
    }

cleanup:
    krb5_free_unparsed_name(context, princstr);
    krb5_free_data(context, attrs);
    k5_mutex_unlock(&kname->lock);
    krb5_free_context(context);

    return kg_map_name_error(minor_status, code);
}
/*
 * Look up an authorization-data attribute on a GSS name (RFC 6680
 * GSS_Get_name_attribute).  Any of authenticated, complete, value,
 * display_value may be NULL if the caller does not want that output;
 * *more is updated for multi-valued attributes.  Initializes the name's
 * authdata context on first use.
 */
OM_uint32 KRB5_CALLCONV
krb5_gss_get_name_attribute(OM_uint32 *minor_status,
                            gss_name_t name,
                            gss_buffer_t attr,
                            int *authenticated,
                            int *complete,
                            gss_buffer_t value,
                            gss_buffer_t display_value,
                            int *more)
{
    krb5_context context;
    krb5_error_code code;
    krb5_gss_name_t kname;
    krb5_data kattr;
    krb5_boolean kauthenticated;
    krb5_boolean kcomplete;
    krb5_data kvalue;
    krb5_data kdisplay_value;

    if (minor_status != NULL)
        *minor_status = 0;

    code = krb5_gss_init_context(&context);
    if (code != 0) {
        *minor_status = code;
        return GSS_S_FAILURE;
    }

    kname = (krb5_gss_name_t)name;

    code = k5_mutex_lock(&kname->lock);
    if (code != 0) {
        *minor_status = code;
        krb5_free_context(context);
        return GSS_S_FAILURE;
    }

    if (kname->ad_context == NULL) {
        code = krb5_authdata_context_init(context, &kname->ad_context);
        if (code != 0) {
            *minor_status = code;
            k5_mutex_unlock(&kname->lock);
            krb5_free_context(context);
            return GSS_S_UNAVAILABLE;
        }
    }

    kattr.data = (char *)attr->value;
    kattr.length = attr->length;

    kauthenticated = FALSE;
    kcomplete = FALSE;

    /* Only request the outputs the caller asked for. */
    code = krb5_authdata_get_attribute(context,
                                       kname->ad_context,
                                       &kattr,
                                       &kauthenticated,
                                       &kcomplete,
                                       value ? &kvalue : NULL,
                                       display_value ? &kdisplay_value : NULL,
                                       more);
    if (code == 0) {
        if (value != NULL)
            code = data_to_gss(&kvalue, value);

        if (authenticated != NULL)
            *authenticated = kauthenticated;
        if (complete != NULL)
            *complete = kcomplete;

        if (display_value != NULL) {
            if (code == 0)
                code = data_to_gss(&kdisplay_value, display_value);
            else
                /* value conversion failed; don't leak the display data. */
                free(kdisplay_value.data);
        }
    }

    k5_mutex_unlock(&kname->lock);
    krb5_free_context(context);

    return kg_map_name_error(minor_status, code);
}
/*
 * (Re)load the profile data tree from its backing file if the file has
 * changed since the last load.  Uses stat() mtime (with sub-second
 * precision where available) to detect changes, and skips the stat
 * entirely if one was already done this second.  Without stat(), the
 * in-core image is assumed current once loaded.
 */
errcode_t profile_update_file_data(prf_data_t data)
{
    errcode_t retval;
#ifdef HAVE_STAT
    struct stat st;
    unsigned long frac;
    time_t now;
#endif
    FILE *f;

    retval = k5_mutex_lock(&data->lock);
    if (retval)
        return retval;
#ifdef HAVE_STAT
    now = time(0);
    /* At most one stat() per second per file. */
    if (now == data->last_stat && data->root != NULL) {
        k5_mutex_unlock(&data->lock);
        return 0;
    }
    if (stat(data->filespec, &st)) {
        retval = errno;
        k5_mutex_unlock(&data->lock);
        return retval;
    }
    data->last_stat = now;
    /* Pick up the sub-second mtime field under whatever name this
     * platform's struct stat uses. */
#if defined HAVE_STRUCT_STAT_ST_MTIMENSEC
    frac = st.st_mtimensec;
#elif defined HAVE_STRUCT_STAT_ST_MTIMESPEC_TV_NSEC
    frac = st.st_mtimespec.tv_nsec;
#elif defined HAVE_STRUCT_STAT_ST_MTIM_TV_NSEC
    frac = st.st_mtim.tv_nsec;
#else
    frac = 0;
#endif
    /* File unchanged since last load; keep the current tree. */
    if (st.st_mtime == data->timestamp
        && frac == data->frac_ts
        && data->root != NULL) {
        k5_mutex_unlock(&data->lock);
        return 0;
    }
    /* Discard the stale tree and comment before re-parsing. */
    if (data->root) {
        profile_free_node(data->root);
        data->root = 0;
    }
    if (data->comment) {
        free(data->comment);
        data->comment = 0;
    }
#else
    /*
     * If we don't have the stat() call, assume that our in-core
     * memory image is correct.  That is, we won't reread the
     * profile file if it changes.
     */
    if (data->root) {
        k5_mutex_unlock(&data->lock);
        return 0;
    }
#endif
    errno = 0;
    f = fopen(data->filespec, "r");
    if (f == NULL) {
        retval = errno;
        k5_mutex_unlock(&data->lock);
        if (retval == 0)
            retval = ENOENT;
        return retval;
    }
    set_cloexec_file(f);
    data->upd_serial++;
    data->flags &= PROFILE_FILE_SHARED;  /* FIXME same as '=' operator */
    retval = profile_parse_file(f, &data->root);
    fclose(f);
    if (retval) {
        k5_mutex_unlock(&data->lock);
        return retval;
    }
    assert(data->root != NULL);
#ifdef HAVE_STAT
    /* Remember the mtime of the version we just parsed. */
    data->timestamp = st.st_mtime;
    data->frac_ts = frac;
#endif
    k5_mutex_unlock(&data->lock);
    return 0;
}
/* Acquire the global mutex protecting the shared profile trees list.
 * Returns 0 on success or the mutex error code.
 * (Fix: declared with (void) -- an empty parameter list is an
 * obsolescent non-prototype declaration in C.) */
int profile_lock_global(void)
{
    return k5_mutex_lock(&g_shared_trees_mutex);
}