/*
 * Copy the last serial number and timestamp received from the master into
 * the slave's ulog header, then flush the header to the backing file.
 */
static void
ulog_finish_update_slave(kdb_hlog_t *ulog, kdb_last_t lastentry)
{
    ulog->kdb_last_time = lastentry.last_time;
    ulog->kdb_last_sno = lastentry.last_sno;
    ulog_sync_header(ulog);
}
/*
 * Commit the log entry previously written for *upd: flag it committed,
 * mark the whole log stable, and sync both the entry and the header to
 * the memory-mapped file.  Returns 0 on success or an error code from
 * sync_update().
 */
krb5_error_code
ulog_finish_update(krb5_context context, kdb_incr_update_t *upd)
{
    krb5_error_code ret;
    kdb_ent_header_t *ent;
    unsigned int idx;
    kdb_log_context *log_ctx;
    kdb_hlog_t *ulog = NULL;
    uint32_t nentries;

    INIT_ULOG(context);
    nentries = log_ctx->ulogentries;

    /* Locate the circular-buffer slot holding this update's serial number. */
    idx = (upd->kdb_entry_sno - 1) % nentries;
    ent = INDEX(ulog, idx);

    /* Flag the entry committed, then restore the stable state. */
    ent->kdb_commit = TRUE;
    ulog->kdb_state = KDB_STABLE;

    ret = sync_update(ulog, ent);
    if (ret != 0)
        return ret;

    ulog_sync_header(ulog);
    return 0;
}
/*
 * Grow the per-entry block size of the update log.  Rather than unrolling
 * the existing log and copying it into a temporary log, we simply
 * reinitialize it; slaves will then perform a full resync, but resizes
 * should be rare.  Returns 0 on success, KRB5_LOG_ERROR if the new size
 * would exceed MAXLOGLEN or ulog is NULL, or errno on file-extension
 * failure.
 */
static krb5_error_code
resize(kdb_hlog_t *ulog, uint32_t ulogentries, int ulogfd,
       unsigned int recsize)
{
    unsigned int block_len, total_len;

    if (ulog == NULL)
        return KRB5_LOG_ERROR;

    /* Round recsize up to a multiple of ULOG_BLOCK, always advancing by at
     * least one block. */
    block_len = ((recsize / ULOG_BLOCK) + 1) * ULOG_BLOCK;
    total_len = sizeof(kdb_hlog_t) + ulogentries * block_len;

    /* Refuse to map a file larger than MAXLOGLEN. */
    if (total_len > MAXLOGLEN)
        return KRB5_LOG_ERROR;

    /* Reinitialize the header for the new block size. */
    memset(ulog, 0, sizeof(*ulog));
    ulog->kdb_hmagic = KDB_ULOG_HDR_MAGIC;
    ulog->db_version_num = KDB_VERSION;
    ulog->kdb_state = KDB_STABLE;
    ulog->kdb_block = block_len;
    ulog_sync_header(ulog);

    /* Grow the backing file to cover the new layout. */
    if (extend_file_to(ulogfd, total_len) < 0)
        return errno;

    return 0;
}
/* Reinitialize the log header and sync it to the backing file.  Locking is
 * the caller's responsibility. */
void
ulog_init_header(krb5_context context)
{
    kdb_log_context *log_ctx;
    kdb_hlog_t *ulog;

    /* INIT_ULOG sets log_ctx and ulog from the context's kdblog_context. */
    INIT_ULOG(context);

    reset_header(ulog);
    ulog_sync_header(ulog);
}
/*
 * Resize the log's per-entry block size.  The update log is reinitialized
 * instead of being unrolled and copied into a temporary log, for obvious
 * performance reasons.  Slaves will subsequently do a full resync, but the
 * need for resizing should be very small.  Returns 0 on success,
 * KRB5_LOG_ERROR if ulog is NULL or the result would exceed MAXLOGLEN,
 * or errno if the backing file cannot be extended.
 */
static krb5_error_code
ulog_resize(kdb_hlog_t *ulog, uint32_t ulogentries, int ulogfd,
            uint_t recsize)
{
    uint_t block_bytes, file_bytes;

    if (ulog == NULL)
        return (KRB5_LOG_ERROR);

    /* Round up to a multiple of ULOG_BLOCK (always at least one more). */
    block_bytes = ((recsize / ULOG_BLOCK) + 1) * ULOG_BLOCK;
    file_bytes = sizeof (kdb_hlog_t) + ulogentries * block_bytes;

    /* Can't map into a file larger than MAXLOGLEN. */
    if (file_bytes > MAXLOGLEN)
        return (KRB5_LOG_ERROR);

    /* Reinitialize the log header with the new block size. */
    (void) memset(ulog, 0, sizeof (kdb_hlog_t));
    ulog->kdb_hmagic = KDB_ULOG_HDR_MAGIC;
    ulog->db_version_num = KDB_VERSION;
    ulog->kdb_state = KDB_STABLE;
    ulog->kdb_block = block_bytes;
    ulog_sync_header(ulog);

    /* Expand the backing file to match the new layout. */
    if (extend_file_to(ulogfd, file_bytes) < 0)
        return errno;

    return (0);
}
/*
 * Map the log file to memory for performance and simplicity.
 *
 * Called by: if iprop_enabled then ulog_map();
 * Assumes that the caller will terminate on ulog_map, hence munmap and
 * closing of the fd are implicitly performed by the caller.
 *
 * Semantics for various values of caller:
 *
 *  - FKPROPLOG
 *
 *    Don't create if it doesn't exist, map as MAP_PRIVATE.
 *
 *  - FKPROPD
 *
 *    Create and initialize if need be, map as MAP_SHARED.
 *
 *  - FKCOMMAND
 *
 *    Create and [re-]initialize if need be, size appropriately, map as
 *    MAP_SHARED.  (Intended for kdb5_util create and kdb5_util load of
 *    non-iprop dump.)
 *
 *  - FKADMIN
 *
 *    Create and [re-]initialize if need be, size appropriately, map as
 *    MAP_SHARED, and check consistency and recover as necessary.  (Intended
 *    for kadmind and kadmin.local.)
 *
 * Returns 0 on success else failure.
 */
krb5_error_code
ulog_map(krb5_context context, const char *logname, uint32_t ulogentries,
         int caller, char **db_args)
{
    struct stat st;
    krb5_error_code retval;
    uint32_t ulog_filesize;
    kdb_log_context *log_ctx;
    kdb_hlog_t *ulog = NULL;
    int ulogfd = -1;

    ulog_filesize = sizeof(kdb_hlog_t);

    if (stat(logname, &st) == -1) {
        /* File doesn't exist so we exit with kproplog. */
        if (caller == FKPROPLOG)
            return errno;

        ulogfd = open(logname, O_RDWR | O_CREAT, 0600);
        if (ulogfd == -1)
            return errno;

        /* NOTE(review): from here down, early error returns leave ulogfd
         * open; per the function comment the caller is expected to
         * terminate on failure, so the leak is deliberate-looking — but
         * verify before reusing this in a long-lived caller. */
        if (lseek(ulogfd, 0L, SEEK_CUR) == -1)
            return errno;

        /* Only writers (kadmind, kdb5_util) pre-size the entry array. */
        if (caller == FKADMIND || caller == FKCOMMAND)
            ulog_filesize += ulogentries * ULOG_BLOCK;

        if (extend_file_to(ulogfd, ulog_filesize) < 0)
            return errno;
    } else {
        ulogfd = open(logname, O_RDWR, 0600);
        if (ulogfd == -1)
            return errno;
    }

    if (caller == FKPROPLOG) {
        /* kproplog gets a private (copy-on-write) view sized to the file. */
        if (fstat(ulogfd, &st) < 0) {
            close(ulogfd);
            return errno;
        }
        ulog_filesize = st.st_size;

        ulog = mmap(0, ulog_filesize, PROT_READ | PROT_WRITE, MAP_PRIVATE,
                    ulogfd, 0);
    } else {
        /* kadmind, kpropd, & kcommands should update stores; map the
         * maximum size shared so later extension needs no remap. */
        ulog = mmap(0, MAXLOGLEN, PROT_READ | PROT_WRITE, MAP_SHARED,
                    ulogfd, 0);
    }

    if (ulog == MAP_FAILED) {
        /* Can't map update log file to memory. */
        close(ulogfd);
        return errno;
    }

    /* Create the per-context log context on first use; reuse otherwise. */
    if (!context->kdblog_context) {
        log_ctx = k5alloc(sizeof(kdb_log_context), &retval);
        if (log_ctx == NULL)
            return retval;
        memset(log_ctx, 0, sizeof(*log_ctx));
        context->kdblog_context = log_ctx;
    } else {
        log_ctx = context->kdblog_context;
    }
    log_ctx->ulog = ulog;
    log_ctx->ulogentries = ulogentries;
    log_ctx->ulogfd = ulogfd;

    retval = ulog_lock(context, KRB5_LOCKMODE_EXCLUSIVE);
    if (retval)
        return retval;

    /* A nonzero magic other than ours means the header is corrupt. */
    if (ulog->kdb_hmagic != KDB_ULOG_HDR_MAGIC && ulog->kdb_hmagic != 0) {
        ulog_lock(context, KRB5_LOCKMODE_UNLOCK);
        return KRB5_LOG_CORRUPT;
    }

    /* Magic of zero means a brand-new log: initialize the header.  Only
     * sync it for writers; kproplog's mapping is private anyway. */
    if (ulog->kdb_hmagic != KDB_ULOG_HDR_MAGIC) {
        reset_header(ulog);
        if (caller != FKPROPLOG)
            ulog_sync_header(ulog);
        ulog_lock(context, KRB5_LOCKMODE_UNLOCK);
        return 0;
    }

    if (caller == FKPROPLOG || caller == FKPROPD) {
        /* kproplog and kpropd don't need to do anything else. */
        ulog_lock(context, KRB5_LOCKMODE_UNLOCK);
        return 0;
    }

    assert(caller == FKADMIND || caller == FKCOMMAND);

    /* Reinit ulog if the log is being truncated or expanded after we have
     * circled. */
    if (ulog->kdb_num != ulogentries) {
        if (ulog->kdb_num != 0 &&
            (ulog->kdb_last_sno > ulog->kdb_num ||
             ulog->kdb_num > ulogentries)) {
            reset_header(ulog);
            ulog_sync_header(ulog);
        }

        /* Expand ulog if we have specified a greater size. */
        if (ulog->kdb_num < ulogentries) {
            ulog_filesize += ulogentries * ulog->kdb_block;

            if (extend_file_to(ulogfd, ulog_filesize) < 0) {
                ulog_lock(context, KRB5_LOCKMODE_UNLOCK);
                return errno;
            }
        }
    }
    ulog_lock(context, KRB5_LOCKMODE_UNLOCK);
    return 0;
}
/* Used by the slave to update its hash db from the incr update log.  Must be
 * called with lock held. */
krb5_error_code
ulog_replay(krb5_context context, kdb_incr_result_t *incr_ret, char **db_args)
{
    krb5_db_entry *entry = NULL;
    kdb_incr_update_t *upd = NULL, *fupd;
    int i, no_of_updates;
    krb5_error_code retval;
    krb5_principal dbprinc;
    kdb_last_t errlast, *last;
    char *dbprincstr;
    kdb_log_context *log_ctx;
    kdb_hlog_t *ulog = NULL;

    INIT_ULOG(context);

    no_of_updates = incr_ret->updates.kdb_ulog_t_len;
    upd = incr_ret->updates.kdb_ulog_t_val;
    fupd = upd;                 /* keep the head for ulog_free_entries() */

    /* We reset last_sno and last_time to 0, if krb5_db2_db_put_principal or
     * krb5_db2_db_delete_principal fail.  The header is still synced on the
     * error path, which forces a full resync next time. */
    errlast.last_sno = (unsigned int)0;
    errlast.last_time.seconds = (unsigned int)0;
    errlast.last_time.useconds = (unsigned int)0;
    last = &errlast;

    retval = krb5_db_open(context, db_args,
                          KRB5_KDB_OPEN_RW | KRB5_KDB_SRV_TYPE_ADMIN);
    if (retval)
        goto cleanup;

    for (i = 0; i < no_of_updates; i++) {
        /* NOTE(review): this continue skips the upd++ at the bottom of the
         * loop, so an uncommitted entry would make later iterations re-read
         * the same update.  Presumably uncommitted entries are never
         * transmitted — verify against the sender. */
        if (!upd->kdb_commit)
            continue;

        if (upd->kdb_deleted) {
            /* Deletion: reconstruct the principal name and remove it
             * without logging (we are replaying the log itself). */
            dbprincstr = k5memdup0(upd->kdb_princ_name.utf8str_t_val,
                                   upd->kdb_princ_name.utf8str_t_len,
                                   &retval);
            if (dbprincstr == NULL)
                goto cleanup;

            retval = krb5_parse_name(context, dbprincstr, &dbprinc);
            free(dbprincstr);
            if (retval)
                goto cleanup;

            retval = krb5int_delete_principal_no_log(context, dbprinc);
            krb5_free_principal(context, dbprinc);
            if (retval)
                goto cleanup;
        } else {
            /* Add/modify: convert the update into a db entry and store it
             * without logging. */
            entry = k5alloc(sizeof(krb5_db_entry), &retval);
            if (entry == NULL)
                goto cleanup;

            /* NOTE(review): if ulog_conv_2dbentry fails, entry is not freed
             * on the cleanup path — looks like a leak; confirm. */
            retval = ulog_conv_2dbentry(context, &entry, upd);
            if (retval)
                goto cleanup;

            retval = krb5int_put_principal_no_log(context, entry);
            krb5_db_free_principal(context, entry);
            if (retval)
                goto cleanup;
        }

        upd++;
    }

    /* All updates applied; record the master's last entry instead of 0. */
    last = &incr_ret->lastentry;

cleanup:
    if (fupd)
        ulog_free_entries(fupd, no_of_updates);

    /* Record a new last serial number and timestamp in the ulog header. */
    ulog->kdb_last_sno = last->last_sno;
    ulog->kdb_last_time = last->last_time;
    ulog_sync_header(ulog);

    return retval;
}
/*
 * Add an entry to the update log.  The layout of the update log looks like:
 *
 * header log -> [ update header -> xdr(kdb_incr_update_t) ], ...
 *
 * Returns 0 on success; KRB5_LOG_ERROR for a NULL update, KRB5_LOG_CONV on
 * XDR encode failure, or an error from resize()/sync_update().
 */
krb5_error_code
ulog_add_update(krb5_context context, kdb_incr_update_t *upd)
{
    XDR xdrs;
    kdbe_time_t ktime;
    kdb_ent_header_t *indx_log;
    unsigned int i, recsize;
    unsigned long upd_size;
    krb5_error_code retval;
    kdb_sno_t cur_sno;
    kdb_log_context *log_ctx;
    kdb_hlog_t *ulog = NULL;
    uint32_t ulogentries;
    int ulogfd;

    INIT_ULOG(context);
    ulogentries = log_ctx->ulogentries;
    ulogfd = log_ctx->ulogfd;

    if (upd == NULL)
        return KRB5_LOG_ERROR;

    time_current(&ktime);
    upd_size = xdr_sizeof((xdrproc_t)xdr_kdb_incr_update_t, upd);

    /* Grow the per-entry block size if this record won't fit. */
    recsize = sizeof(kdb_ent_header_t) + upd_size;
    if (recsize > ulog->kdb_block) {
        retval = resize(ulog, ulogentries, ulogfd, recsize);
        if (retval)
            return retval;
    }

    /* If we have reached the last possible serial number, reinitialize the
     * ulog and start over.  Slaves will do a full resync. */
    if (ulog->kdb_last_sno == (kdb_sno_t)-1)
        reset_header(ulog);

    /* Get the next serial number and save it for finish_update() to index. */
    cur_sno = ulog->kdb_last_sno + 1;
    upd->kdb_entry_sno = cur_sno;

    /* Write the entry header into its circular-buffer slot; the entry is
     * left uncommitted until ulog_finish_update(). */
    i = (cur_sno - 1) % ulogentries;
    indx_log = INDEX(ulog, i);
    memset(indx_log, 0, ulog->kdb_block);
    indx_log->kdb_umagic = KDB_ULOG_MAGIC;
    indx_log->kdb_entry_size = upd_size;
    indx_log->kdb_entry_sno = cur_sno;
    indx_log->kdb_time = upd->kdb_time = ktime;
    indx_log->kdb_commit = upd->kdb_commit = FALSE;

    /* Mark the log unstable until the entry is committed and synced. */
    ulog->kdb_state = KDB_UNSTABLE;

    /* XDR-encode the update payload directly into the mapped entry. */
    xdrmem_create(&xdrs, (char *)indx_log->entry_data,
                  indx_log->kdb_entry_size, XDR_ENCODE);
    if (!xdr_kdb_incr_update_t(&xdrs, upd))
        return KRB5_LOG_CONV;

    retval = sync_update(ulog, indx_log);
    if (retval)
        return retval;

    if (ulog->kdb_num < ulogentries)
        ulog->kdb_num++;
    ulog->kdb_last_sno = cur_sno;
    ulog->kdb_last_time = ktime;

    if (cur_sno > ulogentries) {
        /* Once we've circled, kdb_first_sno is the sno of the next entry. */
        i = upd->kdb_entry_sno % ulogentries;
        indx_log = INDEX(ulog, i);
        ulog->kdb_first_sno = indx_log->kdb_entry_sno;
        ulog->kdb_first_time = indx_log->kdb_time;
    } else if (cur_sno == 1) {
        /* This is the first update. */
        ulog->kdb_first_sno = 1;
        ulog->kdb_first_time = indx_log->kdb_time;
    }

    ulog_sync_header(ulog);
    return 0;
}
/*
 * Add an entry to the update log.  The layout of the update log looks like:
 *
 * header log -> [ update header -> xdr(kdb_incr_update_t) ], ...
 *
 * NOTE(review): this file contains more than one definition of
 * ulog_add_update — these appear to be different historical versions pasted
 * together; only one can be compiled into a translation unit.
 */
krb5_error_code
ulog_add_update(krb5_context context, kdb_incr_update_t *upd)
{
    XDR xdrs;
    kdbe_time_t ktime;
    kdb_ent_header_t *indx_log;
    unsigned int i, recsize;
    unsigned long upd_size;
    krb5_error_code retval;
    kdb_sno_t cur_sno;
    kdb_log_context *log_ctx;
    kdb_hlog_t *ulog = NULL;
    uint32_t ulogentries;
    int ulogfd;

    INIT_ULOG(context);
    ulogentries = log_ctx->ulogentries;
    ulogfd = log_ctx->ulogfd;

    if (upd == NULL)
        return KRB5_LOG_ERROR;

    time_current(&ktime);
    upd_size = xdr_sizeof((xdrproc_t)xdr_kdb_incr_update_t, upd);

    /* Grow the per-entry block size if this record won't fit. */
    recsize = sizeof(kdb_ent_header_t) + upd_size;
    if (recsize > ulog->kdb_block) {
        retval = ulog_resize(ulog, ulogentries, ulogfd, recsize);
        if (retval)
            return retval;
    }

    cur_sno = ulog->kdb_last_sno;

    /*
     * If we need to, wrap our sno around to 1.  A slaves will do a full
     * resync since its sno will be out of range of the ulog (or in extreme
     * cases, its timestamp won't match).
     */
    if (cur_sno == (kdb_sno_t)-1)
        cur_sno = 1;
    else
        cur_sno++;

    /* Squirrel this away for finish_update() to index. */
    upd->kdb_entry_sno = cur_sno;

    /* Write the entry header into its circular-buffer slot; it stays
     * uncommitted until finish_update(). */
    i = (cur_sno - 1) % ulogentries;
    indx_log = (kdb_ent_header_t *)INDEX(ulog, i);
    memset(indx_log, 0, ulog->kdb_block);
    indx_log->kdb_umagic = KDB_ULOG_MAGIC;
    indx_log->kdb_entry_size = upd_size;
    indx_log->kdb_entry_sno = cur_sno;
    indx_log->kdb_time = upd->kdb_time = ktime;
    indx_log->kdb_commit = upd->kdb_commit = FALSE;

    ulog->kdb_state = KDB_UNSTABLE;

    /* XDR-encode the update payload directly into the mapped entry. */
    xdrmem_create(&xdrs, (char *)indx_log->entry_data,
                  indx_log->kdb_entry_size, XDR_ENCODE);
    if (!xdr_kdb_incr_update_t(&xdrs, upd))
        return KRB5_LOG_CONV;

    retval = ulog_sync_update(ulog, indx_log);
    if (retval)
        return retval;

    if (ulog->kdb_num < ulogentries)
        ulog->kdb_num++;
    ulog->kdb_last_sno = cur_sno;
    ulog->kdb_last_time = ktime;

    if (cur_sno > ulogentries) {
        /* Once we've circled, kdb_first_sno is the sno of the next entry. */
        i = upd->kdb_entry_sno % ulogentries;
        indx_log = (kdb_ent_header_t *)INDEX(ulog, i);
        ulog->kdb_first_sno = indx_log->kdb_entry_sno;
        ulog->kdb_first_time = indx_log->kdb_time;
    } else if (cur_sno == 1) {
        /* This is the first update, or we wrapped. */
        ulog->kdb_first_sno = 1;
        ulog->kdb_first_time = indx_log->kdb_time;
    }

    ulog_sync_header(ulog);
    return 0;
}
/*
 * Map the log file to memory for performance and simplicity.
 *
 * Called by: if iprop_enabled then ulog_map();
 * Assumes that the caller will terminate on ulog_map, hence munmap and
 * closing of the fd are implicitly performed by the caller.
 * Returns 0 on success else failure.
 *
 * NOTE(review): this file also contains another, newer-looking definition of
 * ulog_map — only one can be compiled into a translation unit.
 */
krb5_error_code
ulog_map(krb5_context context, const char *logname, uint32_t ulogentries,
         int caller, char **db_args)
{
    struct stat st;
    krb5_error_code retval;
    uint32_t ulog_filesize;
    kdb_log_context *log_ctx;
    kdb_hlog_t *ulog = NULL;
    int ulogfd = -1;

    ulog_filesize = sizeof (kdb_hlog_t);

    if (stat(logname, &st) == -1) {

        if (caller == FKPROPLOG) {
            /*
             * File doesn't exist so we exit with kproplog
             */
            return (errno);
        }

        if ((ulogfd = open(logname, O_RDWR+O_CREAT, 0600)) == -1) {
            return (errno);
        }

        /* NOTE(review): from here down, early error returns leave ulogfd
         * open; the caller is documented to terminate on failure. */
        if (lseek(ulogfd, 0L, SEEK_CUR) == -1) {
            return (errno);
        }

        /* Only writers (kadmind, kdb5_util) pre-size the entry array. */
        if ((caller == FKADMIND) || (caller == FKCOMMAND))
            ulog_filesize += ulogentries * ULOG_BLOCK;

        if (extend_file_to(ulogfd, ulog_filesize) < 0)
            return errno;
    } else {

        ulogfd = open(logname, O_RDWR, 0600);
        if (ulogfd == -1)
            /*
             * Can't open existing log file
             */
            return errno;
    }

    if (caller == FKPROPLOG) {
        /* kproplog gets a private (copy-on-write) view sized to the file. */
        if (fstat(ulogfd, &st) < 0) {
            close(ulogfd);
            return errno;
        }
        ulog_filesize = st.st_size;

        ulog = (kdb_hlog_t *)mmap(0, ulog_filesize,
                                  PROT_READ+PROT_WRITE, MAP_PRIVATE,
                                  ulogfd, 0);
    } else {
        /*
         * else kadmind, kpropd, & kcommands should update stores;
         * map the maximum size shared so later extension needs no remap
         */
        ulog = (kdb_hlog_t *)mmap(0, MAXLOGLEN,
                                  PROT_READ+PROT_WRITE, MAP_SHARED,
                                  ulogfd, 0);
    }

    if (ulog == MAP_FAILED) {
        /*
         * Can't map update log file to memory
         */
        close(ulogfd);
        return (errno);
    }

    /* Create the per-context log context on first use; reuse otherwise. */
    if (!context->kdblog_context) {
        if (!(log_ctx = malloc(sizeof (kdb_log_context))))
            return (errno);
        memset(log_ctx, 0, sizeof(*log_ctx));
        context->kdblog_context = log_ctx;
    } else
        log_ctx = context->kdblog_context;
    log_ctx->ulog = ulog;
    log_ctx->ulogentries = ulogentries;
    log_ctx->ulogfd = ulogfd;

    if (ulog->kdb_hmagic != KDB_ULOG_HDR_MAGIC) {
        if (ulog->kdb_hmagic == 0) {
            /*
             * New update log: initialize the header.  Only sync it for
             * writers; kproplog's mapping is private anyway.
             */
            (void) memset(ulog, 0, sizeof (kdb_hlog_t));

            ulog->kdb_hmagic = KDB_ULOG_HDR_MAGIC;
            ulog->db_version_num = KDB_VERSION;
            ulog->kdb_state = KDB_STABLE;
            ulog->kdb_block = ULOG_BLOCK;
            if (!(caller == FKPROPLOG))
                ulog_sync_header(ulog);
        } else {
            return (KRB5_LOG_CORRUPT);
        }
    }

    if (caller == FKADMIND) {
        retval = ulog_lock(context, KRB5_LOCKMODE_EXCLUSIVE);
        if (retval)
            return retval;
        switch (ulog->kdb_state) {
        case KDB_STABLE:
        case KDB_UNSTABLE:
            /*
             * Log is currently un/stable, check anyway
             */
            retval = ulog_check(context, ulog, db_args);
            ulog_lock(context, KRB5_LOCKMODE_UNLOCK);
            if (retval == KRB5_LOG_CORRUPT) {
                return (retval);
            }
            break;
        case KDB_CORRUPT:
            ulog_lock(context, KRB5_LOCKMODE_UNLOCK);
            return (KRB5_LOG_CORRUPT);
        default:
            /*
             * Invalid db state
             */
            ulog_lock(context, KRB5_LOCKMODE_UNLOCK);
            return (KRB5_LOG_ERROR);
        }
    } else if ((caller == FKPROPLOG) || (caller == FKPROPD)) {
        /*
         * kproplog and kpropd don't need to do anything else
         */
        return (0);
    }

    /*
     * Reinit ulog if the log is being truncated or expanded after
     * we have circled.
     */
    retval = ulog_lock(context, KRB5_LOCKMODE_EXCLUSIVE);
    if (retval)
        return retval;
    if (ulog->kdb_num != ulogentries) {
        if ((ulog->kdb_num != 0) &&
            ((ulog->kdb_last_sno > ulog->kdb_num) ||
             (ulog->kdb_num > ulogentries))) {

            (void) memset(ulog, 0, sizeof (kdb_hlog_t));

            ulog->kdb_hmagic = KDB_ULOG_HDR_MAGIC;
            ulog->db_version_num = KDB_VERSION;
            ulog->kdb_state = KDB_STABLE;
            ulog->kdb_block = ULOG_BLOCK;

            ulog_sync_header(ulog);
        }

        /*
         * Expand ulog if we have specified a greater size
         */
        if (ulog->kdb_num < ulogentries) {
            ulog_filesize += ulogentries * ulog->kdb_block;

            if (extend_file_to(ulogfd, ulog_filesize) < 0) {
                ulog_lock(context, KRB5_LOCKMODE_UNLOCK);
                return errno;
            }
        }
    }
    ulog_lock(context, KRB5_LOCKMODE_UNLOCK);

    return (0);
}
/*
 * Validate the log file and resync any uncommitted update entries
 * to the principal database.
 *
 * Must be called with lock held.
 */
static krb5_error_code
ulog_check(krb5_context context, kdb_hlog_t *ulog, char **db_args)
{
    XDR xdrs;
    krb5_error_code retval = 0;
    unsigned int i;
    kdb_ent_header_t *indx_log;
    kdb_incr_update_t *upd = NULL;
    kdb_incr_result_t *incr_ret = NULL;

    ulog->kdb_state = KDB_STABLE;

    for (i = 0; i < ulog->kdb_num; i++) {
        indx_log = (kdb_ent_header_t *)INDEX(ulog, i);

        if (indx_log->kdb_umagic != KDB_ULOG_MAGIC) {
            /*
             * Update entry corrupted we should scream and die
             */
            ulog->kdb_state = KDB_CORRUPT;
            retval = KRB5_LOG_CORRUPT;
            break;
        }

        if (indx_log->kdb_commit == FALSE) {
            ulog->kdb_state = KDB_UNSTABLE;

            incr_ret = (kdb_incr_result_t *)
                malloc(sizeof (kdb_incr_result_t));
            if (incr_ret == NULL) {
                /* NOTE(review): errno after a failed malloc is typically
                 * ENOMEM but is not guaranteed nonzero by C alone; verify
                 * the platforms this targets set it. */
                retval = errno;
                goto error;
            }

            upd = (kdb_incr_update_t *)
                malloc(sizeof (kdb_incr_update_t));
            if (upd == NULL) {
                retval = errno;
                goto error;
            }

            /* Decode the mapped entry back into an update structure. */
            (void) memset(upd, 0, sizeof (kdb_incr_update_t));
            xdrmem_create(&xdrs, (char *)indx_log->entry_data,
                          indx_log->kdb_entry_size, XDR_DECODE);
            if (!xdr_kdb_incr_update_t(&xdrs, upd)) {
                retval = KRB5_LOG_CONV;
                goto error;
            }

            incr_ret->updates.kdb_ulog_t_len = 1;
            incr_ret->updates.kdb_ulog_t_val = upd;

            upd->kdb_commit = TRUE;

            /*
             * We don't want to readd this update and just use the
             * existing update to be propagated later on
             */
            ulog_set_role(context, IPROP_NULL);

            retval = ulog_replay(context, incr_ret, db_args);

            /*
             * upd was freed by ulog_replay, we NULL
             * the pointer in case we subsequently break from loop.
             */
            upd = NULL;
            if (incr_ret) {
                free(incr_ret);
                incr_ret = NULL;
            }
            ulog_set_role(context, IPROP_MASTER);

            if (retval)
                goto error;

            /*
             * We flag this as committed since this was
             * the last entry before kadmind crashed, ergo
             * the slaves have not seen this update before
             */
            indx_log->kdb_commit = TRUE;
            retval = ulog_sync_update(ulog, indx_log);
            if (retval)
                goto error;

            ulog->kdb_state = KDB_STABLE;
        }
    }

error:
    /* upd is only non-NULL here if ulog_replay never took ownership. */
    if (upd)
        ulog_free_entries(upd, 1);

    free(incr_ret);

    ulog_sync_header(ulog);

    return (retval);
}
/*
 * Adds an entry to the update log.
 * The layout of the update log looks like:
 *
 * header log -> [ update header -> xdr(kdb_incr_update_t) ], ...
 *
 * Returns 0 on success; KRB5_LOG_ERROR for a NULL update, KRB5_LOG_CONV on
 * XDR encode failure, or an error from ulog_resize()/ulog_sync_update().
 */
krb5_error_code
ulog_add_update(krb5_context context, kdb_incr_update_t *upd)
{
    XDR xdrs;
    kdbe_time_t ktime;
    struct timeval timestamp;
    kdb_ent_header_t *indx_log;
    uint_t i, recsize;
    ulong_t upd_size;
    krb5_error_code retval;
    kdb_sno_t cur_sno;
    kdb_log_context *log_ctx;
    kdb_hlog_t *ulog = NULL;
    uint32_t ulogentries;
    int ulogfd;

    INIT_ULOG(context);
    ulogentries = log_ctx->ulogentries;
    ulogfd = log_ctx->ulogfd;

    if (upd == NULL)
        return (KRB5_LOG_ERROR);

    /* Timestamp the entry.  (Fixed: "&timestamp" had been mangled to a
     * multiplication-sign character by an HTML-entity corruption.) */
    (void) gettimeofday(&timestamp, NULL);
    ktime.seconds = timestamp.tv_sec;
    ktime.useconds = timestamp.tv_usec;

    upd_size = xdr_sizeof((xdrproc_t)xdr_kdb_incr_update_t, upd);

    /* Grow the per-entry block size if this record won't fit. */
    recsize = sizeof (kdb_ent_header_t) + upd_size;

    if (recsize > ulog->kdb_block) {
        if ((retval = ulog_resize(ulog, ulogentries, ulogfd, recsize))) {
            /* Resize element array failed */
            return (retval);
        }
    }

    cur_sno = ulog->kdb_last_sno;

    /*
     * We need to overflow our sno; replicas will do full resyncs once they
     * see their sno is greater than the master's.
     */
    if (cur_sno == (kdb_sno_t)-1)
        cur_sno = 1;
    else
        cur_sno++;

    /*
     * We squirrel this away for finish_update() to index
     */
    upd->kdb_entry_sno = cur_sno;

    /* Write the entry header into its circular-buffer slot; the entry is
     * left uncommitted until finish_update(). */
    i = (cur_sno - 1) % ulogentries;

    indx_log = (kdb_ent_header_t *)INDEX(ulog, i);

    (void) memset(indx_log, 0, ulog->kdb_block);

    indx_log->kdb_umagic = KDB_ULOG_MAGIC;
    indx_log->kdb_entry_size = upd_size;
    indx_log->kdb_entry_sno = cur_sno;
    indx_log->kdb_time = upd->kdb_time = ktime;
    indx_log->kdb_commit = upd->kdb_commit = FALSE;

    /* Mark the log unstable until the entry is committed and synced. */
    ulog->kdb_state = KDB_UNSTABLE;

    /* XDR-encode the update payload directly into the mapped entry. */
    xdrmem_create(&xdrs, (char *)indx_log->entry_data,
                  indx_log->kdb_entry_size, XDR_ENCODE);
    if (!xdr_kdb_incr_update_t(&xdrs, upd))
        return (KRB5_LOG_CONV);

    if ((retval = ulog_sync_update(ulog, indx_log)))
        return (retval);

    if (ulog->kdb_num < ulogentries)
        ulog->kdb_num++;

    ulog->kdb_last_sno = cur_sno;
    ulog->kdb_last_time = ktime;

    /*
     * Since this is a circular array, once we circled, kdb_first_sno is
     * always kdb_entry_sno + 1.
     */
    if (cur_sno > ulogentries) {
        i = upd->kdb_entry_sno % ulogentries;
        indx_log = (kdb_ent_header_t *)INDEX(ulog, i);
        ulog->kdb_first_sno = indx_log->kdb_entry_sno;
        ulog->kdb_first_time = indx_log->kdb_time;
    } else if (cur_sno == 1) {
        /* First update ever, or we just wrapped the sno. */
        ulog->kdb_first_sno = 1;
        ulog->kdb_first_time = indx_log->kdb_time;
    }

    ulog_sync_header(ulog);

    return (0);
}