/*
 * Get the last set of updates seen, (last+1) to n is returned.
 *
 * On success, ulog_handle->updates holds the decoded updates and
 * ulog_handle->ret is UPDATE_OK.  On any other outcome, ret describes the
 * disposition (UPDATE_NIL, UPDATE_FULL_RESYNC_NEEDED, UPDATE_ERROR) and no
 * decoded updates are left behind for the caller to free.
 */
krb5_error_code
ulog_get_entries(krb5_context context, const kdb_last_t *last,
                 kdb_incr_result_t *ulog_handle)
{
    XDR xdrs;
    kdb_ent_header_t *indx_log;
    kdb_incr_update_t *upd;
    unsigned int indx, count;
    uint32_t sno;
    krb5_error_code retval;
    kdb_log_context *log_ctx;
    kdb_hlog_t *ulog = NULL;
    uint32_t ulogentries;

    INIT_ULOG(context);
    ulogentries = log_ctx->ulogentries;

    retval = ulog_lock(context, KRB5_LOCKMODE_SHARED);
    if (retval)
        return retval;

    /* If another process terminated mid-update, reset the ulog and force
     * full resyncs. */
    if (ulog->kdb_state != KDB_STABLE)
        reset_header(ulog);

    /* If we have the same sno and timestamp, return a nil update.  If a
     * different timestamp, the sno was reused and we need a full resync. */
    if (last->last_sno == ulog->kdb_last_sno) {
        ulog_handle->ret = time_equal(&last->last_time, &ulog->kdb_last_time) ?
            UPDATE_NIL : UPDATE_FULL_RESYNC_NEEDED;
        goto cleanup;
    }

    /* We may have overflowed the update log or shrunk the log, or the client
     * may have created its ulog. */
    if (last->last_sno > ulog->kdb_last_sno ||
        last->last_sno < ulog->kdb_first_sno) {
        ulog_handle->lastentry.last_sno = ulog->kdb_last_sno;
        ulog_handle->ret = UPDATE_FULL_RESYNC_NEEDED;
        goto cleanup;
    }

    sno = last->last_sno;
    indx = (sno - 1) % ulogentries;
    indx_log = INDEX(ulog, indx);

    if (!time_equal(&indx_log->kdb_time, &last->last_time)) {
        /* We have a timestamp mismatch or we no longer have the slave's last
         * sno, so we brute force it. */
        ulog_handle->ret = UPDATE_FULL_RESYNC_NEEDED;
        goto cleanup;
    }

    /* count >= 1 here: last->last_sno is strictly less than kdb_last_sno. */
    count = ulog->kdb_last_sno - sno;
    upd = calloc(count, sizeof(kdb_incr_update_t));
    if (upd == NULL) {
        ulog_handle->ret = UPDATE_ERROR;
        retval = ENOMEM;
        goto cleanup;
    }
    ulog_handle->updates.kdb_ulog_t_val = upd;

    for (; sno < ulog->kdb_last_sno; sno++) {
        indx = sno % ulogentries;
        indx_log = INDEX(ulog, indx);

        memset(upd, 0, sizeof(kdb_incr_update_t));
        xdrmem_create(&xdrs, (char *)indx_log->entry_data,
                      indx_log->kdb_entry_size, XDR_DECODE);
        if (!xdr_kdb_incr_update_t(&xdrs, upd)) {
            /* Release the entries decoded so far; kdb_ulog_t_len is still
             * zero, so the caller has no way to free them and they would
             * otherwise leak. */
            while (upd > ulog_handle->updates.kdb_ulog_t_val) {
                upd--;
                xdr_free((xdrproc_t)xdr_kdb_incr_update_t, (char *)upd);
            }
            free(ulog_handle->updates.kdb_ulog_t_val);
            ulog_handle->updates.kdb_ulog_t_val = NULL;
            ulog_handle->ret = UPDATE_ERROR;
            retval = KRB5_LOG_CONV;
            goto cleanup;
        }

        /* Mark commitment since we didn't want to decode and encode the incr
         * update record the first time. */
        upd->kdb_commit = indx_log->kdb_commit;
        upd++;
    }

    ulog_handle->updates.kdb_ulog_t_len = count;
    ulog_handle->lastentry.last_sno = ulog->kdb_last_sno;
    ulog_handle->lastentry.last_time.seconds = ulog->kdb_last_time.seconds;
    ulog_handle->lastentry.last_time.useconds = ulog->kdb_last_time.useconds;
    ulog_handle->ret = UPDATE_OK;

cleanup:
    (void)ulog_lock(context, KRB5_LOCKMODE_UNLOCK);
    return retval;
}
/*
 * Get the last set of updates seen, (last+1) to n is returned.
 *
 * Takes the shared ulog lock and, for the lookup phase, the shared database
 * lock; every return path must release whichever locks it holds.
 */
krb5_error_code
ulog_get_entries(krb5_context context,           /* input - krb5 lib config */
                 kdb_last_t last,                /* input - slave's last sno */
                 kdb_incr_result_t *ulog_handle) /* output - incr result for slave */
{
    XDR xdrs;
    kdb_ent_header_t *indx_log;
    kdb_incr_update_t *upd;
    uint_t indx, count, tdiff;
    uint32_t sno;
    krb5_error_code retval;
    struct timeval timestamp;
    kdb_log_context *log_ctx;
    kdb_hlog_t *ulog = NULL;
    uint32_t ulogentries;

    INIT_ULOG(context);
    ulogentries = log_ctx->ulogentries;

    retval = ulog_lock(context, KRB5_LOCKMODE_SHARED);
    if (retval)
        return retval;

    /*
     * Check to make sure we don't have a corrupt ulog first.
     */
    if (ulog->kdb_state == KDB_CORRUPT) {
        ulog_handle->ret = UPDATE_ERROR;
        (void) ulog_lock(context, KRB5_LOCKMODE_UNLOCK);
        return (KRB5_LOG_CORRUPT);
    }

    /*
     * Defer the slave if the ulog was written very recently; the master may
     * still be mid-burst.  (Fixed: "&timestamp" had been mangled into a
     * multiplication sign, which does not compile.)
     */
    gettimeofday(&timestamp, NULL);
    tdiff = timestamp.tv_sec - ulog->kdb_last_time.seconds;
    if (tdiff <= ULOG_IDLE_TIME) {
        ulog_handle->ret = UPDATE_BUSY;
        (void) ulog_lock(context, KRB5_LOCKMODE_UNLOCK);
        return (0);
    }

    /*
     * We need to lock out other processes here, such as kadmin.local,
     * since we are looking at the last_sno and looking up updates.  So
     * we can share with other readers.
     */
    retval = krb5_db_lock(context, KRB5_LOCKMODE_SHARED);
    if (retval) {
        (void) ulog_lock(context, KRB5_LOCKMODE_UNLOCK);
        return (retval);
    }

    /*
     * We may have overflowed the update log or we shrunk the log, or
     * the client's ulog has just been created.
     */
    if ((last.last_sno > ulog->kdb_last_sno) ||
        (last.last_sno < ulog->kdb_first_sno) ||
        (last.last_sno == 0)) {
        ulog_handle->lastentry.last_sno = ulog->kdb_last_sno;
        (void) ulog_lock(context, KRB5_LOCKMODE_UNLOCK);
        (void) krb5_db_unlock(context);
        ulog_handle->ret = UPDATE_FULL_RESYNC_NEEDED;
        return (0);
    } else if (last.last_sno <= ulog->kdb_last_sno) {
        sno = last.last_sno;
        indx = (sno - 1) % ulogentries;
        indx_log = (kdb_ent_header_t *)INDEX(ulog, indx);

        /*
         * Validate the time stamp just to make sure it was the same sno
         */
        if ((indx_log->kdb_time.seconds == last.last_time.seconds) &&
            (indx_log->kdb_time.useconds == last.last_time.useconds)) {
            /*
             * If we have the same sno we return success
             */
            if (last.last_sno == ulog->kdb_last_sno) {
                (void) ulog_lock(context, KRB5_LOCKMODE_UNLOCK);
                (void) krb5_db_unlock(context);
                ulog_handle->ret = UPDATE_NIL;
                return (0);
            }

            count = ulog->kdb_last_sno - sno;
            ulog_handle->updates.kdb_ulog_t_val =
                malloc(sizeof (kdb_incr_update_t) * count);
            upd = ulog_handle->updates.kdb_ulog_t_val;

            if (upd == NULL) {
                (void) ulog_lock(context, KRB5_LOCKMODE_UNLOCK);
                (void) krb5_db_unlock(context);
                ulog_handle->ret = UPDATE_ERROR;
                /*
                 * malloc is not guaranteed to set errno; returning errno
                 * here could report 0 (success) on OOM.  Return ENOMEM.
                 */
                return (ENOMEM);
            }

            while (sno < ulog->kdb_last_sno) {
                indx = sno % ulogentries;
                indx_log = (kdb_ent_header_t *)INDEX(ulog, indx);

                (void) memset(upd, 0, sizeof (kdb_incr_update_t));
                xdrmem_create(&xdrs, (char *)indx_log->entry_data,
                              indx_log->kdb_entry_size, XDR_DECODE);
                if (!xdr_kdb_incr_update_t(&xdrs, upd)) {
                    (void) ulog_lock(context, KRB5_LOCKMODE_UNLOCK);
                    (void) krb5_db_unlock(context);
                    ulog_handle->ret = UPDATE_ERROR;
                    return (KRB5_LOG_CONV);
                }
                /*
                 * Mark commitment since we didn't want to decode and
                 * encode the incr update record the first time.
                 */
                upd->kdb_commit = indx_log->kdb_commit;
                upd++;
                sno++;
            } /* while */

            ulog_handle->updates.kdb_ulog_t_len = count;

            ulog_handle->lastentry.last_sno = ulog->kdb_last_sno;
            ulog_handle->lastentry.last_time.seconds =
                ulog->kdb_last_time.seconds;
            ulog_handle->lastentry.last_time.useconds =
                ulog->kdb_last_time.useconds;
            ulog_handle->ret = UPDATE_OK;

            (void) ulog_lock(context, KRB5_LOCKMODE_UNLOCK);
            (void) krb5_db_unlock(context);
            return (0);
        } else {
            /*
             * We have time stamp mismatch or we no longer have
             * the slave's last sno, so we brute force it
             */
            (void) ulog_lock(context, KRB5_LOCKMODE_UNLOCK);
            (void) krb5_db_unlock(context);
            ulog_handle->ret = UPDATE_FULL_RESYNC_NEEDED;
            return (0);
        }
    }

    /*
     * Should never get here (the branches above are exhaustive); release
     * both locks -- the db lock was previously leaked on this path.
     */
    (void) krb5_db_unlock(context);
    (void) ulog_lock(context, KRB5_LOCKMODE_UNLOCK);
    ulog_handle->ret = UPDATE_ERROR;
    return (KRB5_LOG_ERROR);
}
/*
 * Map the log file to memory for performance and simplicity.
 *
 * Called by: if iprop_enabled then ulog_map();
 * Assumes that the caller will terminate on ulog_map, hence munmap and
 * closing of the fd are implicitly performed by the caller.
 *
 * Semantics for various values of caller:
 *
 *  - FKPROPLOG
 *
 *    Don't create if it doesn't exist, map as MAP_PRIVATE.
 *
 *  - FKPROPD
 *
 *    Create and initialize if need be, map as MAP_SHARED.
 *
 *  - FKCOMMAND
 *
 *    Create and [re-]initialize if need be, size appropriately, map as
 *    MAP_SHARED.  (Intended for kdb5_util create and kdb5_util load of
 *    non-iprop dump.)
 *
 *  - FKADMIN
 *
 *    Create and [re-]initialize if need be, size appropriately, map as
 *    MAP_SHARED, and check consistency and recover as necessary.  (Intended
 *    for kadmind and kadmin.local.)
 *
 * Returns 0 on success else failure.
 */
krb5_error_code
ulog_map(krb5_context context, const char *logname, uint32_t ulogentries,
         int caller, char **db_args)
{
    struct stat st;
    krb5_error_code retval;
    uint32_t ulog_filesize;
    kdb_log_context *log_ctx;
    kdb_hlog_t *ulog = NULL;
    int ulogfd = -1;

    ulog_filesize = sizeof(kdb_hlog_t);

    if (stat(logname, &st) == -1) {
        /* File doesn't exist so we exit with kproplog. */
        if (caller == FKPROPLOG)
            return errno;

        ulogfd = open(logname, O_RDWR | O_CREAT, 0600);
        if (ulogfd == -1)
            return errno;

        if (lseek(ulogfd, 0L, SEEK_CUR) == -1) {
            /* Save errno before close() can clobber it; don't leak the
             * newly opened fd. */
            retval = errno;
            close(ulogfd);
            return retval;
        }

        if (caller == FKADMIND || caller == FKCOMMAND)
            ulog_filesize += ulogentries * ULOG_BLOCK;

        if (extend_file_to(ulogfd, ulog_filesize) < 0) {
            retval = errno;
            close(ulogfd);
            return retval;
        }
    } else {
        ulogfd = open(logname, O_RDWR, 0600);
        if (ulogfd == -1)
            return errno;
    }

    if (caller == FKPROPLOG) {
        if (fstat(ulogfd, &st) < 0) {
            retval = errno;
            close(ulogfd);
            return retval;
        }
        ulog_filesize = st.st_size;

        ulog = mmap(0, ulog_filesize, PROT_READ | PROT_WRITE, MAP_PRIVATE,
                    ulogfd, 0);
    } else {
        /* kadmind, kpropd, & kcommands should update stores. */
        ulog = mmap(0, MAXLOGLEN, PROT_READ | PROT_WRITE, MAP_SHARED,
                    ulogfd, 0);
    }

    if (ulog == MAP_FAILED) {
        /* Can't map update log file to memory. */
        retval = errno;
        close(ulogfd);
        return retval;
    }

    if (!context->kdblog_context) {
        log_ctx = k5alloc(sizeof(kdb_log_context), &retval);
        if (log_ctx == NULL) {
            /* Release the mapping and fd acquired above; nothing else
             * holds references to them yet. */
            munmap(ulog, (caller == FKPROPLOG) ? ulog_filesize : MAXLOGLEN);
            close(ulogfd);
            return retval;
        }
        memset(log_ctx, 0, sizeof(*log_ctx));
        context->kdblog_context = log_ctx;
    } else {
        log_ctx = context->kdblog_context;
    }
    log_ctx->ulog = ulog;
    log_ctx->ulogentries = ulogentries;
    log_ctx->ulogfd = ulogfd;

    retval = ulog_lock(context, KRB5_LOCKMODE_EXCLUSIVE);
    if (retval)
        return retval;

    /* A non-zero, non-magic header means some other program wrote here. */
    if (ulog->kdb_hmagic != KDB_ULOG_HDR_MAGIC && ulog->kdb_hmagic != 0) {
        ulog_lock(context, KRB5_LOCKMODE_UNLOCK);
        return KRB5_LOG_CORRUPT;
    }

    /* Zero magic: brand-new log; initialize the header. */
    if (ulog->kdb_hmagic != KDB_ULOG_HDR_MAGIC) {
        reset_header(ulog);
        if (caller != FKPROPLOG)
            ulog_sync_header(ulog);
        ulog_lock(context, KRB5_LOCKMODE_UNLOCK);
        return 0;
    }

    if (caller == FKPROPLOG || caller == FKPROPD) {
        /* kproplog and kpropd don't need to do anything else. */
        ulog_lock(context, KRB5_LOCKMODE_UNLOCK);
        return 0;
    }

    assert(caller == FKADMIND || caller == FKCOMMAND);

    /* Reinit ulog if the log is being truncated or expanded after we have
     * circled. */
    if (ulog->kdb_num != ulogentries) {
        if (ulog->kdb_num != 0 &&
            (ulog->kdb_last_sno > ulog->kdb_num ||
             ulog->kdb_num > ulogentries)) {
            reset_header(ulog);
            ulog_sync_header(ulog);
        }

        /* Expand ulog if we have specified a greater size. */
        if (ulog->kdb_num < ulogentries) {
            ulog_filesize += ulogentries * ulog->kdb_block;

            if (extend_file_to(ulogfd, ulog_filesize) < 0) {
                /* Save errno before ulog_lock() can clobber it. */
                retval = errno;
                ulog_lock(context, KRB5_LOCKMODE_UNLOCK);
                return retval;
            }
        }
    }
    ulog_lock(context, KRB5_LOCKMODE_UNLOCK);
    return 0;
}
/*
 * Map the log file to memory for performance and simplicity.
 *
 * Called by: if iprop_enabled then ulog_map();
 * Assumes that the caller will terminate on ulog_map, hence munmap and
 * closing of the fd are implicitly performed by the caller.
 * Returns 0 on success else failure.
 */
krb5_error_code
ulog_map(krb5_context context, const char *logname, uint32_t ulogentries,
         int caller, char **db_args)
{
    struct stat st;
    krb5_error_code retval;
    uint32_t ulog_filesize;
    kdb_log_context *log_ctx;
    kdb_hlog_t *ulog = NULL;
    int ulogfd = -1;

    ulog_filesize = sizeof (kdb_hlog_t);

    if (stat(logname, &st) == -1) {
        if (caller == FKPROPLOG) {
            /*
             * File doesn't exist so we exit with kproplog
             */
            return (errno);
        }

        /* Open flags are bit masks; combine them with "|", not "+". */
        if ((ulogfd = open(logname, O_RDWR | O_CREAT, 0600)) == -1) {
            return (errno);
        }

        if (lseek(ulogfd, 0L, SEEK_CUR) == -1) {
            /* Save errno before close() can clobber it; don't leak the
             * newly opened fd. */
            retval = errno;
            (void) close(ulogfd);
            return (retval);
        }

        if ((caller == FKADMIND) || (caller == FKCOMMAND))
            ulog_filesize += ulogentries * ULOG_BLOCK;

        if (extend_file_to(ulogfd, ulog_filesize) < 0) {
            retval = errno;
            (void) close(ulogfd);
            return (retval);
        }
    } else {
        ulogfd = open(logname, O_RDWR, 0600);
        if (ulogfd == -1) {
            /*
             * Can't open existing log file
             */
            return errno;
        }
    }

    if (caller == FKPROPLOG) {
        if (fstat(ulogfd, &st) < 0) {
            retval = errno;
            (void) close(ulogfd);
            return (retval);
        }
        ulog_filesize = st.st_size;

        ulog = (kdb_hlog_t *)mmap(0, ulog_filesize,
                                  PROT_READ | PROT_WRITE, MAP_PRIVATE,
                                  ulogfd, 0);
    } else {
        /*
         * else kadmind, kpropd, & kcommands should update stores
         */
        ulog = (kdb_hlog_t *)mmap(0, MAXLOGLEN,
                                  PROT_READ | PROT_WRITE, MAP_SHARED,
                                  ulogfd, 0);
    }

    if (ulog == MAP_FAILED) {
        /*
         * Can't map update log file to memory
         */
        retval = errno;
        (void) close(ulogfd);
        return (retval);
    }

    if (!context->kdblog_context) {
        if (!(log_ctx = malloc(sizeof (kdb_log_context)))) {
            /*
             * malloc is not guaranteed to set errno (could report 0 ==
             * success); return ENOMEM and release the mapping and fd we
             * acquired above.
             */
            (void) munmap(ulog,
                          (caller == FKPROPLOG) ? ulog_filesize : MAXLOGLEN);
            (void) close(ulogfd);
            return (ENOMEM);
        }
        memset(log_ctx, 0, sizeof(*log_ctx));
        context->kdblog_context = log_ctx;
    } else {
        log_ctx = context->kdblog_context;
    }
    log_ctx->ulog = ulog;
    log_ctx->ulogentries = ulogentries;
    log_ctx->ulogfd = ulogfd;

    if (ulog->kdb_hmagic != KDB_ULOG_HDR_MAGIC) {
        if (ulog->kdb_hmagic == 0) {
            /*
             * New update log
             */
            (void) memset(ulog, 0, sizeof (kdb_hlog_t));

            ulog->kdb_hmagic = KDB_ULOG_HDR_MAGIC;
            ulog->db_version_num = KDB_VERSION;
            ulog->kdb_state = KDB_STABLE;
            ulog->kdb_block = ULOG_BLOCK;
            if (!(caller == FKPROPLOG))
                ulog_sync_header(ulog);
        } else {
            return (KRB5_LOG_CORRUPT);
        }
    }

    if (caller == FKADMIND) {
        retval = ulog_lock(context, KRB5_LOCKMODE_EXCLUSIVE);
        if (retval)
            return retval;
        switch (ulog->kdb_state) {
        case KDB_STABLE:
        case KDB_UNSTABLE:
            /*
             * Log is currently un/stable, check anyway
             */
            retval = ulog_check(context, ulog, db_args);
            ulog_lock(context, KRB5_LOCKMODE_UNLOCK);
            if (retval == KRB5_LOG_CORRUPT) {
                return (retval);
            }
            break;
        case KDB_CORRUPT:
            ulog_lock(context, KRB5_LOCKMODE_UNLOCK);
            return (KRB5_LOG_CORRUPT);
        default:
            /*
             * Invalid db state
             */
            ulog_lock(context, KRB5_LOCKMODE_UNLOCK);
            return (KRB5_LOG_ERROR);
        }
    } else if ((caller == FKPROPLOG) || (caller == FKPROPD)) {
        /*
         * kproplog and kpropd don't need to do anything else
         */
        return (0);
    }

    /*
     * Reinit ulog if the log is being truncated or expanded after
     * we have circled.
     */
    retval = ulog_lock(context, KRB5_LOCKMODE_EXCLUSIVE);
    if (retval)
        return retval;
    if (ulog->kdb_num != ulogentries) {
        if ((ulog->kdb_num != 0) &&
            ((ulog->kdb_last_sno > ulog->kdb_num) ||
             (ulog->kdb_num > ulogentries))) {
            (void) memset(ulog, 0, sizeof (kdb_hlog_t));

            ulog->kdb_hmagic = KDB_ULOG_HDR_MAGIC;
            ulog->db_version_num = KDB_VERSION;
            ulog->kdb_state = KDB_STABLE;
            ulog->kdb_block = ULOG_BLOCK;

            ulog_sync_header(ulog);
        }

        /*
         * Expand ulog if we have specified a greater size
         */
        if (ulog->kdb_num < ulogentries) {
            ulog_filesize += ulogentries * ulog->kdb_block;

            if (extend_file_to(ulogfd, ulog_filesize) < 0) {
                /* Save errno before ulog_lock() can clobber it. */
                retval = errno;
                ulog_lock(context, KRB5_LOCKMODE_UNLOCK);
                return (retval);
            }
        }
    }
    ulog_lock(context, KRB5_LOCKMODE_UNLOCK);

    return (0);
}