/*
 * Resize the array elements.  We reinitialize the update log rather than
 * unrolling the log and copying it over to a temporary log for obvious
 * performance reasons.  Slaves will subsequently do a full resync, but the
 * need for resizing should be very small.
 *
 * Returns 0 on success, KRB5_LOG_ERROR if ulog is NULL or the new size
 * would exceed MAXLOGLEN, or an errno value if the file cannot be extended.
 */
static krb5_error_code
resize(kdb_hlog_t *ulog, uint32_t ulogentries, int ulogfd,
       unsigned int recsize)
{
    unsigned int new_block, new_size;

    if (ulog == NULL)
        return KRB5_LOG_ERROR;

    new_size = sizeof(kdb_hlog_t);

    /* Round the record size up to the next multiple of ULOG_BLOCK. */
    new_block = (recsize / ULOG_BLOCK) + 1;
    new_block *= ULOG_BLOCK;

    /* Reject sizes whose computation would overflow new_size; a wrapped
     * value could otherwise slip past the MAXLOGLEN check below.  (Assumes
     * MAXLOGLEN >= sizeof(kdb_hlog_t), which must hold for any usable
     * log.) */
    if (ulogentries != 0 &&
        new_block > (MAXLOGLEN - sizeof(kdb_hlog_t)) / ulogentries)
        return KRB5_LOG_ERROR;
    new_size += ulogentries * new_block;

    if (new_size > MAXLOGLEN)
        return KRB5_LOG_ERROR;

    /* Reinit log with new block size. */
    memset(ulog, 0, sizeof(*ulog));
    ulog->kdb_hmagic = KDB_ULOG_HDR_MAGIC;
    ulog->db_version_num = KDB_VERSION;
    ulog->kdb_state = KDB_STABLE;
    ulog->kdb_block = new_block;
    ulog_sync_header(ulog);

    /* Expand log considering new block size. */
    if (extend_file_to(ulogfd, new_size) < 0)
        return errno;

    return 0;
}
/*
 * Resizes the array elements.  We reinitialize the update log rather than
 * unrolling the log and copying it over to a temporary log for obvious
 * performance reasons.  Slaves will subsequently do a full resync, but
 * the need for resizing should be very small.
 */
static krb5_error_code
ulog_resize(kdb_hlog_t *ulog, uint32_t ulogentries, int ulogfd, uint_t recsize)
{
    uint_t block_bytes, total_bytes;

    if (ulog == NULL)
        return (KRB5_LOG_ERROR);

    /* Round the record size up to the next ULOG_BLOCK boundary. */
    block_bytes = ((recsize / ULOG_BLOCK) + 1) * ULOG_BLOCK;
    total_bytes = sizeof (kdb_hlog_t) + ulogentries * block_bytes;

    /* Can't map into a file larger than MAXLOGLEN. */
    if (total_bytes > MAXLOGLEN)
        return (KRB5_LOG_ERROR);

    /* Reinit the log header with the new block size. */
    (void) memset(ulog, 0, sizeof (kdb_hlog_t));
    ulog->kdb_hmagic = KDB_ULOG_HDR_MAGIC;
    ulog->db_version_num = KDB_VERSION;
    ulog->kdb_state = KDB_STABLE;
    ulog->kdb_block = block_bytes;
    ulog_sync_header(ulog);

    /* Time to expand the log to fit the new block size. */
    if (extend_file_to(ulogfd, total_bytes) < 0)
        return errno;

    return (0);
}
/*
 * Map the log file to memory for performance and simplicity.
 *
 * Called by: if iprop_enabled then ulog_map();
 * Assumes that the caller will terminate on ulog_map failure, hence munmap
 * and closing of the fd are implicitly performed by the caller.
 *
 * Semantics for various values of caller:
 *
 * - FKPROPLOG
 *
 *   Don't create if it doesn't exist, map as MAP_PRIVATE.
 *
 * - FKPROPD
 *
 *   Create and initialize if need be, map as MAP_SHARED.
 *
 * - FKCOMMAND
 *
 *   Create and [re-]initialize if need be, size appropriately, map as
 *   MAP_SHARED.  (Intended for kdb5_util create and kdb5_util load of
 *   non-iprop dump.)
 *
 * - FKADMIN
 *
 *   Create and [re-]initialize if need be, size appropriately, map as
 *   MAP_SHARED, and check consistency and recover as necessary.  (Intended
 *   for kadmind and kadmin.local.)
 *
 * Returns 0 on success else failure.
 */
krb5_error_code
ulog_map(krb5_context context, const char *logname, uint32_t ulogentries,
         int caller, char **db_args)
{
    struct stat st;
    krb5_error_code retval;
    uint32_t ulog_filesize;
    kdb_log_context *log_ctx;
    kdb_hlog_t *ulog = NULL;
    int ulogfd = -1;

    ulog_filesize = sizeof(kdb_hlog_t);

    if (stat(logname, &st) == -1) {
        /* File doesn't exist so we exit with kproplog. */
        if (caller == FKPROPLOG)
            return errno;

        ulogfd = open(logname, O_RDWR | O_CREAT, 0600);
        if (ulogfd == -1)
            return errno;

        if (lseek(ulogfd, 0L, SEEK_CUR) == -1)
            return errno;

        if (caller == FKADMIND || caller == FKCOMMAND)
            ulog_filesize += ulogentries * ULOG_BLOCK;

        if (extend_file_to(ulogfd, ulog_filesize) < 0)
            return errno;
    } else {
        ulogfd = open(logname, O_RDWR, 0600);
        if (ulogfd == -1)
            return errno;
    }

    if (caller == FKPROPLOG) {
        /* kproplog only reads, so map privately at the current file size. */
        if (fstat(ulogfd, &st) < 0) {
            retval = errno;     /* close() may clobber errno. */
            close(ulogfd);
            return retval;
        }
        ulog_filesize = st.st_size;

        ulog = mmap(0, ulog_filesize, PROT_READ | PROT_WRITE, MAP_PRIVATE,
                    ulogfd, 0);
    } else {
        /* kadmind, kpropd, & kcommands should update stores. */
        ulog = mmap(0, MAXLOGLEN, PROT_READ | PROT_WRITE, MAP_SHARED,
                    ulogfd, 0);
    }

    if (ulog == MAP_FAILED) {
        /* Can't map update log file to memory. */
        retval = errno;         /* close() may clobber errno. */
        close(ulogfd);
        return retval;
    }

    if (!context->kdblog_context) {
        log_ctx = k5alloc(sizeof(kdb_log_context), &retval);
        if (log_ctx == NULL)
            return retval;
        memset(log_ctx, 0, sizeof(*log_ctx));
        context->kdblog_context = log_ctx;
    } else {
        log_ctx = context->kdblog_context;
    }
    log_ctx->ulog = ulog;
    log_ctx->ulogentries = ulogentries;
    log_ctx->ulogfd = ulogfd;

    retval = ulog_lock(context, KRB5_LOCKMODE_EXCLUSIVE);
    if (retval)
        return retval;

    if (ulog->kdb_hmagic != KDB_ULOG_HDR_MAGIC && ulog->kdb_hmagic != 0) {
        /* Unrecognized header magic: the log is corrupt. */
        ulog_lock(context, KRB5_LOCKMODE_UNLOCK);
        return KRB5_LOG_CORRUPT;
    }

    if (ulog->kdb_hmagic != KDB_ULOG_HDR_MAGIC) {
        /* Brand-new log (magic is zero): initialize the header. */
        reset_header(ulog);
        if (caller != FKPROPLOG)
            ulog_sync_header(ulog);
        ulog_lock(context, KRB5_LOCKMODE_UNLOCK);
        return 0;
    }

    if (caller == FKPROPLOG || caller == FKPROPD) {
        /* kproplog and kpropd don't need to do anything else. */
        ulog_lock(context, KRB5_LOCKMODE_UNLOCK);
        return 0;
    }

    assert(caller == FKADMIND || caller == FKCOMMAND);

    /* Reinit ulog if the log is being truncated or expanded after we have
     * circled. */
    if (ulog->kdb_num != ulogentries) {
        if (ulog->kdb_num != 0 &&
            (ulog->kdb_last_sno > ulog->kdb_num ||
             ulog->kdb_num > ulogentries)) {
            reset_header(ulog);
            ulog_sync_header(ulog);
        }

        /* Expand ulog if we have specified a greater size. */
        if (ulog->kdb_num < ulogentries) {
            ulog_filesize += ulogentries * ulog->kdb_block;
            if (extend_file_to(ulogfd, ulog_filesize) < 0) {
                retval = errno; /* unlock may clobber errno. */
                ulog_lock(context, KRB5_LOCKMODE_UNLOCK);
                return retval;
            }
        }
    }
    ulog_lock(context, KRB5_LOCKMODE_UNLOCK);
    return 0;
}
/*
 * Map the log file to memory for performance and simplicity.
 *
 * Called by: if iprop_enabled then ulog_map();
 * Assumes that the caller will terminate on ulog_map failure, hence munmap
 * and closing of the fd are implicitly performed by the caller.
 *
 * Returns 0 on success, or an errno / krb5 error code on failure.
 */
krb5_error_code
ulog_map(krb5_context context, const char *logname, uint32_t ulogentries)
{
    struct stat st;
    krb5_error_code retval;
    uint32_t filesize;
    kdb_log_context *log_ctx;
    kdb_hlog_t *ulog = NULL;
    int ulogfd = -1;

    if (stat(logname, &st) == -1) {
        /* The log doesn't exist yet; create and size it. */
        ulogfd = open(logname, O_RDWR | O_CREAT, 0600);
        if (ulogfd == -1)
            return errno;

        filesize = sizeof(kdb_hlog_t) + ulogentries * ULOG_BLOCK;
        if (extend_file_to(ulogfd, filesize) < 0)
            return errno;
    } else {
        ulogfd = open(logname, O_RDWR, 0600);
        if (ulogfd == -1)
            return errno;
    }

    ulog = mmap(0, MAXLOGLEN, PROT_READ | PROT_WRITE, MAP_SHARED, ulogfd, 0);
    if (ulog == MAP_FAILED) {
        /* Can't map update log file to memory. */
        retval = errno;         /* close() may clobber errno. */
        close(ulogfd);
        return retval;
    }

    if (!context->kdblog_context) {
        log_ctx = k5alloc(sizeof(kdb_log_context), &retval);
        if (log_ctx == NULL)
            return retval;
        memset(log_ctx, 0, sizeof(*log_ctx));
        context->kdblog_context = log_ctx;
    } else {
        log_ctx = context->kdblog_context;
    }
    log_ctx->ulog = ulog;
    log_ctx->ulogentries = ulogentries;
    log_ctx->ulogfd = ulogfd;

    retval = lock_ulog(context, KRB5_LOCKMODE_EXCLUSIVE);
    if (retval)
        return retval;

    if (ulog->kdb_hmagic != KDB_ULOG_HDR_MAGIC) {
        if (ulog->kdb_hmagic != 0) {
            /* Unrecognized header magic: the log is corrupt. */
            unlock_ulog(context);
            return KRB5_LOG_CORRUPT;
        }
        /* Brand-new log: initialize the header. */
        reset_header(ulog);
        sync_header(ulog);
    }

    /* Reinit ulog if ulogentries changed such that we have too many entries
     * or our first or last entry was written to the wrong location. */
    if (ulog->kdb_num != 0 &&
        (ulog->kdb_num > ulogentries ||
         !check_sno(log_ctx, ulog->kdb_first_sno, &ulog->kdb_first_time) ||
         !check_sno(log_ctx, ulog->kdb_last_sno, &ulog->kdb_last_time))) {
        reset_header(ulog);
        sync_header(ulog);
    }

    if (ulog->kdb_num != ulogentries) {
        /* Expand the ulog file if it isn't big enough. */
        filesize = sizeof(kdb_hlog_t) + ulogentries * ulog->kdb_block;
        if (extend_file_to(ulogfd, filesize) < 0) {
            retval = errno;     /* unlock may clobber errno. */
            unlock_ulog(context);
            return retval;
        }
    }
    unlock_ulog(context);
    return 0;
}
/*
 * Map the log file to memory for performance and simplicity.
 *
 * Called by: if iprop_enabled then ulog_map();
 * Assumes that the caller will terminate on ulog_map failure, hence munmap
 * and closing of the fd are implicitly performed by the caller.
 * Returns 0 on success else failure.
 *
 * NOTE(review): several error paths return errno after calling close() or
 * ulog_lock(UNLOCK), either of which may overwrite errno; and the error
 * paths inside the stat()==-1 branch return without closing ulogfd.  Both
 * appear to rely on the caller-terminates assumption above -- confirm.
 */
krb5_error_code
ulog_map(krb5_context context, const char *logname, uint32_t ulogentries,
    int caller, char **db_args)
{
    struct stat st;
    krb5_error_code retval;
    uint32_t ulog_filesize;
    kdb_log_context *log_ctx;
    kdb_hlog_t *ulog = NULL;
    int ulogfd = -1;

    /* Minimum file size is just the header; entry space is added below. */
    ulog_filesize = sizeof (kdb_hlog_t);

    if (stat(logname, &st) == -1) {
        if (caller == FKPROPLOG) {
            /*
             * File doesn't exist so we exit with kproplog
             */
            return (errno);
        }

        if ((ulogfd = open(logname, O_RDWR+O_CREAT, 0600)) == -1) {
            return (errno);
        }

        if (lseek(ulogfd, 0L, SEEK_CUR) == -1) {
            return (errno);
        }

        /* Only the writers (kadmind, kdb5_util) size the file for
         * ulogentries entries; kpropd gets just the header here. */
        if ((caller == FKADMIND) || (caller == FKCOMMAND))
            ulog_filesize += ulogentries * ULOG_BLOCK;

        if (extend_file_to(ulogfd, ulog_filesize) < 0)
            return errno;
    } else {
        ulogfd = open(logname, O_RDWR, 0600);
        if (ulogfd == -1)
            /*
             * Can't open existing log file
             */
            return errno;
    }

    if (caller == FKPROPLOG) {
        /* kproplog maps the existing file privately at its current size. */
        if (fstat(ulogfd, &st) < 0) {
            close(ulogfd);
            return errno;
        }
        ulog_filesize = st.st_size;

        ulog = (kdb_hlog_t *)mmap(0, ulog_filesize,
            PROT_READ+PROT_WRITE, MAP_PRIVATE, ulogfd, 0);
    } else {
        /*
         * else kadmind, kpropd, & kcommands should update stores
         */
        ulog = (kdb_hlog_t *)mmap(0, MAXLOGLEN,
            PROT_READ+PROT_WRITE, MAP_SHARED, ulogfd, 0);
    }

    if (ulog == MAP_FAILED) {
        /*
         * Can't map update log file to memory
         */
        close(ulogfd);
        return (errno);
    }

    /* Allocate (or reuse) the per-context log context and stash the
     * mapping, entry count, and fd in it. */
    if (!context->kdblog_context) {
        if (!(log_ctx = malloc(sizeof (kdb_log_context))))
            return (errno);
        memset(log_ctx, 0, sizeof(*log_ctx));
        context->kdblog_context = log_ctx;
    } else
        log_ctx = context->kdblog_context;
    log_ctx->ulog = ulog;
    log_ctx->ulogentries = ulogentries;
    log_ctx->ulogfd = ulogfd;

    /* NOTE(review): the header magic is examined and initialized here
     * without holding the ulog lock -- verify no concurrent mapper races. */
    if (ulog->kdb_hmagic != KDB_ULOG_HDR_MAGIC) {
        if (ulog->kdb_hmagic == 0) {
            /*
             * New update log
             */
            (void) memset(ulog, 0, sizeof (kdb_hlog_t));

            ulog->kdb_hmagic = KDB_ULOG_HDR_MAGIC;
            ulog->db_version_num = KDB_VERSION;
            ulog->kdb_state = KDB_STABLE;
            ulog->kdb_block = ULOG_BLOCK;
            /* kproplog's MAP_PRIVATE mapping must not write back. */
            if (!(caller == FKPROPLOG))
                ulog_sync_header(ulog);
        } else {
            return (KRB5_LOG_CORRUPT);
        }
    }

    if (caller == FKADMIND) {
        /* kadmind checks log consistency (and recovers) under the lock. */
        retval = ulog_lock(context, KRB5_LOCKMODE_EXCLUSIVE);
        if (retval)
            return retval;
        switch (ulog->kdb_state) {
        case KDB_STABLE:
        case KDB_UNSTABLE:
            /*
             * Log is currently un/stable, check anyway
             */
            retval = ulog_check(context, ulog, db_args);
            ulog_lock(context, KRB5_LOCKMODE_UNLOCK);
            /* NOTE(review): only KRB5_LOG_CORRUPT is propagated here;
             * other nonzero ulog_check results fall through -- confirm
             * this is intentional. */
            if (retval == KRB5_LOG_CORRUPT) {
                return (retval);
            }
            break;
        case KDB_CORRUPT:
            ulog_lock(context, KRB5_LOCKMODE_UNLOCK);
            return (KRB5_LOG_CORRUPT);
        default:
            /*
             * Invalid db state
             */
            ulog_lock(context, KRB5_LOCKMODE_UNLOCK);
            return (KRB5_LOG_ERROR);
        }
    } else if ((caller == FKPROPLOG) || (caller == FKPROPD)) {
        /*
         * kproplog and kpropd don't need to do anything else
         */
        return (0);
    }

    /*
     * Reinit ulog if the log is being truncated or expanded after
     * we have circled.
     */
    retval = ulog_lock(context, KRB5_LOCKMODE_EXCLUSIVE);
    if (retval)
        return retval;
    if (ulog->kdb_num != ulogentries) {
        /* Reset the header if entries exist but the recorded state is
         * inconsistent with the configured ulogentries. */
        if ((ulog->kdb_num != 0) &&
            ((ulog->kdb_last_sno > ulog->kdb_num) ||
            (ulog->kdb_num > ulogentries))) {
            (void) memset(ulog, 0, sizeof (kdb_hlog_t));
            ulog->kdb_hmagic = KDB_ULOG_HDR_MAGIC;
            ulog->db_version_num = KDB_VERSION;
            ulog->kdb_state = KDB_STABLE;
            ulog->kdb_block = ULOG_BLOCK;
            ulog_sync_header(ulog);
        }
        /*
         * Expand ulog if we have specified a greater size
         */
        if (ulog->kdb_num < ulogentries) {
            ulog_filesize += ulogentries * ulog->kdb_block;
            if (extend_file_to(ulogfd, ulog_filesize) < 0) {
                ulog_lock(context, KRB5_LOCKMODE_UNLOCK);
                return errno;
            }
        }
    }
    ulog_lock(context, KRB5_LOCKMODE_UNLOCK);
    return (0);
}