/*
 * Leave the critical section, but only once no devices remain
 * suspended; memory itself is unlocked separately via
 * _unlock_mem_if_possible().
 */
void critical_section_dec(struct cmd_context *cmd, const char *reason)
{
	if (_critical_section && !dm_get_suspended_counter()) {
		_critical_section = 0;
		log_debug_mem("Leaving critical section (%s).", reason);
	}
}
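/*
 * Reset all memlock bookkeeping without performing any actual
 * munlock; presumably for use when the counters are known to be
 * stale (e.g. in a freshly forked child).
 */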
void memlock_reset(void)
{
	log_debug_mem("memlock reset.");
	_mem_locked = 0;
	_critical_section = 0;
	_memlock_count_daemon = 0;
}
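/*
 * Undo _lock_mem(): munlock the mapped areas, restore the intercepted
 * mmap entry point, check that the memory used while locked stayed
 * within the reserved amount, and restore the process priority.
 */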
static void _unlock_mem(struct cmd_context *cmd)
{
	size_t unlock_mstats;

	log_very_verbose("Unlocking memory");

	if (!_memlock_maps(cmd, LVM_MUNLOCK, &unlock_mstats))
		stack;

	if (!_use_mlockall) {
		_restore_mmap();
		if (close(_maps_fd))
			log_sys_error("close", _procselfmaps);
		dm_free(_maps_buffer);
		_maps_buffer = NULL;
		if (_mstats < unlock_mstats) {
			if ((_mstats + lvm_getpagesize()) < unlock_mstats)
				log_error(INTERNAL_ERROR
					  "Reserved memory (%ld) not enough: used %ld. Increase activation/reserved_memory?",
					  (long)_mstats, (long)unlock_mstats);
			else
				/* FIXME Believed due to incorrect use of yes_no_prompt while locks held */
				log_debug_mem("Suppressed internal error: Maps lock %ld < unlock %ld, a one-page difference.",
					      (long)_mstats, (long)unlock_mstats);
		}
	}

	if (setpriority(PRIO_PROCESS, 0, _priority))
		log_error("setpriority %u failed: %s", _priority,
			  strerror(errno));
	_release_memory();
}
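/*
 * Daemon-style counterpart to the critical section counter; presumably
 * used by long-running processes that need memory locked independently
 * of any critical section.  Taking the first daemon reference while
 * already inside a critical section is flagged as an internal error.
 */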
void memlock_inc_daemon(struct cmd_context *cmd)
{
	++_memlock_count_daemon;
	if (_memlock_count_daemon == 1 && _critical_section > 0)
		log_error(INTERNAL_ERROR "_memlock_inc_daemon used in critical section.");
	log_debug_mem("memlock_count_daemon inc to %d", _memlock_count_daemon);
	_lock_mem_if_needed(cmd);
}
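/*
 * Overwrite the first byte of libc's mmap() (and mmap64() on i386) with
 * a HLT instruction, so any attempt to map new memory while locked
 * traps immediately instead of silently creating an unlocked mapping.
 * The PLT indirection (jmp *addr, opcode ff 25) is followed manually to
 * reach the real entry point before patching it.
 */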
static int _disable_mmap(void)
{
#ifdef ARCH_X86
	volatile unsigned char *abs_addr;

	if (!_mmap_addr) {
		_mmap_addr = (unsigned char *) dlsym(RTLD_NEXT, "mmap");
		if (_mmap_addr[0] == 0xff && _mmap_addr[1] == 0x25) { /* plt */
#ifdef __x86_64__
			abs_addr = _mmap_addr + 6 + *(int32_t *)(_mmap_addr + 2);
#endif /* __x86_64__ */
#ifdef __i386__
			abs_addr = *(void **)(_mmap_addr + 2);
#endif /* __i386__ */
			_mmap_addr = *(void **)abs_addr;
		} else
			log_debug_mem("Can't find PLT jump entry assuming -fPIE linkage.");
		if (mprotect((void *)((unsigned long)_mmap_addr & ~4095UL), 4096,
			     PROT_READ|PROT_WRITE|PROT_EXEC)) {
			log_sys_error("mprotect", "");
			_mmap_addr = NULL;
			return 0;
		}
		_mmap_orig = *_mmap_addr;
	}
	log_debug_mem("Remapping mmap entry %02x to %02x.", _mmap_orig, INSTRUCTION_HLT);
	*_mmap_addr = INSTRUCTION_HLT;

#ifdef __i386__
	if (!_mmap64_addr) {
		_mmap64_addr = (unsigned char *) dlsym(RTLD_NEXT, "mmap64");
		if (_mmap64_addr[0] == 0xff && _mmap64_addr[1] == 0x25) {
			abs_addr = *(void **)(_mmap64_addr + 2);
			_mmap64_addr = *(void **)abs_addr;
		} /* Can't find PLT jump entry assuming -fPIE linkage */
		if (mprotect((void *)((unsigned long)_mmap64_addr & ~4095UL), 4096,
			     PROT_READ|PROT_WRITE|PROT_EXEC)) {
			log_sys_error("mprotect", "");
			_mmap64_addr = NULL;
			return 0;
		}
		_mmap64_orig = *_mmap64_addr;
	}
	*_mmap64_addr = INSTRUCTION_HLT;
#endif /* __i386__ */
#endif /* ARCH_X86 */
	return 1;
}
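/*
 * Lock memory on the 0 -> 1 transition of the combined critical
 * section and daemon counters; _mem_locked guards against double
 * locking.
 */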
static void _lock_mem_if_needed(struct cmd_context *cmd)
{
	log_debug_mem("Lock: Memlock counters: locked:%d critical:%d daemon:%d suspended:%d",
		      _mem_locked, _critical_section, _memlock_count_daemon,
		      dm_get_suspended_counter());
	if (!_mem_locked &&
	    ((_critical_section + _memlock_count_daemon) == 1)) {
		_mem_locked = 1;
		_lock_mem(cmd);
	}
}
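/*
 * Unlock memory once both the critical section and daemon counters
 * have dropped back to zero.
 */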
static void _unlock_mem_if_possible(struct cmd_context *cmd)
{
	log_debug_mem("Unlock: Memlock counters: locked:%d critical:%d daemon:%d suspended:%d",
		      _mem_locked, _critical_section, _memlock_count_daemon,
		      dm_get_suspended_counter());
	if (_mem_locked && !_critical_section && !_memlock_count_daemon) {
		_unlock_mem(cmd);
		_mem_locked = 0;
	}
}
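/*
 * Decrement the daemon memlock counter.  Dropping the last daemon
 * reference while still inside a critical section is unexpected:
 * an error is logged and the memory is unlocked anyway.
 */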
void memlock_dec_daemon(struct cmd_context *cmd)
{
	if (!_memlock_count_daemon)
		log_error(INTERNAL_ERROR "_memlock_count_daemon has dropped below 0.");
	--_memlock_count_daemon;
	log_debug_mem("memlock_count_daemon dec to %d", _memlock_count_daemon);
	if (!_memlock_count_daemon && _critical_section && _mem_locked) {
		log_error("Unlocking daemon memory in critical section.");
		_unlock_mem(cmd);
		_mem_locked = 0;
	}
	_unlock_mem_if_possible(cmd);
}
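/*
 * Restore the original first byte of mmap() (and mmap64() on i386)
 * saved by _disable_mmap().
 */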
static int _restore_mmap(void)
{
#ifdef ARCH_X86
	if (_mmap_addr)
		*_mmap_addr = _mmap_orig;
#ifdef __i386__
	if (_mmap64_addr)
		*_mmap64_addr = _mmap64_orig;
#endif /* __i386__ */
	log_debug_mem("Restored mmap entry.");
#endif /* ARCH_X86 */
	return 1;
}
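/*
 * Enter the critical section, locking memory on first entry so that
 * no page fault can occur while devices are suspended.
 */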
void critical_section_inc(struct cmd_context *cmd, const char *reason)
{
	/*
	 * Profiles are loaded on-demand so make sure that before
	 * entering the critical section all needed profiles are
	 * loaded to avoid the disk access later.
	 */
	(void) load_pending_profiles(cmd);

	if (!_critical_section) {
		_critical_section = 1;
		log_debug_mem("Entering critical section (%s).", reason);
	}

	_lock_mem_if_needed(cmd);
}
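/*
 * mlock or munlock every area listed in /proc/self/maps, reading the
 * whole file into a single buffer first so that no reallocation (which
 * could itself alter the maps) happens mid-parse.  When mlockall is
 * configured, mlockall()/munlockall() is used instead.
 */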
static int _memlock_maps(struct cmd_context *cmd, lvmlock_t lock, size_t *mstats)
{
	const struct dm_config_node *cn;
	char *line, *line_end;
	size_t len;
	ssize_t n;
	int ret = 1;

	if (_use_mlockall) {
#ifdef MCL_CURRENT
		if (lock == LVM_MLOCK) {
			if (mlockall(MCL_CURRENT | MCL_FUTURE)) {
				log_sys_error("mlockall", "");
				return 0;
			}
		} else {
			if (munlockall()) {
				log_sys_error("munlockall", "");
				return 0;
			}
		}
		return 1;
#else
		return 0;
#endif
	}

	/* Reset statistic counters */
	*mstats = 0;

	/* read mapping into a single memory chunk without reallocation
	 * in the middle of reading maps file */
	for (len = 0;;) {
		if (!_maps_buffer || len >= _maps_len) {
			if (_maps_buffer)
				_maps_len *= 2;
			if (!(line = dm_realloc(_maps_buffer, _maps_len))) {
				log_error("Allocation of maps buffer failed.");
				return 0;
			}
			_maps_buffer = line;
		}
		if (lseek(_maps_fd, 0, SEEK_SET))
			log_sys_error("lseek", _procselfmaps);
		for (len = 0; len < _maps_len; len += n) {
			if (!(n = read(_maps_fd, _maps_buffer + len, _maps_len - len)))
				break; /* EOF */
			if (n == -1) {
				log_sys_error("read", _procselfmaps);
				return 0;
			}
		}
		if (len < _maps_len) { /* fits in buffer */
			_maps_buffer[len] = '\0';
			break;
		}
	}

	line = _maps_buffer;
	cn = find_config_tree_array(cmd, activation_mlock_filter_CFG, NULL);

	while ((line_end = strchr(line, '\n'))) {
		*line_end = '\0'; /* remove \n */
		if (!_maps_line(cn, lock, line, mstats))
			ret = 0;
		line = line_end + 1;
	}

	log_debug_mem("%socked %ld bytes", (lock == LVM_MLOCK) ? "L" : "Unl",
		      (long)*mstats);

	return ret;
}
/*
 * mlock/munlock memory areas from /proc/self/maps
 * format described in kernel/Documentation/filesystems/proc.txt
 */
static int _maps_line(const struct dm_config_node *cn, lvmlock_t lock,
		      const char *line, size_t *mstats)
{
	const struct dm_config_value *cv;
	long from, to;
	int pos;
	unsigned i;
	char fr, fw, fx, fp;
	size_t sz;
	const char *lock_str = (lock == LVM_MLOCK) ? "mlock" : "munlock";

	if (sscanf(line, "%lx-%lx %c%c%c%c%n",
		   &from, &to, &fr, &fw, &fx, &fp, &pos) != 6) {
		log_error("Failed to parse maps line: %s", line);
		return 0;
	}

	/* Select readable maps */
	if (fr != 'r') {
		log_debug_mem("%s area unreadable %s : Skipping.", lock_str, line);
		return 1;
	}

	/* always ignored areas */
	for (i = 0; i < DM_ARRAY_SIZE(_ignore_maps); ++i)
		if (strstr(line + pos, _ignore_maps[i])) {
			log_debug_mem("%s ignore filter '%s' matches '%s': Skipping.",
				      lock_str, _ignore_maps[i], line);
			return 1;
		}

	sz = to - from;
	if (!cn) {
		/* If no blacklist configured, use an internal set */
		for (i = 0; i < DM_ARRAY_SIZE(_blacklist_maps); ++i)
			if (strstr(line + pos, _blacklist_maps[i])) {
				log_debug_mem("%s default filter '%s' matches '%s': Skipping.",
					      lock_str, _blacklist_maps[i], line);
				return 1;
			}
	} else {
		for (cv = cn->v; cv; cv = cv->next) {
			if ((cv->type != DM_CFG_STRING) || !cv->v.str[0])
				continue;
			if (strstr(line + pos, cv->v.str)) {
				log_debug_mem("%s_filter '%s' matches '%s': Skipping.",
					      lock_str, cv->v.str, line);
				return 1;
			}
		}
	}

#ifdef HAVE_VALGRIND
	/*
	 * Valgrind is continually eating memory while executing code
	 * so we need to deactivate check of locked memory size
	 */
#ifndef VALGRIND_POOL
	if (RUNNING_ON_VALGRIND)
#endif
		sz -= sz; /* = 0, but avoids getting warning about dead assignment */
#endif

	*mstats += sz;
	log_debug_mem("%s %10ldKiB %12lx - %12lx %c%c%c%c%s", lock_str,
		      ((long)sz + 1023) / 1024, from, to, fr, fw, fx, fp,
		      line + pos);

	if (lock == LVM_MLOCK) {
		if (mlock((const void *)from, sz) < 0) {
			log_sys_error("mlock", line);
			return 0;
		}
	} else {
		if (munlock((const void *)from, sz) < 0) {
			log_sys_error("munlock", line);
			return 0;
		}
	}

	return 1;
}