/* Prevent this process's memory from being paged out to swap. */
static void _lock_mem(void)
{
#ifdef MCL_CURRENT
	/* Pin all current and future mappings; log but carry on if it fails. */
	if (mlockall(MCL_CURRENT | MCL_FUTURE))
		log_sys_error("mlockall", "");
	else
		log_very_verbose("Locking memory");
#endif
	_allocate_memory();

	/*
	 * Record the current nice value before raising priority.
	 * getpriority() can legitimately return -1, so errno must be
	 * cleared first and checked to distinguish failure from -1.
	 */
	errno = 0;
	_priority = getpriority(PRIO_PROCESS, 0);
	if (_priority == -1 && errno)
		log_sys_error("getpriority", "");
	else if (setpriority(PRIO_PROCESS, 0, _default_priority))
		log_error("setpriority %d failed: %s", _default_priority,
			  strerror(errno));
}
/* Stop memory getting swapped out */ static void _lock_mem(struct cmd_context *cmd) { _allocate_memory(); (void)strerror(0); /* Force libc.mo load */ (void)dm_udev_get_sync_support(); /* udev is initialized */ log_very_verbose("Locking memory"); /* * For daemon we need to use mlockall() * so even future adition of thread which may not even use lvm lib * will not block memory locked thread * Note: assuming _memlock_count_daemon is updated before _memlock_count */ _use_mlockall = _memlock_count_daemon ? 1 : find_config_tree_bool(cmd, activation_use_mlockall_CFG, NULL); if (!_use_mlockall) { if (!*_procselfmaps && dm_snprintf(_procselfmaps, sizeof(_procselfmaps), "%s" SELF_MAPS, cmd->proc_dir) < 0) { log_error("proc_dir too long"); return; } if (!(_maps_fd = open(_procselfmaps, O_RDONLY))) { log_sys_error("open", _procselfmaps); return; } if (!_disable_mmap()) stack; } if (!_memlock_maps(cmd, LVM_MLOCK, &_mstats)) stack; errno = 0; if (((_priority = getpriority(PRIO_PROCESS, 0)) == -1) && errno) log_sys_error("getpriority", ""); else if (setpriority(PRIO_PROCESS, 0, _default_priority)) log_error("setpriority %d failed: %s", _default_priority, strerror(errno)); }