/**
 * Store a metadata blob in a slot of the luksmeta area.
 *
 * @param cd    crypt device whose header hole holds the metadata area
 * @param slot  slot index, or CRYPT_ANY_SLOT to pick a free one
 * @param uuid  owner UUID for the blob; the all-zero UUID is rejected
 *              (it marks unused slots)
 * @param buf   payload to store
 * @param size  payload length in bytes
 *
 * @return the slot used on success, negative errno on failure
 *         (-EKEYREJECTED zero uuid, -EBADSLT bad slot, -EALREADY slot
 *          occupied, -ENOSPC no gap large enough)
 */
int luksmeta_save(struct crypt_device *cd, int slot, const luksmeta_uuid_t uuid, const void *buf, size_t size)
{
    uint32_t length = 0;
    lm_slot_t *s = NULL;
    lm_t lm = {};
    int fd = -1;
    int r = 0;
    off_t off;

    /* The all-zero UUID is reserved to mark free slots. */
    if (uuid_is_zero(uuid))
        return -EKEYREJECTED;

    /* On success the fd is positioned just past the raw header
     * (read_header consumed sizeof(lm_t) bytes). */
    fd = read_header(cd, O_RDWR | O_SYNC, &length, &lm);
    if (fd < 0)
        return fd;

    if (slot == CRYPT_ANY_SLOT)
        slot = find_unused_slot(cd, &lm);

    r = slot >= 0 && slot < LUKS_NSLOTS ? 0 : -EBADSLT;
    if (r < 0)
        goto error;

    s = &lm.slots[slot];

    /* Refuse to overwrite a slot that already holds data. */
    r = uuid_is_zero(s->uuid) ? 0 : -EALREADY;
    if (r < 0)
        goto error;

    /* find_gap() is expected to return an offset past the aligned
     * header, or something smaller on failure -- hence the check. */
    s->offset = find_gap(&lm, length, size);
    r = s->offset >= ALIGN(sizeof(lm), true) ? 0 : -ENOSPC;
    if (r < 0)
        goto error;

    /* Fill in the slot entry before writing anything to disk. */
    memcpy(s->uuid, uuid, sizeof(luksmeta_uuid_t));
    s->length = size;
    s->crc32c = crc32c(0, buf, size);

    /* Seek from just-past-the-header to the chosen gap. */
    off = s->offset - sizeof(lm);
    r = lseek(fd, off, SEEK_CUR) == -1 ? -errno : 0;
    if (r < 0)
        goto error;

    r = writeall(fd, buf, size);
    if (r < 0)
        goto error;

    /* Seek back to the start of the area and rewrite the header so the
     * new slot entry is persisted. */
    off = s->offset + s->length;
    r = lseek(fd, -off, SEEK_CUR) == -1 ? -errno : 0;
    if (r < 0)
        goto error;

    r = write_header(fd, lm);

error:
    close(fd);
    return r < 0 ? r : slot;
}
/**
 * Record a node as the holder of a token, creating the token on demand.
 *
 * @param uuid       token identifier (must be non-NULL and non-zero)
 * @param node_id    node claiming the token (must be a valid id)
 * @param node_addr  network address of that node (must be a valid IP)
 *
 * @return 0 on success, -TM_ERR_TOO_MANY_TOKENS if the token table is
 *         full, -TM_ERR_ANOTHER_HOLDER if a different node holds it.
 */
int tm_tokens_set_holder(const exa_uuid_t *uuid, exa_nodeid_t node_id, const os_net_addr_str_t node_addr)
{
    token_t *token;

    OS_ASSERT(uuid != NULL && !uuid_is_zero(uuid));
    OS_ASSERT(node_id != EXA_NODEID_NONE);
    OS_ASSERT(node_addr != NULL && os_net_ip_is_valid(node_addr));

    /* Look the token up; register it if it is not known yet. */
    token = __find_token(uuid);
    if (token == NULL)
        token = __add_token(uuid);
    if (token == NULL)
        return -TM_ERR_TOO_MANY_TOKENS;

    /* A token held by some other node cannot be taken over. */
    if (token->holder != EXA_NODEID_NONE && token->holder != node_id)
        return -TM_ERR_ANOTHER_HOLDER;

    token->holder = node_id;
    os_strlcpy(token->holder_addr, node_addr, sizeof(token->holder_addr));

    return 0;
}
/*
 * Open the luksmeta area and read/validate its header into *lm.
 *
 * On success, returns the open file descriptor positioned just past the
 * raw header, with *length set to the size of the area and *lm converted
 * to host byte order. On failure the fd is closed and a negative errno
 * is returned (-ENOENT no/invalid header, -ENOTSUP bad version,
 * -EINVAL corrupt checksum or slot geometry).
 */
static int read_header(struct crypt_device *cd, int flags, uint32_t *length, lm_t *lm)
{
    uint32_t maxlen;
    int fd = -1;
    int r = 0;

    fd = open_hole(cd, flags, length);
    if (fd < 0)
        return fd;

    /* The area must at least hold a full header. */
    r = *length >= sizeof(lm_t) ? 0 : -ENOENT;
    if (r < 0)
        goto error;

    r = readall(fd, lm, sizeof(lm_t));
    if (r < 0)
        goto error;

    r = memcmp(LM_MAGIC, lm->magic, sizeof(LM_MAGIC)) == 0 ? 0 : -ENOENT;
    if (r < 0)
        goto error;

    r = lm->version == htobe32(LM_VERSION) ? 0 : -ENOTSUP;
    if (r < 0)
        goto error;

    /* NOTE(review): crc32c is byte-swapped before checksum() runs while
     * version is swapped only afterwards -- presumably checksum() masks
     * or zeroes the crc32c field and expects version in big-endian.
     * TODO confirm against checksum()'s implementation. */
    lm->crc32c = be32toh(lm->crc32c);
    r = checksum(*lm) == lm->crc32c ? 0 : -EINVAL;
    if (r < 0)
        goto error;

    lm->version = be32toh(lm->version);

    /* Payload space available after the (aligned) header. */
    maxlen = *length - ALIGN(sizeof(lm_t), true);

    for (int slot = 0; slot < LUKS_NSLOTS; slot++) {
        lm_slot_t *s = &lm->slots[slot];

        /* Convert per-slot fields to host byte order. */
        s->offset = be32toh(s->offset);
        s->length = be32toh(s->length);
        s->crc32c = be32toh(s->crc32c);

        /* Occupied slots must lie past the header and fit the area. */
        if (!uuid_is_zero(s->uuid)) {
            r = s->offset > sizeof(lm_t) ? 0 : -EINVAL;
            if (r < 0)
                goto error;

            r = s->length <= maxlen ? 0 : -EINVAL;
            if (r < 0)
                goto error;
        }
    }

    return fd;

error:
    close(fd);
    return r;
}
/**
 * Wipe the metadata stored in a slot: zero the payload on disk and
 * clear the slot entry in the header.
 *
 * @param cd    crypt device whose header hole holds the metadata area
 * @param slot  slot index to wipe
 * @param uuid  if non-NULL, must match the slot's owner UUID
 *
 * @return 0 on success, negative errno on failure (-EBADSLT bad slot,
 *         -EALREADY slot already empty, -EKEYREJECTED uuid mismatch)
 */
int luksmeta_wipe(struct crypt_device *cd, int slot, const luksmeta_uuid_t uuid)
{
    uint8_t *zero = NULL;
    uint32_t length = 0;
    lm_slot_t *s = NULL;
    lm_t lm = {};
    int fd = -1;
    int r = 0;
    off_t off;

    if (slot < 0 || slot >= LUKS_NSLOTS)
        return -EBADSLT;

    s = &lm.slots[slot];

    /* On success the fd sits just past the raw header. */
    fd = read_header(cd, O_RDWR | O_SYNC, &length, &lm);
    if (fd < 0)
        return fd;

    /* Nothing stored in this slot. */
    r = uuid_is_zero(s->uuid) ? -EALREADY : 0;
    if (r < 0)
        goto error;

    /* When a UUID is supplied, it must match the slot's owner. */
    if (uuid && memcmp(uuid, s->uuid, sizeof(luksmeta_uuid_t)) != 0) {
        r = -EKEYREJECTED;
        goto error;
    }

    /* Seek from just-past-the-header to the slot's payload. */
    off = s->offset - sizeof(lm_t);
    r = lseek(fd, off, SEEK_CUR) == -1 ? -errno : 0;
    if (r < 0)
        goto error;

    /* Overwrite the payload with zeros. */
    r = (zero = calloc(1, s->length)) ? 0 : -errno;
    if (r < 0)
        goto error;

    r = writeall(fd, zero, s->length);
    free(zero);
    if (r < 0)
        goto error;

    /* Seek back to the start of the area and persist the cleared
     * slot entry. */
    off = s->offset + s->length;
    r = lseek(fd, -off, SEEK_CUR) == -1 ? -errno : 0;
    if (r < 0)
        goto error;

    memset(s, 0, sizeof(lm_slot_t));
    r = write_header(fd, lm);

error:
    close(fd);
    return r < 0 ? r : 0;
}
/*
 * Find the first slot that is free both as a LUKS keyslot and in the
 * luksmeta header. Returns the slot index, or -1 if none is available.
 */
static int find_unused_slot(struct crypt_device *cd, const lm_t *lm)
{
    int candidate;

    for (candidate = 0; candidate < LUKS_NSLOTS; candidate++) {
        /* The LUKS keyslot itself must be inactive... */
        if (crypt_keyslot_status(cd, candidate) != CRYPT_SLOT_INACTIVE)
            continue;
        /* ...and the corresponding metadata slot must be empty. */
        if (uuid_is_zero(lm->slots[candidate].uuid))
            return candidate;
    }

    return -1;
}
/**
 * Forcibly release a token regardless of who holds it.
 * Does nothing if the token does not exist.
 *
 * @param uuid  token identifier (must be non-NULL and non-zero)
 */
void tm_tokens_force_release(const exa_uuid_t *uuid)
{
    token_t *token;

    OS_ASSERT(uuid != NULL && !uuid_is_zero(uuid));

    token = __find_token(uuid);
    if (token == NULL)
        return;

    __reset_token(token);
    num_tokens--;
}
/**
 * Look up the node currently holding a token.
 *
 * @param[in]  uuid     token identifier (must be non-NULL and non-zero)
 * @param[out] node_id  set to the holder's node id on success
 *
 * @return 0 on success, -TM_ERR_NO_SUCH_TOKEN if the token is unknown.
 */
int tm_tokens_get_holder(const exa_uuid_t *uuid, exa_nodeid_t *node_id)
{
    const token_t *token;

    OS_ASSERT(uuid != NULL && !uuid_is_zero(uuid));

    token = __find_token(uuid);
    if (token == NULL)
        return -TM_ERR_NO_SUCH_TOKEN;

    *node_id = token->holder;
    return 0;
}
/**
 * Load the metadata stored in a slot.
 *
 * @param cd    crypt device whose header hole holds the metadata area
 * @param slot  slot index to read
 * @param uuid  filled with the slot's owner UUID on success
 * @param buf   destination buffer for the payload, or NULL to query the
 *              payload length only
 * @param size  capacity of buf in bytes
 *
 * @return the payload length on success, negative errno on failure
 *         (-EBADSLT bad slot, -ENODATA empty slot, -E2BIG buf too
 *          small, -EINVAL payload checksum mismatch)
 */
int luksmeta_load(struct crypt_device *cd, int slot, luksmeta_uuid_t uuid, void *buf, size_t size)
{
    uint32_t length = 0;
    lm_slot_t *s = NULL;
    lm_t lm = {};
    int fd = -1;
    int r = 0;

    if (slot < 0 || slot >= LUKS_NSLOTS)
        return -EBADSLT;

    s = &lm.slots[slot];

    /* On success the fd sits just past the raw header. */
    fd = read_header(cd, O_RDONLY, &length, &lm);
    if (fd < 0)
        return fd;

    /* An all-zero UUID marks an empty slot. */
    r = uuid_is_zero(s->uuid) ? -ENODATA : 0;
    if (r < 0)
        goto error;

    if (buf) {
        /* Caller's buffer must hold the whole payload. */
        r = size >= s->length ? 0 : -E2BIG;
        if (r < 0)
            goto error;

        /* Seek from just-past-the-header to the slot's payload. */
        r = lseek(fd, s->offset - sizeof(lm), SEEK_CUR) == -1 ? -errno : 0;
        if (r < 0)
            goto error;

        r = readall(fd, buf, s->length);
        if (r < 0)
            goto error;

        /* Verify the payload against the stored checksum. */
        r = crc32c(0, buf, s->length) == s->crc32c ? 0 : -EINVAL;
        if (r < 0)
            goto error;
    }

    memcpy(uuid, s->uuid, sizeof(luksmeta_uuid_t));
    close(fd);
    return s->length;

error:
    close(fd);
    return r;
}
/**
 * Release a token on behalf of the node that holds it.
 *
 * @param uuid     token identifier (must be non-NULL and non-zero)
 * @param node_id  node requesting the release (must be a valid id)
 *
 * @return 0 on success, -TM_ERR_NO_SUCH_TOKEN if the token is unknown,
 *         -TM_ERR_NOT_HOLDER if the node does not hold it.
 */
int tm_tokens_release(const exa_uuid_t *uuid, exa_nodeid_t node_id)
{
    token_t *token;

    OS_ASSERT(uuid != NULL && !uuid_is_zero(uuid));
    OS_ASSERT(node_id != EXA_NODEID_NONE);

    token = __find_token(uuid);
    if (token == NULL)
        return -TM_ERR_NO_SUCH_TOKEN;

    /* Only the current holder may release the token. */
    if (token->holder != node_id)
        return -TM_ERR_NOT_HOLDER;

    __reset_token(token);
    num_tokens--;

    return 0;
}
static token_t *__add_token(const exa_uuid_t *uuid) { token_t *t; int i; for (i = 0; i < TM_TOKENS_MAX; i++) { OS_ASSERT(!uuid_is_equal(&tokens[i].uuid, uuid)); if (uuid_is_zero(&tokens[i].uuid)) break; } if (i >= TM_TOKENS_MAX) return NULL; t = &tokens[i]; uuid_copy(&t->uuid, uuid); t->holder = EXA_NODEID_NONE; os_strlcpy(t->holder_addr, "", sizeof(t->holder_addr)); num_tokens++; return t; }
/*
 * ext4_ioctl - dispatch ext4-specific ioctl commands.
 *
 * Each case is self-contained: it takes the locks/write access it
 * needs (i_mutex, mnt_want_write_file, journal handles), does its work
 * and releases them on all paths via the per-case labels.
 */
long ext4_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
	struct inode *inode = file_inode(filp);
	struct super_block *sb = inode->i_sb;
	struct ext4_inode_info *ei = EXT4_I(inode);
	unsigned int flags;

	ext4_debug("cmd = %u, arg = %lu\n", cmd, arg);

	switch (cmd) {
	case EXT4_IOC_GETFLAGS:
		ext4_get_inode_flags(ei);
		flags = ei->i_flags & EXT4_FL_USER_VISIBLE;
		return put_user(flags, (int __user *) arg);
	case EXT4_IOC_SETFLAGS: {
		handle_t *handle = NULL;
		int err, migrate = 0;
		struct ext4_iloc iloc;
		unsigned int oldflags, mask, i;
		unsigned int jflag;

		if (!inode_owner_or_capable(inode))
			return -EACCES;

		if (get_user(flags, (int __user *) arg))
			return -EFAULT;

		err = mnt_want_write_file(filp);
		if (err)
			return err;

		flags = ext4_mask_flags(inode->i_mode, flags);

		err = -EPERM;
		mutex_lock(&inode->i_mutex);
		/* Is it quota file? Do not allow user to mess with it */
		if (IS_NOQUOTA(inode))
			goto flags_out;

		oldflags = ei->i_flags;

		/* The JOURNAL_DATA flag is modifiable only by root */
		jflag = flags & EXT4_JOURNAL_DATA_FL;

		/*
		 * The IMMUTABLE and APPEND_ONLY flags can only be changed by
		 * the relevant capability.
		 *
		 * This test looks nicer. Thanks to Pauline Middelink
		 */
		if ((flags ^ oldflags) & (EXT4_APPEND_FL | EXT4_IMMUTABLE_FL)) {
			if (!capable(CAP_LINUX_IMMUTABLE))
				goto flags_out;
		}

		/*
		 * The JOURNAL_DATA flag can only be changed by
		 * the relevant capability.
		 */
		if ((jflag ^ oldflags) & (EXT4_JOURNAL_DATA_FL)) {
			if (!capable(CAP_SYS_RESOURCE))
				goto flags_out;
		}
		/* Toggling EXTENTS requires migrating the inode format. */
		if ((flags ^ oldflags) & EXT4_EXTENTS_FL)
			migrate = 1;

		if (flags & EXT4_EOFBLOCKS_FL) {
			/* we don't support adding EOFBLOCKS flag */
			if (!(oldflags & EXT4_EOFBLOCKS_FL)) {
				err = -EOPNOTSUPP;
				goto flags_out;
			}
		} else if (oldflags & EXT4_EOFBLOCKS_FL)
			/* Clearing EOFBLOCKS drops blocks past EOF. */
			ext4_truncate(inode);

		handle = ext4_journal_start(inode, EXT4_HT_INODE, 1);
		if (IS_ERR(handle)) {
			err = PTR_ERR(handle);
			goto flags_out;
		}
		if (IS_SYNC(inode))
			ext4_handle_sync(handle);
		err = ext4_reserve_inode_write(handle, inode, &iloc);
		if (err)
			goto flags_err;

		/* Apply each user-modifiable flag bit individually. */
		for (i = 0, mask = 1; i < 32; i++, mask <<= 1) {
			if (!(mask & EXT4_FL_USER_MODIFIABLE))
				continue;
			if (mask & flags)
				ext4_set_inode_flag(inode, i);
			else
				ext4_clear_inode_flag(inode, i);
		}
		ext4_set_inode_flags(inode);
		inode->i_ctime = ext4_current_time(inode);

		err = ext4_mark_iloc_dirty(handle, inode, &iloc);
flags_err:
		ext4_journal_stop(handle);
		if (err)
			goto flags_out;

		/* Journal-data mode change must happen outside the handle. */
		if ((jflag ^ oldflags) & (EXT4_JOURNAL_DATA_FL))
			err = ext4_change_inode_journal_flag(inode, jflag);
		if (err)
			goto flags_out;
		if (migrate) {
			if (flags & EXT4_EXTENTS_FL)
				err = ext4_ext_migrate(inode);
			else
				err = ext4_ind_migrate(inode);
		}

flags_out:
		mutex_unlock(&inode->i_mutex);
		mnt_drop_write_file(filp);
		return err;
	}
	case EXT4_IOC_GETVERSION:
	case EXT4_IOC_GETVERSION_OLD:
		return put_user(inode->i_generation, (int __user *) arg);
	case EXT4_IOC_SETVERSION:
	case EXT4_IOC_SETVERSION_OLD: {
		handle_t *handle;
		struct ext4_iloc iloc;
		__u32 generation;
		int err;

		if (!inode_owner_or_capable(inode))
			return -EPERM;

		if (ext4_has_metadata_csum(inode->i_sb)) {
			ext4_warning(sb, "Setting inode version is not "
				     "supported with metadata_csum enabled.");
			return -ENOTTY;
		}

		err = mnt_want_write_file(filp);
		if (err)
			return err;
		if (get_user(generation, (int __user *) arg)) {
			err = -EFAULT;
			goto setversion_out;
		}

		mutex_lock(&inode->i_mutex);
		handle = ext4_journal_start(inode, EXT4_HT_INODE, 1);
		if (IS_ERR(handle)) {
			err = PTR_ERR(handle);
			goto unlock_out;
		}
		err = ext4_reserve_inode_write(handle, inode, &iloc);
		if (err == 0) {
			inode->i_ctime = ext4_current_time(inode);
			inode->i_generation = generation;
			err = ext4_mark_iloc_dirty(handle, inode, &iloc);
		}
		ext4_journal_stop(handle);

unlock_out:
		mutex_unlock(&inode->i_mutex);
setversion_out:
		mnt_drop_write_file(filp);
		return err;
	}
	case EXT4_IOC_GROUP_EXTEND: {
		ext4_fsblk_t n_blocks_count;
		int err, err2=0;

		err = ext4_resize_begin(sb);
		if (err)
			return err;

		if (get_user(n_blocks_count, (__u32 __user *)arg)) {
			err = -EFAULT;
			goto group_extend_out;
		}

		if (EXT4_HAS_RO_COMPAT_FEATURE(sb,
			       EXT4_FEATURE_RO_COMPAT_BIGALLOC)) {
			ext4_msg(sb, KERN_ERR,
				 "Online resizing not supported with bigalloc");
			err = -EOPNOTSUPP;
			goto group_extend_out;
		}

		err = mnt_want_write_file(filp);
		if (err)
			goto group_extend_out;

		err = ext4_group_extend(sb, EXT4_SB(sb)->s_es, n_blocks_count);
		/* Flush the journal so the resize is on stable storage. */
		if (EXT4_SB(sb)->s_journal) {
			jbd2_journal_lock_updates(EXT4_SB(sb)->s_journal);
			err2 = jbd2_journal_flush(EXT4_SB(sb)->s_journal);
			jbd2_journal_unlock_updates(EXT4_SB(sb)->s_journal);
		}
		if (err == 0)
			err = err2;
		mnt_drop_write_file(filp);
group_extend_out:
		ext4_resize_end(sb);
		return err;
	}
	case EXT4_IOC_MOVE_EXT: {
		struct move_extent me;
		struct fd donor;
		int err;

		if (!(filp->f_mode & FMODE_READ) ||
		    !(filp->f_mode & FMODE_WRITE))
			return -EBADF;

		if (copy_from_user(&me,
			(struct move_extent __user *)arg, sizeof(me)))
			return -EFAULT;
		me.moved_len = 0;

		donor = fdget(me.donor_fd);
		if (!donor.file)
			return -EBADF;

		if (!(donor.file->f_mode & FMODE_WRITE)) {
			err = -EBADF;
			goto mext_out;
		}

		if (EXT4_HAS_RO_COMPAT_FEATURE(sb,
			       EXT4_FEATURE_RO_COMPAT_BIGALLOC)) {
			ext4_msg(sb, KERN_ERR,
				 "Online defrag not supported with bigalloc");
			err = -EOPNOTSUPP;
			goto mext_out;
		}

		err = mnt_want_write_file(filp);
		if (err)
			goto mext_out;

		err = ext4_move_extents(filp, donor.file, me.orig_start,
					me.donor_start, me.len, &me.moved_len);
		mnt_drop_write_file(filp);

		/* moved_len is reported back even on partial success. */
		if (copy_to_user((struct move_extent __user *)arg,
				 &me, sizeof(me)))
			err = -EFAULT;
mext_out:
		fdput(donor);
		return err;
	}
	case EXT4_IOC_GROUP_ADD: {
		struct ext4_new_group_data input;
		int err, err2=0;

		err = ext4_resize_begin(sb);
		if (err)
			return err;

		if (copy_from_user(&input, (struct ext4_new_group_input __user *)arg,
				sizeof(input))) {
			err = -EFAULT;
			goto group_add_out;
		}

		if (EXT4_HAS_RO_COMPAT_FEATURE(sb,
			       EXT4_FEATURE_RO_COMPAT_BIGALLOC)) {
			ext4_msg(sb, KERN_ERR,
				 "Online resizing not supported with bigalloc");
			err = -EOPNOTSUPP;
			goto group_add_out;
		}

		err = mnt_want_write_file(filp);
		if (err)
			goto group_add_out;

		err = ext4_group_add(sb, &input);
		if (EXT4_SB(sb)->s_journal) {
			jbd2_journal_lock_updates(EXT4_SB(sb)->s_journal);
			err2 = jbd2_journal_flush(EXT4_SB(sb)->s_journal);
			jbd2_journal_unlock_updates(EXT4_SB(sb)->s_journal);
		}
		if (err == 0)
			err = err2;
		mnt_drop_write_file(filp);
		if (!err && ext4_has_group_desc_csum(sb) &&
		    test_opt(sb, INIT_INODE_TABLE))
			err = ext4_register_li_request(sb, input.group);
group_add_out:
		ext4_resize_end(sb);
		return err;
	}
	case EXT4_IOC_MIGRATE: {
		int err;
		if (!inode_owner_or_capable(inode))
			return -EACCES;

		err = mnt_want_write_file(filp);
		if (err)
			return err;
		/*
		 * inode_mutex prevent write and truncate on the file.
		 * Read still goes through. We take i_data_sem in
		 * ext4_ext_swap_inode_data before we switch the
		 * inode format to prevent read.
		 */
		mutex_lock(&(inode->i_mutex));
		err = ext4_ext_migrate(inode);
		mutex_unlock(&(inode->i_mutex));
		mnt_drop_write_file(filp);
		return err;
	}
	case EXT4_IOC_ALLOC_DA_BLKS: {
		int err;
		if (!inode_owner_or_capable(inode))
			return -EACCES;

		err = mnt_want_write_file(filp);
		if (err)
			return err;
		err = ext4_alloc_da_blocks(inode);
		mnt_drop_write_file(filp);
		return err;
	}
	case EXT4_IOC_SWAP_BOOT: {
		int err;
		if (!(filp->f_mode & FMODE_WRITE))
			return -EBADF;
		err = mnt_want_write_file(filp);
		if (err)
			return err;
		err = swap_inode_boot_loader(sb, inode);
		mnt_drop_write_file(filp);
		return err;
	}
	case EXT4_IOC_RESIZE_FS: {
		ext4_fsblk_t n_blocks_count;
		int err = 0, err2 = 0;
		ext4_group_t o_group = EXT4_SB(sb)->s_groups_count;

		if (EXT4_HAS_RO_COMPAT_FEATURE(sb,
			       EXT4_FEATURE_RO_COMPAT_BIGALLOC)) {
			ext4_msg(sb, KERN_ERR,
				 "Online resizing not (yet) supported with bigalloc");
			return -EOPNOTSUPP;
		}

		if (copy_from_user(&n_blocks_count, (__u64 __user *)arg,
				   sizeof(__u64))) {
			return -EFAULT;
		}

		err = ext4_resize_begin(sb);
		if (err)
			return err;

		err = mnt_want_write_file(filp);
		if (err)
			goto resizefs_out;

		err = ext4_resize_fs(sb, n_blocks_count);
		if (EXT4_SB(sb)->s_journal) {
			jbd2_journal_lock_updates(EXT4_SB(sb)->s_journal);
			err2 = jbd2_journal_flush(EXT4_SB(sb)->s_journal);
			jbd2_journal_unlock_updates(EXT4_SB(sb)->s_journal);
		}
		if (err == 0)
			err = err2;
		mnt_drop_write_file(filp);
		/* Register lazy-init only if new groups were added. */
		if (!err && (o_group > EXT4_SB(sb)->s_groups_count) &&
		    ext4_has_group_desc_csum(sb) &&
		    test_opt(sb, INIT_INODE_TABLE))
			err = ext4_register_li_request(sb, o_group);

resizefs_out:
		ext4_resize_end(sb);
		return err;
	}
	case FITRIM: {
		struct request_queue *q = bdev_get_queue(sb->s_bdev);
		struct fstrim_range range;
		int ret = 0;

		if (!capable(CAP_SYS_ADMIN))
			return -EPERM;

		if (!blk_queue_discard(q))
			return -EOPNOTSUPP;

		if (copy_from_user(&range, (struct fstrim_range __user *)arg,
		    sizeof(range)))
			return -EFAULT;

		/* Honor the device's minimum discard granularity. */
		range.minlen = max((unsigned int)range.minlen,
				   q->limits.discard_granularity);
		ret = ext4_trim_fs(sb, &range);
		if (ret < 0)
			return ret;

		if (copy_to_user((struct fstrim_range __user *)arg, &range,
		    sizeof(range)))
			return -EFAULT;

		return 0;
	}
	case EXT4_IOC_PRECACHE_EXTENTS:
		return ext4_ext_precache(inode);
	case EXT4_IOC_SET_ENCRYPTION_POLICY: {
#ifdef CONFIG_EXT4_FS_ENCRYPTION
		struct ext4_encryption_policy policy;
		int err = 0;

		if (copy_from_user(&policy,
				   (struct ext4_encryption_policy __user *)arg,
				   sizeof(policy))) {
			err = -EFAULT;
			goto encryption_policy_out;
		}

		err = ext4_process_policy(&policy, inode);
encryption_policy_out:
		return err;
#else
		return -EOPNOTSUPP;
#endif
	}
	case EXT4_IOC_GET_ENCRYPTION_PWSALT: {
		int err, err2;
		struct ext4_sb_info *sbi = EXT4_SB(sb);
		handle_t *handle;

		if (!ext4_sb_has_crypto(sb))
			return -EOPNOTSUPP;
		/* Lazily generate the salt on first use. */
		if (uuid_is_zero(sbi->s_es->s_encrypt_pw_salt)) {
			err = mnt_want_write_file(filp);
			if (err)
				return err;
			handle = ext4_journal_start_sb(sb, EXT4_HT_MISC, 1);
			if (IS_ERR(handle)) {
				err = PTR_ERR(handle);
				goto pwsalt_err_exit;
			}
			err = ext4_journal_get_write_access(handle, sbi->s_sbh);
			if (err)
				goto pwsalt_err_journal;
			generate_random_uuid(sbi->s_es->s_encrypt_pw_salt);
			err = ext4_handle_dirty_metadata(handle, NULL,
							 sbi->s_sbh);
pwsalt_err_journal:
			err2 = ext4_journal_stop(handle);
			if (err2 && !err)
				err = err2;
pwsalt_err_exit:
			mnt_drop_write_file(filp);
			if (err)
				return err;
		}
		if (copy_to_user((void *) arg, sbi->s_es->s_encrypt_pw_salt, 16))
			return -EFAULT;
		return 0;
	}
	case EXT4_IOC_GET_ENCRYPTION_POLICY: {
#ifdef CONFIG_EXT4_FS_ENCRYPTION
		struct ext4_encryption_policy policy;
		int err = 0;

		if (!ext4_encrypted_inode(inode))
			return -ENOENT;
		err = ext4_get_policy(inode, &policy);
		if (err)
			return err;
		if (copy_to_user((void *)arg, &policy, sizeof(policy)))
			return -EFAULT;
		return 0;
#else
		return -EOPNOTSUPP;
#endif
	}
	default:
		return -ENOTTY;
	}
}
/** \brief Initialization of examsgd daemon.
 *
 * Parses the command line (cluster id, node id/name, hostname,
 * incarnation, multicast group/port), validates that all mandatory
 * options are present, installs SIGTERM/SIGINT handlers, initializes
 * the messaging and logging layers, and finally hands over to
 * startup() to set up network communication.
 *
 * Accepts hidden option -d (debugging mode).
 *
 * \param[in] argc  Argument count.
 * \param[in] argv  Array of argument values.
 *
 * \return 0 for --stats, a negative error code on bad usage or init
 *         failure, otherwise the return value of startup().
 */
int daemon_init(int argc, char *argv[])
{
    int s;

    /* getopt long-option table; short aliases mirror the optstring below */
    static struct option long_opts[] = {
        { "cluster-id",  required_argument, NULL, 'c' },
        { "help",        no_argument,       NULL, 'h' },
        { "hostname",    required_argument, NULL, 'N' },
        { "incarnation", required_argument, NULL, 'I' },
        { "mcast-addr",  required_argument, NULL, 'm' },
        { "mcast-port",  required_argument, NULL, 'p' },
        { "node-id",     required_argument, NULL, 'i' },
        { "node-name",   required_argument, NULL, 'n' },
        { "stats",       no_argument,       NULL, 's' },
        { NULL, 0, NULL, 0 }
    };
    int long_idx, c;
    char *e;
    extern char *optarg;
    extern int optind;

    /* configurable options and their default values */
    const char *node_name = NULL;
    const char *hostname = NULL;
    const char *mgroup = EXAMSG_MCASTIP;
    unsigned short mport = EXAMSG_PORT;
    unsigned short inca = 0;
    exa_uuid_t cluster_uuid;
    exa_nodeid_t nodeid;
    bool err = false;

    uuid_zero(&cluster_uuid);
    nodeid = EXA_NODEID_NONE;

    /* options parsing */
    while ((c = os_getopt_long(argc, argv, "c:dhi:I:m:n:N:p:s", long_opts,
                               &long_idx)) != -1)
        switch (c) {
        case 'c':
            if (uuid_scan(optarg, &cluster_uuid) < 0) {
                fprintf(stderr, "invalid cluster id: '%s'\n", optarg);
                return -EINVAL;
            }
            break;

        case 'i':
            nodeid = (exa_nodeid_t)strtol(optarg, &e, 10);
            if (*e || !EXA_NODEID_VALID(nodeid)) {
                fprintf(stderr, "invalid node id: '%s'\n", optarg);
                return -EINVAL;
            }
            break;

        case 'I':
            /* incarnation must be a non-zero number */
            inca = (unsigned short)strtol(optarg, &e, 10);
            if (*e || inca == 0) {
                fprintf(stderr, "invalid incarnation: '%s'\n", optarg);
                return -EINVAL;
            }
            break;

        /* multicast group */
        case 'm':
            mgroup = optarg;
            break;

        case 'n':
            node_name = optarg;
            break;

        /* hostname */
        case 'N':
            hostname = optarg;
            break;

        /* communication port */
        case 'p':
            mport = strtol(optarg, &e, 0);
            if (*e != '\0') {
                fprintf(stderr, "invalid port number '%s'\n", optarg);
                return -EINVAL;
            }
            break;

        /* dump messaging statistics and exit */
        case 's':
            examsg_show_stats();
            return 0;
            break;

        /* usage */
        case 'h':
        case '?':
        default:
            usage(argv[0]);
            return -EINVAL;
        }

    /* All mandatory options are checked before bailing out, so the
     * user sees every missing item in one run. */
    if (uuid_is_zero(&cluster_uuid)) {
        fprintf(stderr, "missing cluster id\n");
        err = true;
    }

    if (nodeid == EXA_NODEID_NONE) {
        fprintf(stderr, "missing node id\n");
        err = true;
    }

    if (node_name == NULL) {
        fprintf(stderr, "missing node name\n");
        err = true;
    }

    if (hostname == NULL) {
        fprintf(stderr, "missing hostname\n");
        err = true;
    }

    if (inca == 0) {
        fprintf(stderr, "missing incarnation\n");
        err = true;
    }

    if (err)
        return -EINVAL;

    /* No stray positional parameters are accepted. */
    if (argc - optind != 0) {
        fprintf(stderr, "stray parameters\n");
        usage(argv[0]);
        return -EINVAL;
    }

    signal(SIGTERM, sig_term);
    signal(SIGINT, sig_term);

    s = examsg_static_init(EXAMSG_STATIC_GET);
    if (s) {
        fprintf(stderr, "Can't initialize messaging layer.");
        return s;
    }

    exalog_static_init();

    /* Log as exa_msgd by default */
    exalog_as(EXAMSG_CMSGD_ID);

#ifdef USE_YAOURT
    if (yaourt_init())
        exalog_debug("Yaourt: Examsgd init OK");
    else
        exalog_warning("Yaourt: Examsgd init FAILED (%s)", yaourt_error);
#endif

    /* set up network communication */
    return startup(&cluster_uuid, node_name, hostname, nodeid, mgroup,
                   mport, inca);
}
/*
 * ext4_ioctl - dispatch ext4-specific ioctl commands (newer variant:
 * uses inode_lock/inode_unlock, fscrypt helpers and the
 * ext4_ioctl_setflags/ext4_ioctl_group_add helpers).
 *
 * Each case is self-contained: it takes the locks/write access it
 * needs and releases them on all paths via the per-case labels.
 */
long ext4_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
	struct inode *inode = file_inode(filp);
	struct super_block *sb = inode->i_sb;
	struct ext4_inode_info *ei = EXT4_I(inode);
	unsigned int flags;

	ext4_debug("cmd = %u, arg = %lu\n", cmd, arg);

	switch (cmd) {
	case FS_IOC_GETFSMAP:
		return ext4_ioc_getfsmap(sb, (void __user *)arg);
	case EXT4_IOC_GETFLAGS:
		flags = ei->i_flags & EXT4_FL_USER_VISIBLE;
		return put_user(flags, (int __user *) arg);
	case EXT4_IOC_SETFLAGS: {
		int err;

		if (!inode_owner_or_capable(inode))
			return -EACCES;

		if (get_user(flags, (int __user *) arg))
			return -EFAULT;

		if (flags & ~EXT4_FL_USER_VISIBLE)
			return -EOPNOTSUPP;
		/*
		 * chattr(1) grabs flags via GETFLAGS, modifies the result and
		 * passes that to SETFLAGS. So we cannot easily make SETFLAGS
		 * more restrictive than just silently masking off visible but
		 * not settable flags as we always did.
		 */
		flags &= EXT4_FL_USER_MODIFIABLE;
		if (ext4_mask_flags(inode->i_mode, flags) != flags)
			return -EOPNOTSUPP;

		err = mnt_want_write_file(filp);
		if (err)
			return err;

		inode_lock(inode);
		err = ext4_ioctl_setflags(inode, flags);
		inode_unlock(inode);
		mnt_drop_write_file(filp);
		return err;
	}
	case EXT4_IOC_GETVERSION:
	case EXT4_IOC_GETVERSION_OLD:
		return put_user(inode->i_generation, (int __user *) arg);
	case EXT4_IOC_SETVERSION:
	case EXT4_IOC_SETVERSION_OLD: {
		handle_t *handle;
		struct ext4_iloc iloc;
		__u32 generation;
		int err;

		if (!inode_owner_or_capable(inode))
			return -EPERM;

		if (ext4_has_metadata_csum(inode->i_sb)) {
			ext4_warning(sb, "Setting inode version is not "
				     "supported with metadata_csum enabled.");
			return -ENOTTY;
		}

		err = mnt_want_write_file(filp);
		if (err)
			return err;
		if (get_user(generation, (int __user *) arg)) {
			err = -EFAULT;
			goto setversion_out;
		}

		inode_lock(inode);
		handle = ext4_journal_start(inode, EXT4_HT_INODE, 1);
		if (IS_ERR(handle)) {
			err = PTR_ERR(handle);
			goto unlock_out;
		}
		err = ext4_reserve_inode_write(handle, inode, &iloc);
		if (err == 0) {
			inode->i_ctime = current_time(inode);
			inode->i_generation = generation;
			err = ext4_mark_iloc_dirty(handle, inode, &iloc);
		}
		ext4_journal_stop(handle);

unlock_out:
		inode_unlock(inode);
setversion_out:
		mnt_drop_write_file(filp);
		return err;
	}
	case EXT4_IOC_GROUP_EXTEND: {
		ext4_fsblk_t n_blocks_count;
		int err, err2=0;

		err = ext4_resize_begin(sb);
		if (err)
			return err;

		if (get_user(n_blocks_count, (__u32 __user *)arg)) {
			err = -EFAULT;
			goto group_extend_out;
		}

		if (ext4_has_feature_bigalloc(sb)) {
			ext4_msg(sb, KERN_ERR,
				 "Online resizing not supported with bigalloc");
			err = -EOPNOTSUPP;
			goto group_extend_out;
		}

		err = mnt_want_write_file(filp);
		if (err)
			goto group_extend_out;

		err = ext4_group_extend(sb, EXT4_SB(sb)->s_es, n_blocks_count);
		/* Flush the journal so the resize is on stable storage. */
		if (EXT4_SB(sb)->s_journal) {
			jbd2_journal_lock_updates(EXT4_SB(sb)->s_journal);
			err2 = jbd2_journal_flush(EXT4_SB(sb)->s_journal);
			jbd2_journal_unlock_updates(EXT4_SB(sb)->s_journal);
		}
		if (err == 0)
			err = err2;
		mnt_drop_write_file(filp);
group_extend_out:
		ext4_resize_end(sb);
		return err;
	}
	case EXT4_IOC_MOVE_EXT: {
		struct move_extent me;
		struct fd donor;
		int err;

		if (!(filp->f_mode & FMODE_READ) ||
		    !(filp->f_mode & FMODE_WRITE))
			return -EBADF;

		if (copy_from_user(&me,
			(struct move_extent __user *)arg, sizeof(me)))
			return -EFAULT;
		me.moved_len = 0;

		donor = fdget(me.donor_fd);
		if (!donor.file)
			return -EBADF;

		if (!(donor.file->f_mode & FMODE_WRITE)) {
			err = -EBADF;
			goto mext_out;
		}

		if (ext4_has_feature_bigalloc(sb)) {
			ext4_msg(sb, KERN_ERR,
				 "Online defrag not supported with bigalloc");
			err = -EOPNOTSUPP;
			goto mext_out;
		} else if (IS_DAX(inode)) {
			ext4_msg(sb, KERN_ERR,
				 "Online defrag not supported with DAX");
			err = -EOPNOTSUPP;
			goto mext_out;
		}

		err = mnt_want_write_file(filp);
		if (err)
			goto mext_out;

		err = ext4_move_extents(filp, donor.file, me.orig_start,
					me.donor_start, me.len, &me.moved_len);
		mnt_drop_write_file(filp);

		/* moved_len is reported back even on partial success. */
		if (copy_to_user((struct move_extent __user *)arg,
				 &me, sizeof(me)))
			err = -EFAULT;
mext_out:
		fdput(donor);
		return err;
	}
	case EXT4_IOC_GROUP_ADD: {
		struct ext4_new_group_data input;

		if (copy_from_user(&input, (struct ext4_new_group_input __user *)arg,
				sizeof(input)))
			return -EFAULT;

		return ext4_ioctl_group_add(filp, &input);
	}
	case EXT4_IOC_MIGRATE: {
		int err;
		if (!inode_owner_or_capable(inode))
			return -EACCES;

		err = mnt_want_write_file(filp);
		if (err)
			return err;
		/*
		 * inode_mutex prevent write and truncate on the file.
		 * Read still goes through. We take i_data_sem in
		 * ext4_ext_swap_inode_data before we switch the
		 * inode format to prevent read.
		 */
		inode_lock((inode));
		err = ext4_ext_migrate(inode);
		inode_unlock((inode));
		mnt_drop_write_file(filp);
		return err;
	}
	case EXT4_IOC_ALLOC_DA_BLKS: {
		int err;
		if (!inode_owner_or_capable(inode))
			return -EACCES;

		err = mnt_want_write_file(filp);
		if (err)
			return err;
		err = ext4_alloc_da_blocks(inode);
		mnt_drop_write_file(filp);
		return err;
	}
	case EXT4_IOC_SWAP_BOOT: {
		int err;
		if (!(filp->f_mode & FMODE_WRITE))
			return -EBADF;
		err = mnt_want_write_file(filp);
		if (err)
			return err;
		err = swap_inode_boot_loader(sb, inode);
		mnt_drop_write_file(filp);
		return err;
	}
	case EXT4_IOC_RESIZE_FS: {
		ext4_fsblk_t n_blocks_count;
		int err = 0, err2 = 0;
		ext4_group_t o_group = EXT4_SB(sb)->s_groups_count;

		if (copy_from_user(&n_blocks_count, (__u64 __user *)arg,
				   sizeof(__u64))) {
			return -EFAULT;
		}

		err = ext4_resize_begin(sb);
		if (err)
			return err;

		err = mnt_want_write_file(filp);
		if (err)
			goto resizefs_out;

		err = ext4_resize_fs(sb, n_blocks_count);
		if (EXT4_SB(sb)->s_journal) {
			jbd2_journal_lock_updates(EXT4_SB(sb)->s_journal);
			err2 = jbd2_journal_flush(EXT4_SB(sb)->s_journal);
			jbd2_journal_unlock_updates(EXT4_SB(sb)->s_journal);
		}
		if (err == 0)
			err = err2;
		mnt_drop_write_file(filp);
		/* Register lazy-init only if new groups were added. */
		if (!err && (o_group > EXT4_SB(sb)->s_groups_count) &&
		    ext4_has_group_desc_csum(sb) &&
		    test_opt(sb, INIT_INODE_TABLE))
			err = ext4_register_li_request(sb, o_group);

resizefs_out:
		ext4_resize_end(sb);
		return err;
	}
	case FITRIM: {
		struct request_queue *q = bdev_get_queue(sb->s_bdev);
		struct fstrim_range range;
		int ret = 0;

		if (!capable(CAP_SYS_ADMIN))
			return -EPERM;

		if (!blk_queue_discard(q))
			return -EOPNOTSUPP;

		if (copy_from_user(&range, (struct fstrim_range __user *)arg,
		    sizeof(range)))
			return -EFAULT;

		/* Honor the device's minimum discard granularity. */
		range.minlen = max((unsigned int)range.minlen,
				   q->limits.discard_granularity);
		ret = ext4_trim_fs(sb, &range);
		if (ret < 0)
			return ret;

		if (copy_to_user((struct fstrim_range __user *)arg, &range,
		    sizeof(range)))
			return -EFAULT;

		return 0;
	}
	case EXT4_IOC_PRECACHE_EXTENTS:
		return ext4_ext_precache(inode);
	case EXT4_IOC_SET_ENCRYPTION_POLICY:
		if (!ext4_has_feature_encrypt(sb))
			return -EOPNOTSUPP;
		return fscrypt_ioctl_set_policy(filp, (const void __user *)arg);
	case EXT4_IOC_GET_ENCRYPTION_PWSALT: {
#ifdef CONFIG_EXT4_FS_ENCRYPTION
		int err, err2;
		struct ext4_sb_info *sbi = EXT4_SB(sb);
		handle_t *handle;

		if (!ext4_has_feature_encrypt(sb))
			return -EOPNOTSUPP;
		/* Lazily generate the salt on first use. */
		if (uuid_is_zero(sbi->s_es->s_encrypt_pw_salt)) {
			err = mnt_want_write_file(filp);
			if (err)
				return err;
			handle = ext4_journal_start_sb(sb, EXT4_HT_MISC, 1);
			if (IS_ERR(handle)) {
				err = PTR_ERR(handle);
				goto pwsalt_err_exit;
			}
			err = ext4_journal_get_write_access(handle, sbi->s_sbh);
			if (err)
				goto pwsalt_err_journal;
			generate_random_uuid(sbi->s_es->s_encrypt_pw_salt);
			err = ext4_handle_dirty_metadata(handle, NULL,
							 sbi->s_sbh);
pwsalt_err_journal:
			err2 = ext4_journal_stop(handle);
			if (err2 && !err)
				err = err2;
pwsalt_err_exit:
			mnt_drop_write_file(filp);
			if (err)
				return err;
		}
		if (copy_to_user((void __user *) arg,
				 sbi->s_es->s_encrypt_pw_salt, 16))
			return -EFAULT;
		return 0;
#else
		return -EOPNOTSUPP;
#endif
	}
	case EXT4_IOC_GET_ENCRYPTION_POLICY:
		return fscrypt_ioctl_get_policy(filp, (void __user *)arg);
	case EXT4_IOC_FSGETXATTR: {
		struct fsxattr fa;

		memset(&fa, 0, sizeof(struct fsxattr));
		fa.fsx_xflags = ext4_iflags_to_xflags(ei->i_flags & EXT4_FL_USER_VISIBLE);

		if (ext4_has_feature_project(inode->i_sb)) {
			fa.fsx_projid = (__u32)from_kprojid(&init_user_ns,
				EXT4_I(inode)->i_projid);
		}

		if (copy_to_user((struct fsxattr __user *)arg,
				 &fa, sizeof(fa)))
			return -EFAULT;
		return 0;
	}
	case EXT4_IOC_FSSETXATTR: {
		struct fsxattr fa;
		int err;

		if (copy_from_user(&fa, (struct fsxattr __user *)arg,
				   sizeof(fa)))
			return -EFAULT;

		/* Make sure caller has proper permission */
		if (!inode_owner_or_capable(inode))
			return -EACCES;

		if (fa.fsx_xflags & ~EXT4_SUPPORTED_FS_XFLAGS)
			return -EOPNOTSUPP;

		flags = ext4_xflags_to_iflags(fa.fsx_xflags);
		if (ext4_mask_flags(inode->i_mode, flags) != flags)
			return -EOPNOTSUPP;

		err = mnt_want_write_file(filp);
		if (err)
			return err;
		inode_lock(inode);
		/* Keep the non-xflag-visible bits, replace the rest. */
		flags = (ei->i_flags & ~EXT4_FL_XFLAG_VISIBLE) |
			 (flags & EXT4_FL_XFLAG_VISIBLE);
		err = ext4_ioctl_setflags(inode, flags);
		inode_unlock(inode);
		mnt_drop_write_file(filp);
		if (err)
			return err;

		err = ext4_ioctl_setproject(filp, fa.fsx_projid);
		if (err)
			return err;

		return 0;
	}
	case EXT4_IOC_SHUTDOWN:
		return ext4_shutdown(sb, arg);
	default:
		return -ENOTTY;
	}
}
static void get_info_from_params(const struct dgcreate_params *params, struct dgcreate_info *info, cl_error_desc_t *err_desc) { xmlDocPtr config; xmlNodePtr diskgroup_ptr; xmlAttrPtr attr; int i; EXA_ASSERT(params); EXA_ASSERT(info); EXA_ASSERT(err_desc); config = params->config; memset(info, 0, sizeof(*info)); diskgroup_ptr = xml_conf_xpath_singleton(config, "/Exanodes/diskgroup"); uuid_generate(&info->uuid); /* 0 means that the slot width will be automagically computed */ info->slot_width = 0; info->chunk_size = adm_cluster_get_param_int("default_chunk_size"); info->su_size = adm_cluster_get_param_int("default_su_size"); info->dirty_zone_size = adm_cluster_get_param_int("default_dirty_zone_size"); info->blended_stripes = false; info->nb_disks = 0; info->nb_spare = VRT_DEFAULT_NB_SPARES; info->layout[0] = '\0'; for (attr = diskgroup_ptr->properties; attr != NULL; attr = attr->next) { if (xmlStrEqual(attr->name, BAD_CAST("name"))) strlcpy(info->name, xml_get_prop(diskgroup_ptr, "name"), EXA_MAXSIZE_GROUPNAME + 1); else if (xmlStrEqual(attr->name, BAD_CAST("layout"))) strlcpy(info->layout, xml_get_prop(diskgroup_ptr, "layout"), EXA_MAXSIZE_LAYOUTNAME + 1); else if (xmlStrEqual(attr->name, BAD_CAST("slot_width"))) { if (xml_get_uint_prop(diskgroup_ptr, "slot_width", &info->slot_width, err_desc) != 0) return; /* NOTE User can not give a zero value * If slot_width is not provided, we pass zero * to vrt so that it can calculate the proper slot_width */ if (info->slot_width == 0) { set_error(err_desc, -EXA_ERR_XML_GET, "slot_width must be greater than zero"); return; } } else if (xmlStrEqual(attr->name, BAD_CAST("chunk_size"))) { if (xml_get_uint_prop(diskgroup_ptr, "chunk_size", &info->chunk_size, err_desc) != 0) return; } else if (xmlStrEqual(attr->name, BAD_CAST("su_size"))) { if (xml_get_uint_prop(diskgroup_ptr, "su_size", &info->su_size, err_desc) != 0) return; } else if (xmlStrEqual(attr->name, BAD_CAST("dirty_zone_size"))) { if (xml_get_uint_prop(diskgroup_ptr, 
"dirty_zone_size", &info->dirty_zone_size, err_desc) != 0) return; } else if (xmlStrEqual(attr->name, BAD_CAST("blended_stripes"))) { if (xml_get_uint_prop(diskgroup_ptr, "blended_stripes", &info->blended_stripes, err_desc) != 0) return; } else if (xmlStrEqual(attr->name, BAD_CAST("nb_spare"))) { if (xml_get_uint_prop(diskgroup_ptr, "nb_spare", &info->nb_spare, err_desc) != 0) return; } else if (!xmlStrEqual(attr->name, BAD_CAST("cluster"))) { set_error(err_desc, -EXA_ERR_XML_GET, "Unknown group property '%s'", (char *)attr->name); return; } } /* Check the group name */ if (info->name == NULL || info->name[0] == '\0') { set_error(err_desc, -EXA_ERR_INVALID_PARAM, NULL); return; } /* Check if a group with that name already exist */ if (adm_group_get_group_by_name(info->name) != NULL) { set_error(err_desc, -VRT_ERR_GROUPNAME_USED, NULL); return; } if (info->layout[0] == '\0') { set_error(err_desc, -EXA_ERR_XML_GET, NULL); return; } if (params->alldisks) { struct adm_node *node; adm_cluster_for_each_node(node) { struct adm_disk *disk; adm_node_for_each_disk(node, disk) { if (uuid_is_zero(&disk->group_uuid)) { if (disk->path[0] == '\0') { set_error(err_desc, -ADMIND_ERR_UNKNOWN_DISK, "disk " UUID_FMT " is unknown", UUID_VAL(&disk->uuid)); return; } if (info->nb_disks >= NBMAX_DISKS_PER_GROUP) { set_error(err_desc, -ADMIND_ERR_TOO_MANY_DISKS_IN_GROUP, "too many disks in group (> %d)", NBMAX_DISKS_PER_GROUP); return; } uuid_copy(&info->disks[info->nb_disks], &disk->uuid); info->nb_disks++; } } } } else {