static int __init test_init(void)
{
	dev_t dev;
	int err = 0;
	struct block_device *bdev;

	LOGe("BUILD_DATE %s\n", BUILD_DATE);

	if (path_) {
		bdev = blkdev_get_by_path(
			path_, FMODE_READ|FMODE_WRITE|FMODE_EXCL, lock_);
		if (IS_ERR(bdev)) {
			err = PTR_ERR(bdev);
			goto error0;
		}
		blkdev_put(bdev, FMODE_READ|FMODE_WRITE|FMODE_EXCL);
	}
	if (major_ != UINT_MAX && minor_ != UINT_MAX) {
		dev = MKDEV(major_, minor_);
		bdev = blkdev_get_by_dev(
			dev, FMODE_READ|FMODE_WRITE|FMODE_EXCL, lock_);
		if (IS_ERR(bdev)) {
			err = PTR_ERR(bdev);
			goto error0;
		}
		blkdev_put(bdev, FMODE_READ|FMODE_WRITE|FMODE_EXCL);
	}

	LOGn("succeeded.\n");
	return -1; /* Note: returns -1 even on success. */
error0:
	LOGn("failed %d.\n", err);
	return -1;
}
static int __init test_init(void)
{
	struct kmem_cache *cache;

#if 0
	bio_entry_init();
	bio_entry_exit();
#endif
#if 0
	bio_wrapper_init();
	bio_wrapper_exit();
#endif

	LOGn("sizeof bio_entry %zu bio_wrapper %zu\n",
		sizeof(struct bio_entry), sizeof(struct bio_wrapper));

	cache = kmem_cache_create(
		"test_bio_entry_cache", obj_size_, 0, 0, NULL);
	if (cache) {
		LOGn("kmem_cache_create size %u success.\n", obj_size_);
		msleep(1);
		kmem_cache_destroy(cache);
	} else {
		LOGn("kmem_cache_create size %u failed.\n", obj_size_);
	}
	return -1; /* Note: returns -1 even on success. */
}
/**
 * Melt a frozen device.
 */
void task_melt(struct work_struct *work)
{
	struct delayed_work *dwork =
		container_of(work, struct delayed_work, work);
	struct walb_dev *wdev =
		container_of(dwork, struct walb_dev, freeze_dwork);

	ASSERT(wdev);

	mutex_lock(&wdev->freeze_lock);

	switch (wdev->freeze_state) {
	case FRZ_MELTED:
		LOGn("FRZ_MELTED minor %u.\n", MINOR(wdev->devt));
		break;
	case FRZ_FREEZED:
		LOGn("FRZ_FREEZED minor %u.\n", MINOR(wdev->devt));
		break;
	case FRZ_FREEZED_WITH_TIMEOUT:
		LOGn("Melt walb device minor %u.\n", MINOR(wdev->devt));
		start_checkpointing(&wdev->cpd);
		iocore_melt(wdev);
		wdev->freeze_state = FRZ_MELTED;
		break;
	default:
		BUG();
	}

	mutex_unlock(&wdev->freeze_lock);
}
/**
 * Set oldest_lsid.
 *
 * @wdev walb dev.
 * @ctl ioctl data.
 * RETURN:
 *   0 in success, or -EFAULT.
 */
static int ioctl_wdev_set_oldest_lsid(struct walb_dev *wdev, struct walb_ctl *ctl)
{
	u64 lsid, oldest_lsid, written_lsid;

	LOGn("WALB_IOCTL_SET_OLDEST_LSID\n");
	lsid = ctl->val_u64;

	spin_lock(&wdev->lsid_lock);
	written_lsid = wdev->lsids.written;
	oldest_lsid = wdev->lsids.oldest;
	spin_unlock(&wdev->lsid_lock);

	if (!(lsid == written_lsid ||
			(oldest_lsid <= lsid && lsid < written_lsid &&
				walb_check_lsid_valid(wdev, lsid)))) {
		LOGe("lsid %"PRIu64" is not valid.\n", lsid);
		LOGe("You should specify a valid logpack header lsid"
			" (oldest_lsid (%"PRIu64") <= lsid <= written_lsid (%"PRIu64")).\n",
			oldest_lsid, written_lsid);
		return -EFAULT;
	}

	spin_lock(&wdev->lsid_lock);
	wdev->lsids.oldest = lsid;
	spin_unlock(&wdev->lsid_lock);

	if (!walb_sync_super_block(wdev)) {
		LOGe("sync super block failed.\n");
		return -EFAULT;
	}
	return 0;
}
/**
 * Helper function for tasks.
 *
 * @data any data.
 * @nr flag bit number.
 * @flags_p pointer to flags data.
 * @wq workqueue.
 * @task task.
 *
 * RETURN:
 *   pack_work if really enqueued, or NULL.
 */
struct pack_work* enqueue_task_if_necessary(
	void *data, int nr, unsigned long *flags_p,
	struct workqueue_struct *wq, void (*task)(struct work_struct *))
{
	struct pack_work *pwork = NULL;
	int ret;

	ASSERT(task);
	ASSERT(wq);

retry:
	if (!test_and_set_bit(nr, flags_p)) {
		pwork = create_pack_work(data, GFP_NOIO);
		if (!pwork) {
			LOGn("memory allocation failed.\n");
			clear_bit(nr, flags_p);
			schedule();
			goto retry;
		}
		LOG_("enqueue task for %d\n", nr);
		INIT_WORK(&pwork->work, task);
		ret = queue_work(wq, &pwork->work);
		if (!ret) {
			LOGe("work is already on the queue.\n");
		}
	}
	return pwork;
}
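/*
 * Usage sketch (not compiled; a minimal illustration, not one of the real
 * callers): the flag bit gates duplicate enqueueing, so a caller typically
 * passes a per-device flags word and a task that clears that bit when it
 * finishes. The names example_task, EXAMPLE_TASK_BIT, wdev->flags and the
 * cleanup call are hypothetical.
 */
#if 0
static void example_task(struct work_struct *work)
{
	struct pack_work *pwork = container_of(work, struct pack_work, work);
	struct walb_dev *wdev = (struct walb_dev *)pwork->data;

	/* ... perform the deferred work for wdev ... */

	/* Clear the gating bit so a later enqueue_task_if_necessary()
	   call can enqueue the task again. */
	clear_bit(EXAMPLE_TASK_BIT, &wdev->flags);
	kfree(pwork); /* or the matching destroy helper for pack_work */
}

/* In a submission path: enqueue at most one instance of the task. */
enqueue_task_if_necessary(wdev, EXAMPLE_TASK_BIT, &wdev->flags,
			wq_misc_, example_task);
#endif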
/**
 * Resize disk.
 *
 * @gd disk.
 * @new_size new size [logical block].
 *
 * RETURN:
 *   true in success, or false.
 */
bool resize_disk(struct gendisk *gd, u64 new_size)
{
	struct block_device *bdev;
	u64 old_size;

	ASSERT(gd);

	old_size = get_capacity(gd);
	if (old_size == new_size) {
		return true;
	}
	set_capacity(gd, new_size);

	bdev = bdget_disk(gd, 0);
	if (!bdev) {
		LOGe("bdget_disk failed.\n");
		return false;
	}
	mutex_lock(&bdev->bd_mutex);
	if (old_size > new_size) {
		LOGn("Shrink disk should discard block cache.\n");
		check_disk_size_change(gd, bdev);
		/* This should be implemented in check_disk_size_change(). */
		bdev->bd_invalidated = 0;
	} else {
		i_size_write(bdev->bd_inode,
			(loff_t)new_size * LOGICAL_BLOCK_SIZE);
	}
	mutex_unlock(&bdev->bd_mutex);
	bdput(bdev);
	return true;
}
/**
 * Get status.
 *
 * @wdev walb dev.
 * @ctl ioctl data.
 * RETURN:
 *   0 in success, or -EFAULT.
 */
static int ioctl_wdev_status(struct walb_dev *wdev, struct walb_ctl *ctl)
{
	/* not yet implemented */
	LOGn("WALB_IOCTL_STATUS is not supported currently.\n");
	return -EFAULT;
}
/**
 * Resize walb device.
 *
 * @wdev walb dev.
 * @ctl ioctl data.
 * RETURN:
 *   0 in success, or -EFAULT.
 */
static int ioctl_wdev_resize(struct walb_dev *wdev, struct walb_ctl *ctl)
{
	u64 ddev_size;
	u64 new_size;
	u64 old_size;

	LOGn("WALB_IOCTL_RESIZE.\n");
	ASSERT(ctl->command == WALB_IOCTL_RESIZE);

	old_size = get_capacity(wdev->gd);
	new_size = ctl->val_u64;
	ddev_size = wdev->ddev->bd_part->nr_sects;

	if (new_size == 0) {
		new_size = ddev_size;
	}
	if (new_size < old_size) {
		LOGe("Shrink size from %"PRIu64" to %"PRIu64" is not supported.\n",
			old_size, new_size);
		return -EFAULT;
	}
	if (new_size > ddev_size) {
		LOGe("new_size %"PRIu64" > data device capacity %"PRIu64".\n",
			new_size, ddev_size);
		return -EFAULT;
	}
	if (new_size == old_size) {
		LOGn("No need to resize.\n");
		return 0;
	}

	spin_lock(&wdev->size_lock);
	wdev->size = new_size;
	wdev->ddev_size = ddev_size;
	spin_unlock(&wdev->size_lock);

	if (!resize_disk(wdev->gd, new_size)) {
		return -EFAULT;
	}

	/* Sync super block for super->device_size */
	if (!walb_sync_super_block(wdev)) {
		LOGe("superblock sync failed.\n");
		return -EFAULT;
	}
	return 0;
}
/**
 * Get oldest_lsid.
 *
 * @wdev walb dev.
 * @ctl ioctl data.
 * RETURN:
 *   0.
 */
static int ioctl_wdev_get_oldest_lsid(struct walb_dev *wdev, struct walb_ctl *ctl)
{
	LOGn("WALB_IOCTL_GET_OLDEST_LSID\n");
	ASSERT(ctl->command == WALB_IOCTL_GET_OLDEST_LSID);

	ctl->val_u64 = get_oldest_lsid(wdev);
	return 0;
}
/**
 * Get flush request capable or not.
 *
 * @wdev walb dev.
 * @ctl ioctl data.
 * RETURN:
 *   0.
 */
static int ioctl_wdev_is_flush_capable(struct walb_dev *wdev, struct walb_ctl *ctl)
{
	LOGn("WALB_IOCTL_IS_FLUSH_CAPABLE\n");
	ASSERT(ctl->command == WALB_IOCTL_IS_FLUSH_CAPABLE);

	ctl->val_int = (wdev->queue->flush_flags & REQ_FLUSH) != 0;
	return 0;
}
/**
 * Get log capacity.
 *
 * @wdev walb dev.
 * @ctl ioctl data.
 * RETURN:
 *   0.
 */
static int ioctl_wdev_get_log_capacity(struct walb_dev *wdev, struct walb_ctl *ctl)
{
	LOGn("WALB_IOCTL_GET_LOG_CAPACITY\n");
	ASSERT(ctl->command == WALB_IOCTL_GET_LOG_CAPACITY);

	ctl->val_u64 = walb_get_log_capacity(wdev);
	return 0;
}
/**
 * Get log usage.
 *
 * @wdev walb dev.
 * @ctl ioctl data.
 * RETURN:
 *   0.
 */
static int ioctl_wdev_get_log_usage(struct walb_dev *wdev, struct walb_ctl *ctl)
{
	LOGn("WALB_IOCTL_GET_LOG_USAGE\n");
	ASSERT(ctl->command == WALB_IOCTL_GET_LOG_USAGE);

	ctl->val_u64 = walb_get_log_usage(wdev);
	return 0;
}
/**
 * Get completed_lsid.
 *
 * @wdev walb dev.
 * @ctl ioctl data.
 * RETURN:
 *   0.
 */
static int ioctl_wdev_get_completed_lsid(struct walb_dev *wdev, struct walb_ctl *ctl)
{
	LOGn("WALB_IOCTL_GET_COMPLETED_LSID\n");
	ASSERT(ctl->command == WALB_IOCTL_GET_COMPLETED_LSID);

	ctl->val_u64 = get_completed_lsid(wdev);
	return 0;
}
/**
 * Get written_lsid.
 *
 * @wdev walb dev.
 * @ctl ioctl data.
 * RETURN:
 *   0.
 */
static int ioctl_wdev_get_written_lsid(struct walb_dev *wdev, struct walb_ctl *ctl)
{
	LOGn("WALB_IOCTL_GET_WRITTEN_LSID\n");
	ASSERT(ctl->command == WALB_IOCTL_GET_WRITTEN_LSID);

	ctl->val_u64 = get_written_lsid(wdev);
	return 0;
}
/**
 * Get checkpoint interval.
 *
 * @wdev walb dev.
 * @ctl ioctl data.
 * RETURN:
 *   0.
 */
static int ioctl_wdev_get_checkpoint_interval(struct walb_dev *wdev, struct walb_ctl *ctl)
{
	LOGn("WALB_IOCTL_GET_CHECKPOINT_INTERVAL\n");
	ASSERT(ctl->command == WALB_IOCTL_GET_CHECKPOINT_INTERVAL);

	ctl->val_u32 = get_checkpoint_interval(&wdev->cpd);
	return 0;
}
/**
 * Check log space overflow.
 *
 * @wdev walb dev.
 * @ctl ioctl data.
 * RETURN:
 *   0.
 */
static int ioctl_wdev_is_log_overflow(struct walb_dev *wdev, struct walb_ctl *ctl)
{
	ASSERT(ctl->command == WALB_IOCTL_IS_LOG_OVERFLOW);
	LOGn("WALB_IOCTL_IS_LOG_OVERFLOW.\n");

	ctl->val_int = iocore_is_log_overflow(wdev);
	return 0;
}
/**
 * Freeze if melted and enqueue a melting work if required.
 *
 * @wdev walb device.
 * @timeout_sec timeout to melt the device [sec].
 *   Specify 0 for no timeout.
 *
 * RETURN:
 *   true in success, or false (due to race condition).
 */
bool freeze_if_melted(struct walb_dev *wdev, u32 timeout_sec)
{
	unsigned int minor;
	int ret;

	ASSERT(wdev);
	minor = MINOR(wdev->devt);

	/* Freeze and enqueue a melt work if required. */
	mutex_lock(&wdev->freeze_lock);
	switch (wdev->freeze_state) {
	case FRZ_MELTED:
		/* Freeze iocore and checkpointing. */
		LOGn("Freeze walb device minor %u.\n", minor);
		iocore_freeze(wdev);
		stop_checkpointing(&wdev->cpd);
		wdev->freeze_state = FRZ_FREEZED;
		break;
	case FRZ_FREEZED:
		/* Do nothing. */
		LOGn("Already frozen minor %u.\n", minor);
		break;
	case FRZ_FREEZED_WITH_TIMEOUT:
		LOGe("Race condition occurred.\n");
		mutex_unlock(&wdev->freeze_lock);
		return false;
	default:
		BUG();
	}
	ASSERT(wdev->freeze_state == FRZ_FREEZED);

	if (timeout_sec > 0) {
		LOGn("(Re)set frozen timeout to %"PRIu32" seconds.\n",
			timeout_sec);
		INIT_DELAYED_WORK(&wdev->freeze_dwork, task_melt);
		ret = queue_delayed_work(
			wq_misc_, &wdev->freeze_dwork,
			msecs_to_jiffies(timeout_sec * 1000));
		ASSERT(ret);
		wdev->freeze_state = FRZ_FREEZED_WITH_TIMEOUT;
	}
	ASSERT(wdev->freeze_state != FRZ_MELTED);
	mutex_unlock(&wdev->freeze_lock);
	return true;
}
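/*
 * Summary (derived from freeze_if_melted(), melt_if_frozen() and task_melt()
 * in this file) of the freeze-state transitions, all serialized by
 * wdev->freeze_lock:
 *
 *   FRZ_MELTED  -- freeze_if_melted() -->                     FRZ_FREEZED
 *   FRZ_FREEZED -- freeze_if_melted() with timeout_sec > 0 -> FRZ_FREEZED_WITH_TIMEOUT
 *   FRZ_FREEZED -- melt_if_frozen() -->                       FRZ_MELTED
 *   FRZ_FREEZED_WITH_TIMEOUT -- task_melt() (delayed work fires) --> FRZ_MELTED
 *
 * ioctl_wdev_melt() additionally cancels a pending delayed work via
 * cancel_melt_work() before calling melt_if_frozen().
 */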
/**
 * Finalize worker.
 *
 * This will wait for the last execution of the task.
 */
void finalize_worker(struct worker_data *wd)
{
	ASSERT(wd);

	kthread_stop(wd->tsk);
	wait_for_completion(&wd->done);
#ifdef WORKER_DEBUG
	LOGn("worker counter %lu\n", wd->count);
#endif
}
/**
 * Check whether the device is frozen or not.
 *
 * @wdev walb dev.
 * @ctl ioctl data.
 * RETURN:
 *   0.
 */
static int ioctl_wdev_is_frozen(struct walb_dev *wdev, struct walb_ctl *ctl)
{
	ASSERT(ctl->command == WALB_IOCTL_IS_FROZEN);
	LOGn("WALB_IOCTL_IS_FROZEN\n");

	mutex_lock(&wdev->freeze_lock);
	ctl->val_int = (wdev->freeze_state == FRZ_MELTED) ? 0 : 1;
	mutex_unlock(&wdev->freeze_lock);
	return 0;
}
/**
 * Melt a frozen device.
 *
 * @wdev walb dev.
 * @ctl ioctl data.
 * RETURN:
 *   0 in success, or -EFAULT.
 */
static int ioctl_wdev_melt(struct walb_dev *wdev, struct walb_ctl *ctl)
{
	ASSERT(ctl->command == WALB_IOCTL_MELT);
	LOGn("WALB_IOCTL_MELT\n");

	cancel_melt_work(wdev);
	if (melt_if_frozen(wdev, true)) {
		return 0;
	}
	return -EFAULT;
}
/**
 * Decide flush support or not.
 */
void walb_decide_flush_support(struct walb_dev *wdev)
{
	struct request_queue *q;
	const struct request_queue *lq, *dq;
	bool lq_flush, dq_flush, lq_fua, dq_fua;

	ASSERT(wdev);

	/* Get queues. */
	q = wdev->queue;
	ASSERT(q);
	lq = bdev_get_queue(wdev->ldev);
	dq = bdev_get_queue(wdev->ddev);

	/* Get flush/fua flags. */
	lq_flush = lq->flush_flags & REQ_FLUSH;
	dq_flush = dq->flush_flags & REQ_FLUSH;
	lq_fua = lq->flush_flags & REQ_FUA;
	dq_fua = dq->flush_flags & REQ_FUA;
	LOGn("flush/fua flags: log_device %d/%d data_device %d/%d\n",
		lq_flush, lq_fua, dq_flush, dq_fua);

	/* Check REQ_FLUSH/REQ_FUA supports. */
	wdev->support_flush = false;
	wdev->support_fua = false;
	if (lq_flush && dq_flush) {
		uint flush_flags = REQ_FLUSH;
		LOGn("Supports REQ_FLUSH.\n");
		wdev->support_flush = true;
		if (lq_fua) {
			flush_flags |= REQ_FUA;
			LOGn("Supports REQ_FUA.\n");
			wdev->support_fua = true;
		}
		blk_queue_flush(q, flush_flags);
		blk_queue_flush_queueable(q, true);
	} else {
		LOGw("REQ_FLUSH is not supported!\n"
			"WalB can not guarantee data consistency"
			" in sudden crashes of underlying devices.\n");
	}
}
/**
 * Freeze a walb device.
 * Currently write IOs will be frozen but read IOs will not.
 *
 * @wdev walb dev.
 * @ctl ioctl data.
 * RETURN:
 *   0 in success, or -EFAULT.
 */
static int ioctl_wdev_freeze(struct walb_dev *wdev, struct walb_ctl *ctl)
{
	u32 timeout_sec;

	ASSERT(ctl->command == WALB_IOCTL_FREEZE);
	LOGn("WALB_IOCTL_FREEZE\n");

	/* Clip timeout value. */
	timeout_sec = ctl->val_u32;
	if (timeout_sec > 86400) {
		timeout_sec = 86400;
		LOGn("Freeze timeout has been clipped to %"PRIu32" seconds.\n",
			timeout_sec);
	}

	cancel_melt_work(wdev);
	if (freeze_if_melted(wdev, timeout_sec)) {
		return 0;
	}
	return -EFAULT;
}
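/*
 * Userland sketch (not compiled; hypothetical, not shipped with the module):
 * freezing a walb device for 60 seconds through the WALB_IOCTL_WDEV
 * interface. It assumes the walb ioctl header defining struct walb_ctl and
 * the WALB_IOCTL_* macros is included; the device path is illustrative.
 */
#if 0
#include <fcntl.h>
#include <unistd.h>
#include <string.h>
#include <sys/ioctl.h>

int freeze_example(const char *wdev_path)
{
	struct walb_ctl ctl;
	int fd, ret;

	memset(&ctl, 0, sizeof(ctl));
	ctl.command = WALB_IOCTL_FREEZE;
	ctl.val_u32 = 60; /* timeout [sec]; the driver clips it to 86400 */

	fd = open(wdev_path, O_RDONLY);
	if (fd < 0)
		return -1;
	ret = ioctl(fd, WALB_IOCTL_WDEV, &ctl);
	close(fd);
	return ret;
}
#endif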
/**
 * Melt a device if frozen.
 *
 * RETURN:
 *   true in success, or false (due to race condition).
 */
bool melt_if_frozen(struct walb_dev *wdev, bool restarts_checkpointing)
{
	unsigned int minor;

	ASSERT(wdev);
	minor = MINOR(wdev->devt);

	cancel_melt_work(wdev);

	/* Melt the device if required. */
	mutex_lock(&wdev->freeze_lock);
	switch (wdev->freeze_state) {
	case FRZ_MELTED:
		/* Do nothing. */
		LOGn("Already melted minor %u\n", minor);
		break;
	case FRZ_FREEZED:
		/* Melt. */
		LOGn("Melt walb device minor %u.\n", minor);
		if (restarts_checkpointing) {
			start_checkpointing(&wdev->cpd);
		}
		iocore_melt(wdev);
		wdev->freeze_state = FRZ_MELTED;
		break;
	case FRZ_FREEZED_WITH_TIMEOUT:
		/* Race condition. */
		LOGe("Race condition occurred.\n");
		mutex_unlock(&wdev->freeze_lock);
		return false;
	default:
		BUG();
	}
	ASSERT(wdev->freeze_state == FRZ_MELTED);
	mutex_unlock(&wdev->freeze_lock);
	return true;
}
/**
 * Support discard.
 */
void walb_discard_support(struct walb_dev *wdev)
{
	struct request_queue *q = wdev->queue;

	LOGn("Supports REQ_DISCARD.\n");
	q->limits.discard_granularity = wdev->physical_bs;
	/* Should be stored in u16 variable and aligned. */
	q->limits.max_discard_sectors = 1 << 15;
	q->limits.discard_zeroes_data = 0;
	queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, q);

	wdev->support_discard = true;
}
/**
 * Set checkpoint interval.
 *
 * @wdev walb dev.
 * @ctl ioctl data.
 * RETURN:
 *   0 in success, or -EFAULT.
 */
static int ioctl_wdev_set_checkpoint_interval(struct walb_dev *wdev, struct walb_ctl *ctl)
{
	u32 interval;

	LOGn("WALB_IOCTL_SET_CHECKPOINT_INTERVAL\n");
	ASSERT(ctl->command == WALB_IOCTL_SET_CHECKPOINT_INTERVAL);

	interval = ctl->val_u32;
	if (interval > WALB_MAX_CHECKPOINT_INTERVAL) {
		LOGe("Checkpoint interval is too big.\n");
		return -EFAULT;
	}
	set_checkpoint_interval(&wdev->cpd, interval);
	return 0;
}
void wait_for_bio_entry(struct bio_entry *bioe, ulong timeoutMs)
{
	const ulong timeo = msecs_to_jiffies(timeoutMs);
	ulong rtimeo;
	int c = 0;

retry:
	rtimeo = wait_for_completion_io_timeout(&bioe->done, timeo);
	if (rtimeo == 0) {
		LOGn("timeout(%d): bioe %p bio %p pos %" PRIu64 " len %u\n",
			c, bioe, bioe->bio,
			(u64)bio_entry_pos(bioe), bio_entry_len(bioe));
		c++;
		goto retry;
	}
}
/**
 * Initialize super sector.
 *
 * @super_sect super sector image to initialize.
 * @lbs logical block size.
 * @pbs physical block size.
 * @ddev_lb device size [logical block].
 * @ldev_lb log device size [logical block].
 * @name name of the walb device, or NULL.
 *
 * RETURN:
 *   true in success.
 */
bool init_super_sector_raw(
	struct walb_super_sector* super_sect,
	unsigned int lbs, unsigned int pbs,
	u64 ddev_lb, u64 ldev_lb,
	const char *name)
{
	u32 salt;
	char *rname;
	bool ret;

	ASSERT(super_sect);
	ASSERT(0 < lbs);
	ASSERT(0 < pbs);
	ASSERT(0 < ddev_lb);
	ASSERT(0 < ldev_lb);
	ASSERT(sizeof(struct walb_super_sector) <= (size_t)pbs);

	/* Prepare super sector */
	memset(super_sect, 0, sizeof(*super_sect));

	/* Set sector type. */
	super_sect->sector_type = SECTOR_TYPE_SUPER;

	/* Fill parameters. */
	super_sect->version = WALB_LOG_VERSION;
	super_sect->logical_bs = lbs;
	super_sect->physical_bs = pbs;
	super_sect->metadata_size = 0; /* currently fixed */

	ret = generate_uuid(super_sect->uuid);
	if (!ret) {
		return false;
	}

	memset_random((u8 *)&salt, sizeof(salt));
	LOGn("salt: %"PRIu32"\n", salt);
	super_sect->log_checksum_salt = salt;

	super_sect->ring_buffer_size =
		ldev_lb / (pbs / lbs) - get_ring_buffer_offset(pbs);
	super_sect->oldest_lsid = 0;
	super_sect->written_lsid = 0;
	super_sect->device_size = ddev_lb;

	rname = set_super_sector_name(super_sect, name);
	if (name && strlen(name) != strlen(rname)) {
		printf("name %s is pruned to %s.\n", name, rname);
	}

	ASSERT(is_valid_super_sector_raw(super_sect, pbs));
	return true;
}
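/*
 * Usage sketch (not compiled; hypothetical sizes and name): formatting a
 * super sector for 512-byte logical / 4096-byte physical blocks. Only the
 * arguments documented above are used; "wdev0" and the sizes are
 * illustrative.
 */
#if 0
struct walb_super_sector super;
u64 ddev_lb = (u64)1024 * 1024 * 1024 / 512; /* 1 GiB data device [logical block] */
u64 ldev_lb = (u64)128 * 1024 * 1024 / 512;  /* 128 MiB log device [logical block] */

if (!init_super_sector_raw(&super, 512, 4096, ddev_lb, ldev_lb, "wdev0")) {
	/* handle failure */
}
#endif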
/**
 * Take a checkpoint immediately.
 *
 * @wdev walb dev.
 * @ctl ioctl data.
 * RETURN:
 *   0 in success, or -EFAULT.
 */
static int ioctl_wdev_take_checkpoint(struct walb_dev *wdev, struct walb_ctl *ctl)
{
	bool ret;

	LOGn("WALB_IOCTL_TAKE_CHECKPOINT\n");
	ASSERT(ctl->command == WALB_IOCTL_TAKE_CHECKPOINT);

	stop_checkpointing(&wdev->cpd);
#ifdef WALB_DEBUG
	down_write(&wdev->cpd.lock);
	ASSERT(wdev->cpd.state == CP_STOPPED);
	up_write(&wdev->cpd.lock);
#endif
	ret = take_checkpoint(&wdev->cpd);
	if (!ret) {
		atomic_set(&wdev->is_read_only, 1);
		LOGe("superblock sync failed.\n");
		return -EFAULT;
	}
	start_checkpointing(&wdev->cpd);
	return 0;
}
/**
 * Clear log and detect resize of log device.
 *
 * @wdev walb dev.
 * @ctl ioctl data.
 * RETURN:
 *   0 in success, or -EFAULT.
 */
static int ioctl_wdev_clear_log(struct walb_dev *wdev, struct walb_ctl *ctl)
{
	u64 new_ldev_size, old_ldev_size;
	u8 new_uuid[UUID_SIZE], old_uuid[UUID_SIZE];
	unsigned int pbs = wdev->physical_bs;
	bool is_grown = false;
	struct walb_super_sector *super;
	u64 lsid0_off;
	struct lsid_set lsids;
	u64 old_ring_buffer_size;
	u32 new_salt;

	ASSERT(ctl->command == WALB_IOCTL_CLEAR_LOG);
	LOGn("WALB_IOCTL_CLEAR_LOG.\n");

	/* Freeze iocore and checkpointing. */
	iocore_freeze(wdev);
	stop_checkpointing(&wdev->cpd);

	/* Get old/new log device size. */
	old_ldev_size = wdev->ldev_size;
	new_ldev_size = wdev->ldev->bd_part->nr_sects;
	if (old_ldev_size > new_ldev_size) {
		LOGe("Log device shrink not supported.\n");
		goto error0;
	}

	/* Backup variables. */
	old_ring_buffer_size = wdev->ring_buffer_size;
	backup_lsid_set(wdev, &lsids);

	/* Initialize lsid(s). */
	spin_lock(&wdev->lsid_lock);
	wdev->lsids.latest = 0;
	wdev->lsids.flush = 0;
	wdev->lsids.completed = 0;
	wdev->lsids.permanent = 0;
	wdev->lsids.written = 0;
	wdev->lsids.prev_written = 0;
	wdev->lsids.oldest = 0;
	spin_unlock(&wdev->lsid_lock);

	/* Grow the walblog device. */
	if (old_ldev_size < new_ldev_size) {
		LOGn("Detect log device size change.\n");

		/* Grow the disk. */
		is_grown = true;
		if (!resize_disk(wdev->log_gd, new_ldev_size)) {
			LOGe("grow disk failed.\n");
			iocore_set_readonly(wdev);
			goto error1;
		}
		LOGn("Grown log device size from %"PRIu64" to %"PRIu64".\n",
			old_ldev_size, new_ldev_size);
		wdev->ldev_size = new_ldev_size;

		/* Recalculate ring buffer size. */
		wdev->ring_buffer_size =
			addr_pb(pbs, new_ldev_size) - get_ring_buffer_offset(pbs);
	}

	/* Generate new uuid and salt. */
	get_random_bytes(new_uuid, 16);
	get_random_bytes(&new_salt, sizeof(new_salt));
	wdev->log_checksum_salt = new_salt;

	/* Update superblock image. */
	spin_lock(&wdev->lsuper0_lock);
	super = get_super_sector(wdev->lsuper0);
	memcpy(old_uuid, super->uuid, UUID_SIZE);
	memcpy(super->uuid, new_uuid, UUID_SIZE);
	super->ring_buffer_size = wdev->ring_buffer_size;
	super->log_checksum_salt = new_salt;
	/* super->metadata_size; */
	lsid0_off = get_offset_of_lsid_2(super, 0);
	spin_unlock(&wdev->lsuper0_lock);

	/* Sync super sector. */
	if (!walb_sync_super_block(wdev)) {
		LOGe("sync superblock failed.\n");
		iocore_set_readonly(wdev);
		goto error2;
	}

	/* Invalidate first logpack */
	if (!invalidate_lsid(wdev, 0)) {
		LOGe("invalidate lsid 0 failed.\n");
		iocore_set_readonly(wdev);
		goto error2;
	}

	/* Clear log overflow. */
	iocore_clear_log_overflow(wdev);

	/* Melt iocore and checkpointing. */
	start_checkpointing(&wdev->cpd);
	iocore_melt(wdev);

	return 0;

error2:
	restore_lsid_set(wdev, &lsids);
	wdev->ring_buffer_size = old_ring_buffer_size;
#if 0
	wdev->ldev_size = old_ldev_size;
	if (!resize_disk(wdev->log_gd, old_ldev_size)) {
		LOGe("resize_disk to shrink failed.\n");
	}
#endif
error1:
	start_checkpointing(&wdev->cpd);
	iocore_melt(wdev);
error0:
	return -EFAULT;
}
/**
 * Execute ioctl for WALB_IOCTL_WDEV.
 *
 * return 0 in success, or -EFAULT.
 */
int walb_dispatch_ioctl_wdev(struct walb_dev *wdev, void __user *userctl)
{
	int ret = -EFAULT;
	struct walb_ctl *ctl;

	/* Get ctl data. */
	ctl = walb_get_ctl(userctl, GFP_KERNEL);
	if (!ctl) {
		LOGe("walb_get_ctl failed.\n");
		return -EFAULT;
	}

	/* Execute each command. */
	switch (ctl->command) {
	case WALB_IOCTL_GET_OLDEST_LSID:
		ret = ioctl_wdev_get_oldest_lsid(wdev, ctl);
		break;
	case WALB_IOCTL_SET_OLDEST_LSID:
		ret = ioctl_wdev_set_oldest_lsid(wdev, ctl);
		break;
	case WALB_IOCTL_TAKE_CHECKPOINT:
		ret = ioctl_wdev_take_checkpoint(wdev, ctl);
		break;
	case WALB_IOCTL_GET_CHECKPOINT_INTERVAL:
		ret = ioctl_wdev_get_checkpoint_interval(wdev, ctl);
		break;
	case WALB_IOCTL_SET_CHECKPOINT_INTERVAL:
		ret = ioctl_wdev_set_checkpoint_interval(wdev, ctl);
		break;
	case WALB_IOCTL_GET_WRITTEN_LSID:
		ret = ioctl_wdev_get_written_lsid(wdev, ctl);
		break;
	case WALB_IOCTL_GET_PERMANENT_LSID:
		ret = ioctl_wdev_get_permanent_lsid(wdev, ctl);
		break;
	case WALB_IOCTL_GET_COMPLETED_LSID:
		ret = ioctl_wdev_get_completed_lsid(wdev, ctl);
		break;
	case WALB_IOCTL_GET_LOG_USAGE:
		ret = ioctl_wdev_get_log_usage(wdev, ctl);
		break;
	case WALB_IOCTL_GET_LOG_CAPACITY:
		ret = ioctl_wdev_get_log_capacity(wdev, ctl);
		break;
	case WALB_IOCTL_IS_FLUSH_CAPABLE:
		ret = ioctl_wdev_is_flush_capable(wdev, ctl);
		break;
	case WALB_IOCTL_STATUS:
		ret = ioctl_wdev_status(wdev, ctl);
		break;
	case WALB_IOCTL_RESIZE:
		ret = ioctl_wdev_resize(wdev, ctl);
		break;
	case WALB_IOCTL_CLEAR_LOG:
		ret = ioctl_wdev_clear_log(wdev, ctl);
		break;
	case WALB_IOCTL_IS_LOG_OVERFLOW:
		ret = ioctl_wdev_is_log_overflow(wdev, ctl);
		break;
	case WALB_IOCTL_FREEZE:
		ret = ioctl_wdev_freeze(wdev, ctl);
		break;
	case WALB_IOCTL_MELT:
		ret = ioctl_wdev_melt(wdev, ctl);
		break;
	case WALB_IOCTL_IS_FROZEN:
		ret = ioctl_wdev_is_frozen(wdev, ctl);
		break;
	default:
		LOGn("WALB_IOCTL_WDEV %d is not supported.\n", ctl->command);
	}

	/* Put ctl data. */
	if (walb_put_ctl(userctl, ctl) != 0) {
		LOGe("walb_put_ctl failed.\n");
		return -EFAULT;
	}
	return ret;
}