/* add_new_disk() - initiates a disk add * However, if this fails before writing md_update_sb(), * add_new_disk_cancel() must be called to release token lock */ static int add_new_disk(struct mddev *mddev, struct md_rdev *rdev) { struct md_cluster_info *cinfo = mddev->cluster_info; struct cluster_msg cmsg; int ret = 0; struct mdp_superblock_1 *sb = page_address(rdev->sb_page); char *uuid = sb->device_uuid; memset(&cmsg, 0, sizeof(cmsg)); cmsg.type = cpu_to_le32(NEWDISK); memcpy(cmsg.uuid, uuid, 16); cmsg.raid_slot = cpu_to_le32(rdev->desc_nr); lock_comm(cinfo); ret = __sendmsg(cinfo, &cmsg); if (ret) return ret; cinfo->no_new_dev_lockres->flags |= DLM_LKF_NOQUEUE; ret = dlm_lock_sync(cinfo->no_new_dev_lockres, DLM_LOCK_EX); cinfo->no_new_dev_lockres->flags &= ~DLM_LKF_NOQUEUE; /* Some node does not "see" the device */ if (ret == -EAGAIN) ret = -ENOENT; if (ret) unlock_comm(cinfo); else dlm_lock_sync(cinfo->no_new_dev_lockres, DLM_LOCK_CR); return ret; }
static int metadata_update_finish(struct mddev *mddev) { struct md_cluster_info *cinfo = mddev->cluster_info; struct cluster_msg cmsg; struct md_rdev *rdev; int ret = 0; int raid_slot = -1; memset(&cmsg, 0, sizeof(cmsg)); cmsg.type = cpu_to_le32(METADATA_UPDATED); /* Pick up a good active device number to send. */ rdev_for_each(rdev, mddev) if (rdev->raid_disk > -1 && !test_bit(Faulty, &rdev->flags)) { raid_slot = rdev->desc_nr; break; } if (raid_slot >= 0) { cmsg.raid_slot = cpu_to_le32(raid_slot); ret = __sendmsg(cinfo, &cmsg); } else pr_warn("md-cluster: No good device id found to send\n"); unlock_comm(cinfo); return ret; }
/*
 * sendmsg() - locked wrapper around __sendmsg().
 *
 * Acquires the comm (token) lock, transmits one cluster message, and
 * releases the lock again. Returns __sendmsg()'s status.
 */
static int sendmsg(struct md_cluster_info *cinfo, struct cluster_msg *cmsg)
{
	int err;

	lock_comm(cinfo);
	err = __sendmsg(cinfo, cmsg);
	unlock_comm(cinfo);

	return err;
}
/*
 * add_new_disk_finish() - second half of a clustered disk add: write
 * out the superblock, tell the other nodes the metadata changed, and
 * release the comm lock held since add_new_disk().
 *
 * Returns __sendmsg()'s status.
 */
static int add_new_disk_finish(struct mddev *mddev)
{
	struct cluster_msg cmsg;
	struct md_cluster_info *cinfo = mddev->cluster_info;
	int ret;

	/* Write sb and inform others */
	md_update_sb(mddev, 1);
	/* Fix: zero the message first — the original sent uninitialized
	 * stack bytes for every field other than 'type'. */
	memset(&cmsg, 0, sizeof(cmsg));
	/* Fix: on-wire message fields are little-endian; the bare
	 * METADATA_UPDATED assignment broke big-endian nodes (all sibling
	 * senders here use cpu_to_le32()). */
	cmsg.type = cpu_to_le32(METADATA_UPDATED);
	ret = __sendmsg(cinfo, &cmsg);
	unlock_comm(cinfo);
	return ret;
}
/*
 * metadata_update_finish() - broadcast METADATA_UPDATED to peer nodes,
 * then drop the comm lock taken by metadata_update_start().
 *
 * Returns __sendmsg()'s status.
 */
static int metadata_update_finish(struct mddev *mddev)
{
	struct md_cluster_info *cinfo = mddev->cluster_info;
	struct cluster_msg cmsg;
	int err;

	/* Zero-fill so no stray stack bytes are transmitted. */
	memset(&cmsg, 0, sizeof(cmsg));
	cmsg.type = cpu_to_le32(METADATA_UPDATED);

	err = __sendmsg(cinfo, &cmsg);
	unlock_comm(cinfo);

	return err;
}
/* add_new_disk() - initiates a disk add * However, if this fails before writing md_update_sb(), * add_new_disk_cancel() must be called to release token lock */ static int add_new_disk(struct mddev *mddev, struct md_rdev *rdev) { struct md_cluster_info *cinfo = mddev->cluster_info; struct cluster_msg cmsg; int ret = 0; struct mdp_superblock_1 *sb = page_address(rdev->sb_page); char *uuid = sb->device_uuid; memset(&cmsg, 0, sizeof(cmsg)); cmsg.type = cpu_to_le32(NEWDISK); memcpy(cmsg.uuid, uuid, 16); cmsg.raid_slot = cpu_to_le32(rdev->desc_nr); lock_comm(cinfo); ret = __sendmsg(cinfo, &cmsg); if (ret) return ret; cinfo->no_new_dev_lockres->flags |= DLM_LKF_NOQUEUE; ret = dlm_lock_sync(cinfo->no_new_dev_lockres, DLM_LOCK_EX); cinfo->no_new_dev_lockres->flags &= ~DLM_LKF_NOQUEUE; /* Some node does not "see" the device */ if (ret == -EAGAIN) ret = -ENOENT; if (ret) unlock_comm(cinfo); else { dlm_lock_sync(cinfo->no_new_dev_lockres, DLM_LOCK_CR); /* Since MD_CHANGE_DEVS will be set in add_bound_rdev which * will run soon after add_new_disk, the below path will be * invoked: * md_wakeup_thread(mddev->thread) * -> conf->thread (raid1d) * -> md_check_recovery -> md_update_sb * -> metadata_update_start/finish * MD_CLUSTER_SEND_LOCKED_ALREADY will be cleared eventually. * * For other failure cases, metadata_update_cancel and * add_new_disk_cancel also clear below bit as well. * */ set_bit(MD_CLUSTER_SEND_LOCKED_ALREADY, &cinfo->state); wake_up(&cinfo->wait); } return ret; }
/*
 * add_new_disk_cancel() - abort a disk add started by add_new_disk(),
 * releasing the comm (token) lock it left held.
 */
static void add_new_disk_cancel(struct mddev *mddev)
{
	struct md_cluster_info *cinfo = mddev->cluster_info;

	unlock_comm(cinfo);
}
/*
 * metadata_update_cancel() - abandon a metadata update begun by
 * metadata_update_start(), dropping the comm lock it acquired.
 */
static void metadata_update_cancel(struct mddev *mddev)
{
	struct md_cluster_info *cinfo = mddev->cluster_info;

	unlock_comm(cinfo);
}
/*
 * add_new_disk_cancel() - abort a disk add started by add_new_disk().
 *
 * Clears the deferred-send marker that add_new_disk() set on success
 * and releases the comm (token) lock it left held.
 */
static void add_new_disk_cancel(struct mddev *mddev)
{
	struct md_cluster_info *info = mddev->cluster_info;

	/* The pending metadata broadcast will never happen now. */
	clear_bit(MD_CLUSTER_SEND_LOCKED_ALREADY, &info->state);
	unlock_comm(info);
}