static void __clone_and_map(struct clone_info *ci) { struct bio *clone, *bio = ci->bio; struct dm_target *ti = dm_table_find_target(ci->map, ci->sector); sector_t len = 0, max = max_io_len(ci->md, ci->sector, ti); struct target_io *tio; /* * Allocate a target io object. */ tio = alloc_tio(ci->md); tio->io = ci->io; tio->ti = ti; memset(&tio->info, 0, sizeof(tio->info)); if (ci->sector_count <= max) { /* * Optimise for the simple case where we can do all of * the remaining io with a single clone. */ clone = clone_bio(bio, ci->sector, ci->idx, bio->bi_vcnt - ci->idx, ci->sector_count, ci->md->bs); __map_bio(ti, clone, tio); ci->sector_count = 0; } else if (to_sector(bio->bi_io_vec[ci->idx].bv_len) <= max) { /* * There are some bvecs that don't span targets. * Do as many of these as possible. */ int i; sector_t remaining = max; sector_t bv_len; for (i = ci->idx; remaining && (i < bio->bi_vcnt); i++) { bv_len = to_sector(bio->bi_io_vec[i].bv_len); if (bv_len > remaining) break; remaining -= bv_len; len += bv_len; } clone = clone_bio(bio, ci->sector, ci->idx, i - ci->idx, len, ci->md->bs); __map_bio(ti, clone, tio); ci->sector += len; ci->sector_count -= len; ci->idx = i; } else { /* * Handle a bvec that must be split between two or more targets. */ struct bio_vec *bv = bio->bi_io_vec + ci->idx; sector_t remaining = to_sector(bv->bv_len); unsigned int offset = 0; do { if (offset) { ti = dm_table_find_target(ci->map, ci->sector); max = max_io_len(ci->md, ci->sector, ti); tio = alloc_tio(ci->md); tio->io = ci->io; tio->ti = ti; memset(&tio->info, 0, sizeof(tio->info)); } len = min(remaining, max); clone = split_bvec(bio, ci->sector, ci->idx, bv->bv_offset + offset, len, ci->md->bs); __map_bio(ti, clone, tio); ci->sector += len; ci->sector_count -= len; offset += to_bytes(len); } while (remaining -= len); ci->idx++; } }
static void __clone_and_map(struct clone_info *ci) { struct bio *clone, *bio = ci->bio; struct dm_target *ti = dm_table_find_target(ci->map, ci->sector); sector_t len = 0, max = max_io_len(ci->md, ci->sector, ti); struct target_io *tio; /* * Allocate a target io object. */ tio = alloc_tio(ci->md); tio->io = ci->io; tio->ti = ti; memset(&tio->info, 0, sizeof(tio->info)); if (ci->sector_count <= max) { /* * Optimise for the simple case where we can do all of * the remaining io with a single clone. */ clone = clone_bio(bio, ci->sector, ci->idx, bio->bi_vcnt - ci->idx, ci->sector_count); __map_bio(ti, clone, tio); ci->sector_count = 0; } else if (to_sector(bio->bi_io_vec[ci->idx].bv_len) <= max) { /* * There are some bvecs that don't span targets. * Do as many of these as possible. */ int i; sector_t remaining = max; sector_t bv_len; for (i = ci->idx; remaining && (i < bio->bi_vcnt); i++) { bv_len = to_sector(bio->bi_io_vec[i].bv_len); if (bv_len > remaining) break; remaining -= bv_len; len += bv_len; } clone = clone_bio(bio, ci->sector, ci->idx, i - ci->idx, len); __map_bio(ti, clone, tio); ci->sector += len; ci->sector_count -= len; ci->idx = i; } else { /* * Create two copy bios to deal with io that has * been split across a target. */ struct bio_vec *bv = bio->bi_io_vec + ci->idx; clone = split_bvec(bio, ci->sector, ci->idx, bv->bv_offset, max); __map_bio(ti, clone, tio); ci->sector += max; ci->sector_count -= max; ti = dm_table_find_target(ci->map, ci->sector); len = to_sector(bv->bv_len) - max; clone = split_bvec(bio, ci->sector, ci->idx, bv->bv_offset + to_bytes(max), len); tio = alloc_tio(ci->md); tio->io = ci->io; tio->ti = ti; memset(&tio->info, 0, sizeof(tio->info)); __map_bio(ti, clone, tio); ci->sector += len; ci->sector_count -= len; ci->idx++; } }
/*
 * Split ci->bio into one or more clone bios and dispatch each clone to the
 * device-mapper target that owns its sector range.
 *
 * Progress is recorded in ci: ci->sector (next sector to map),
 * ci->sector_count (sectors still to dispatch) and ci->idx (next bvec).
 */
static void __clone_and_map(struct clone_info *ci)
{
	struct bio *clone, *bio = ci->bio;
	/* Target owning the request's current start sector. */
	struct dm_target *ti = dm_table_find_target(ci->map, ci->sector);
	/* max: largest io length this target can take at ci->sector. */
	sector_t len = 0, max = max_io_len(ci->md, ci->sector, ti);
	struct target_io *tio;

	/*
	 * Allocate a target io object.
	 */
	tio = alloc_tio(ci->md);
	tio->io = ci->io;
	tio->ti = ti;
	memset(&tio->info, 0, sizeof(tio->info));

	/*
	 * [step1] The remaining io fits entirely inside this target, so a
	 * single clone completes the whole request.
	 */
	if (ci->sector_count <= max) {
		/*
		 * Optimise for the simple case where we can do all of
		 * the remaining io with a single clone.
		 */
		clone = clone_bio(bio, ci->sector, ci->idx,
				  bio->bi_vcnt - ci->idx, ci->sector_count);
		/* Map the cloned bio onto the target device. */
		__map_bio(ti, clone, tio);
		ci->sector_count = 0;

	/*
	 * [step2] The remaining io exceeds this target, but the bvec at
	 * ci->idx still fits inside it.
	 */
	} else if (to_sector(bio->bi_io_vec[ci->idx].bv_len) <= max) {
		/*
		 * There are some bvecs that don't span targets.
		 * Do as many of these as possible.
		 */
		int i;
		sector_t remaining = max;
		sector_t bv_len;

		/*
		 * Accumulate whole bvecs until the target is full: the
		 * first bvec that would overflow the target straddles a
		 * boundary, so its data is left for [step3] on a later call.
		 */
		for (i = ci->idx; remaining && (i < bio->bi_vcnt); i++) {
			bv_len = to_sector(bio->bi_io_vec[i].bv_len);

			if (bv_len > remaining)
				break;

			remaining -= bv_len;
			len += bv_len;
		}

		/* Clone the bvecs that fit and map them onto the target. */
		clone = clone_bio(bio, ci->sector, ci->idx, i - ci->idx, len);
		__map_bio(ti, clone, tio);

		/* Start sector of whatever io remains. */
		ci->sector += len;
		/* Sectors still pending; if nonzero we come back for more. */
		ci->sector_count -= len;
		/* Next bvec to process. */
		ci->idx = i;

	/* [step3] The bvec at ci->idx crosses a target boundary: split it. */
	} else {
		/*
		 * Handle a bvec that must be split between two or more
		 * targets.
		 */
		/* The bvec straddling the target boundary. */
		struct bio_vec *bv = bio->bi_io_vec + ci->idx;
		/* Sectors of this bvec still to dispatch. */
		sector_t remaining = to_sector(bv->bv_len);
		unsigned int offset = 0;

		/* Carve the bvec into pieces, one target per piece. */
		do {
			if (offset) {
				/*
				 * Pieces after the first may belong to a
				 * different target: re-resolve it and build
				 * a fresh target io for that target.
				 */
				ti = dm_table_find_target(ci->map, ci->sector);
				max = max_io_len(ci->md, ci->sector, ti);

				tio = alloc_tio(ci->md);
				tio->io = ci->io;
				tio->ti = ti;
				memset(&tio->info, 0, sizeof(tio->info));
			}

			/* Length of this piece, capped by the target. */
			len = min(remaining, max);

			/* Build a clone covering just this slice of the bvec. */
			clone = split_bvec(bio, ci->sector, ci->idx,
					   bv->bv_offset + offset, len);

			/* Map the piece onto its target. */
			__map_bio(ti, clone, tio);

			ci->sector += len;	/* next start sector */
			ci->sector_count -= len;	/* sectors left */
			offset += to_bytes(len);	/* byte offset into bvec */
		} while (remaining -= len);

		/* Advance to the next bvec. */
		ci->idx++;
	}
}