int sr_raid1_rw(struct sr_workunit *wu) { struct sr_discipline *sd = wu->swu_dis; struct scsi_xfer *xs = wu->swu_xs; struct sr_ccb *ccb; struct sr_chunk *scp; int ios, chunk, i, rt; daddr_t blk; /* blk and scsi error will be handled by sr_validate_io */ if (sr_validate_io(wu, &blk, "sr_raid1_rw")) goto bad; /* calculate physical block */ blk += sd->sd_meta->ssd_data_offset; if (xs->flags & SCSI_DATA_IN) ios = 1; else ios = sd->sd_meta->ssdi.ssd_chunk_no; for (i = 0; i < ios; i++) { if (xs->flags & SCSI_DATA_IN) { rt = 0; ragain: /* interleave reads */ chunk = sd->mds.mdd_raid1.sr1_counter++ % sd->sd_meta->ssdi.ssd_chunk_no; scp = sd->sd_vol.sv_chunks[chunk]; switch (scp->src_meta.scm_status) { case BIOC_SDONLINE: case BIOC_SDSCRUB: break; case BIOC_SDOFFLINE: case BIOC_SDREBUILD: case BIOC_SDHOTSPARE: if (rt++ < sd->sd_meta->ssdi.ssd_chunk_no) goto ragain; /* FALLTHROUGH */ default: /* volume offline */ printf("%s: is offline, cannot read\n", DEVNAME(sd->sd_sc)); goto bad; } } else { /* writes go on all working disks */ chunk = i; scp = sd->sd_vol.sv_chunks[chunk]; switch (scp->src_meta.scm_status) { case BIOC_SDONLINE: case BIOC_SDSCRUB: case BIOC_SDREBUILD: break; case BIOC_SDHOTSPARE: /* should never happen */ case BIOC_SDOFFLINE: continue; default: goto bad; } } ccb = sr_ccb_rw(sd, chunk, blk, xs->datalen, xs->data, xs->flags, 0); if (!ccb) { /* should never happen but handle more gracefully */ printf("%s: %s: too many ccbs queued\n", DEVNAME(sd->sd_sc), sd->sd_meta->ssd_devname); goto bad; } sr_wu_enqueue_ccb(wu, ccb); } sr_schedule_wu(wu); return (0); bad: /* wu is unwound by sr_wu_put */ return (1); }
/*
 * Queue a RAID0 read or write work unit, splitting the request into
 * per-strip CCBs across the member chunks.
 * Returns 0 on success, 1 on failure (wu is unwound by sr_wu_put).
 */
int
sr_raid0_rw(struct sr_workunit *wu)
{
	struct sr_discipline	*sd = wu->swu_dis;
	struct scsi_xfer	*xs = wu->swu_xs;
	struct sr_ccb		*ccb;
	struct sr_chunk		*scp;
	daddr_t			blkno;
	int64_t			chunkoffs, lbaoffs, offset, stripoffs;
	int64_t			strip_bits, strip_no, strip_size;
	int64_t			chunk, no_chunk;
	int64_t			length, leftover;
	u_int8_t		*data;

	/* blkno and scsi error will be handled by sr_validate_io */
	if (sr_validate_io(wu, &blkno, "sr_raid0_rw"))
		goto bad;

	strip_size = sd->sd_meta->ssdi.ssd_strip_size;
	strip_bits = sd->mds.mdd_raid0.sr0_strip_bits;
	no_chunk = sd->sd_meta->ssdi.ssd_chunk_no;

	DNPRINTF(SR_D_DIS, "%s: %s: front end io: blkno %lld size %d\n",
	    DEVNAME(sd->sd_sc), sd->sd_meta->ssd_devname,
	    (long long)blkno, xs->datalen);

	/* all offs are in bytes */
	lbaoffs = blkno << DEV_BSHIFT;
	/* strip_size is a power of two, so >> strip_bits divides by it */
	strip_no = lbaoffs >> strip_bits;
	/* strips are distributed round-robin over the chunks */
	chunk = strip_no % no_chunk;
	/* byte offset within the strip (strip_size - 1 is a mask) */
	stripoffs = lbaoffs & (strip_size - 1);
	/* byte offset of this strip within its chunk */
	chunkoffs = (strip_no / no_chunk) << strip_bits;
	offset = chunkoffs + stripoffs;
	/* first transfer is bounded by the end of the current strip */
	length = MIN(xs->datalen, strip_size - stripoffs);
	leftover = xs->datalen;
	data = xs->data;
	/* Issue one CCB per strip touched until the request is consumed. */
	for (;;) {
		/* make sure chunk is online */
		scp = sd->sd_vol.sv_chunks[chunk];
		if (scp->src_meta.scm_status != BIOC_SDONLINE)
			goto bad;

		DNPRINTF(SR_D_DIS, "%s: %s %s io lbaoffs %lld "
		    "strip_no %lld chunk %lld stripoffs %lld "
		    "chunkoffs %lld offset %lld length %lld "
		    "leftover %lld data %p\n",
		    DEVNAME(sd->sd_sc), sd->sd_meta->ssd_devname, sd->sd_name,
		    lbaoffs, strip_no, chunk, stripoffs, chunkoffs, offset,
		    length, leftover, data);

		/* convert the byte offset back to a block address */
		blkno = offset >> DEV_BSHIFT;
		ccb = sr_ccb_rw(sd, chunk, blkno, length, data, xs->flags, 0);
		if (!ccb) {
			/* should never happen but handle more gracefully */
			printf("%s: %s: too many ccbs queued\n",
			    DEVNAME(sd->sd_sc),
			    sd->sd_meta->ssd_devname);
			goto bad;
		}
		sr_wu_enqueue_ccb(wu, ccb);

		leftover -= length;
		if (leftover == 0)
			break;

		data += length;
		if (++chunk > no_chunk - 1) {
			/* wrapped to chunk 0: advance into the next stripe */
			chunk = 0;
			offset += length;
		} else if (wu->swu_io_count == 1)
			/*
			 * Still in the first stripe: the first transfer may
			 * have started mid-strip, so drop the intra-strip
			 * offset for the following full strips.
			 */
			offset -= stripoffs;
		/* all subsequent transfers are at most one full strip */
		length = MIN(leftover, strip_size);
	}

	sr_schedule_wu(wu);

	return (0);
bad:
	/* wu is unwound by sr_wu_put */
	return (1);
}
int sr_concat_rw(struct sr_workunit *wu) { struct sr_discipline *sd = wu->swu_dis; struct scsi_xfer *xs = wu->swu_xs; struct sr_ccb *ccb; struct sr_chunk *scp; daddr_t blkno; int64_t lbaoffs, offset; int64_t no_chunk, chunkend, chunk, chunksize; int64_t length, leftover; u_int8_t *data; /* blkno and scsi error will be handled by sr_validate_io */ if (sr_validate_io(wu, &blkno, "sr_concat_rw")) goto bad; no_chunk = sd->sd_meta->ssdi.ssd_chunk_no; DNPRINTF(SR_D_DIS, "%s: %s: front end io: blkno %lld size %d\n", DEVNAME(sd->sd_sc), sd->sd_meta->ssd_devname, (long long)blkno, xs->datalen); /* All offsets are in bytes. */ lbaoffs = blkno << DEV_BSHIFT; leftover = xs->datalen; data = xs->data; for (;;) { chunkend = 0; offset = lbaoffs; for (chunk = 0; chunk < no_chunk; chunk++) { chunksize = sd->sd_vol.sv_chunks[chunk]->src_size << DEV_BSHIFT; chunkend += chunksize; if (lbaoffs < chunkend) break; offset -= chunksize; } if (lbaoffs > chunkend) goto bad; length = MIN(MIN(leftover, chunkend - lbaoffs), MAXPHYS); /* make sure chunk is online */ scp = sd->sd_vol.sv_chunks[chunk]; if (scp->src_meta.scm_status != BIOC_SDONLINE) goto bad; DNPRINTF(SR_D_DIS, "%s: %s %s io lbaoffs %lld " "chunk %lld chunkend %lld offset %lld length %lld " "leftover %lld data %p\n", DEVNAME(sd->sd_sc), sd->sd_meta->ssd_devname, sd->sd_name, lbaoffs, chunk, chunkend, offset, length, leftover, data); blkno = offset >> DEV_BSHIFT; ccb = sr_ccb_rw(sd, chunk, blkno, length, data, xs->flags, 0); if (!ccb) { /* should never happen but handle more gracefully */ printf("%s: %s: too many ccbs queued\n", DEVNAME(sd->sd_sc), sd->sd_meta->ssd_devname); goto bad; } sr_wu_enqueue_ccb(wu, ccb); leftover -= length; if (leftover == 0) break; data += length; lbaoffs += length; } sr_schedule_wu(wu); return (0); bad: /* wu is unwound by sr_wu_put */ return (1); }