/*
 * Begin an asynchronous tag read.
 *
 * On the very first read of a tag we do not yet know how much data the
 * PLC will return per request, so we single-step: issue one new request
 * at a time and record the size actually returned in
 * tag->read_req_sizes[].  Once the sizes are known, later reads build
 * every request up front so they can be pipelined.
 *
 * Returns PLCTAG_STATUS_PENDING on success (the read is now in flight),
 * or an error status (also stored in tag->status) on failure.
 */
int eip_cip_tag_read_start(ab_tag_p tag)
{
    int rc = PLCTAG_STATUS_OK;
    int slot;
    int offset = 0;                 /* byte offset into the tag data */
    int debug = tag->debug;

    pdebug(debug,"Starting");

    if(tag->first_read) {
        /* make room for one more in-flight request */
        rc = allocate_read_request_slot(tag);

        if(rc != PLCTAG_STATUS_OK) {
            tag->status = rc;
            return rc;
        }

        /*
         * Sum the sizes of the requests that already completed; that
         * total is where the next request must start.  After the loop,
         * "slot" indexes the first empty request slot.
         */
        for(slot = 0; slot < tag->num_read_requests && tag->reqs[slot]; slot++) {
            offset += tag->read_req_sizes[slot];
        }

        pdebug(debug,"First read tag->num_read_requests=%d, byte_offset=%d.",tag->num_read_requests,offset);

        /* issue exactly one new request this pass (single-stepping) */
        rc = build_read_request(tag, slot, offset);

        if(rc != PLCTAG_STATUS_OK) {
            tag->status = rc;
            return rc;
        }
    } else {
        /* sizes are known from the first read: build every request now */
        for(slot = 0; slot < tag->num_read_requests; slot++) {
            rc = build_read_request(tag, slot, offset);

            if(rc != PLCTAG_STATUS_OK) {
                tag->status = rc;
                return rc;
            }

            offset += tag->read_req_sizes[slot];
        }
    }

    /* mark the tag read in progress */
    tag->read_in_progress = 1;

    /* the read is now pending */
    tag->status = PLCTAG_STATUS_PENDING;

    pdebug(debug,"Done.");

    return PLCTAG_STATUS_PENDING;
}
/* * Start a transfer. Return -1 on error, * 0 if OK, 1 if we need to retry. * Parameter reviveok is set when doing * transfers for revives: it allows transfers to * be started immediately when a revive is in * progress. During revive, normal transfers * are queued if they share address space with * a currently active revive operation. */ int vinumstart(struct buf *bp, int reviveok) { int plexno; int maxplex; /* maximum number of plexes to handle */ struct volume *vol; struct request *rq; /* build up our request here */ enum requeststatus status; #if VINUMDEBUG if (debug & DEBUG_LASTREQS) logrq(loginfo_user_bp, (union rqinfou) bp, bp); #endif if ((bp->b_bcount % DEV_BSIZE) != 0) { /* bad length */ bp->b_error = EINVAL; /* invalid size */ bp->b_flags |= B_ERROR; biodone(bp); return -1; } rq = (struct request *) Malloc(sizeof(struct request)); /* allocate a request struct */ if (rq == NULL) { /* can't do it */ bp->b_error = ENOMEM; /* can't get memory */ bp->b_flags |= B_ERROR; biodone(bp); return -1; } bzero(rq, sizeof(struct request)); /* * Note the volume ID. This can be NULL, which * the request building functions use as an * indication for single plex I/O */ rq->bp = bp; /* and the user buffer struct */ if (DEVTYPE(bp->b_dev) == VINUM_VOLUME_TYPE) { /* it's a volume, */ rq->volplex.volno = Volno(bp->b_dev); /* get the volume number */ vol = &VOL[rq->volplex.volno]; /* and point to it */ vol->active++; /* one more active request */ maxplex = vol->plexes; /* consider all its plexes */ } else { vol = NULL; /* no volume */ rq->volplex.plexno = Plexno(bp->b_dev); /* point to the plex */ rq->isplex = 1; /* note that it's a plex */ maxplex = 1; /* just the one plex */ } if (bp->b_flags & B_READ) { /* * This is a read request. Decide * which plex to read from. * * There's a potential race condition here, * since we're not locked, and we could end * up multiply incrementing the round-robin * counter. This doesn't have any serious * effects, however. 
*/ if (vol != NULL) { vol->reads++; plexno = vol->preferred_plex; /* get the plex to use */ if (plexno < 0) { /* round robin */ plexno = vol->last_plex_read; vol->last_plex_read++; if (vol->last_plex_read >= vol->plexes) /* got the the end? */ vol->last_plex_read = 0; /* wrap around */ } status = build_read_request(rq, plexno); /* build a request */ } else { daddr_t diskaddr = bp->b_blkno; /* start offset of transfer */ status = bre(rq, /* build a request list */ rq->volplex.plexno, &diskaddr, diskaddr + (bp->b_bcount / DEV_BSIZE)); } if ((status > REQUEST_RECOVERED) /* can't satisfy it */ ||(bp->b_flags & B_DONE)) { /* XXX shouldn't get this without bad status */ if (status == REQUEST_DOWN) { /* not enough subdisks */ bp->b_error = EIO; /* I/O error */ bp->b_flags |= B_ERROR; } biodone(bp); freerq(rq); return -1; } return launch_requests(rq, reviveok); /* now start the requests if we can */ } else /* * This is a write operation. We write to all plexes. If this is * a RAID-4 or RAID-5 plex, we must also update the parity stripe. */ { if (vol != NULL) { vol->writes++; status = build_write_request(rq); /* Not all the subdisks are up */ } else { /* plex I/O */ daddr_t diskstart; diskstart = bp->b_blkno; /* start offset of transfer */ status = bre(rq, Plexno(bp->b_dev), &diskstart, bp->b_blkno + (bp->b_bcount / DEV_BSIZE)); /* build requests for the plex */ } if ((status > REQUEST_RECOVERED) /* can't satisfy it */ ||(bp->b_flags & B_DONE)) { /* XXX shouldn't get this without bad status */ if (status == REQUEST_DOWN) { /* not enough subdisks */ bp->b_error = EIO; /* I/O error */ bp->b_flags |= B_ERROR; } if ((bp->b_flags & B_DONE) == 0) biodone(bp); freerq(rq); return -1; } return launch_requests(rq, reviveok); /* now start the requests if we can */ } }