/*
 * Unlike its sync counterpart, this function issues I/Os even for cached
 * blocks.
 */
static errcode_t io_cache_vec_read_blocks(io_channel *channel,
					   struct io_vec_unit *ivus,
					   int count, bool nocache)
{
	struct io_cache *ic = channel->io_cache;
	struct io_cache_block *icb;
	errcode_t ret = 0;
	int i, j, blksize = channel->io_blksize;
	uint64_t blkno;
	uint32_t numblks;
	char *buf;

	/*
	 * Read all blocks. We could extend this to not issue I/Os for
	 * already cached blocks. But is it worth the effort?
	 */
	ret = unix_vec_read_blocks(channel, ivus, count);
	if (ret)
		goto out;

	/* refresh cache */
	for (i = 0; i < count; i++) {
		blkno = ivus[i].ivu_blkno;
		numblks = ivus[i].ivu_buflen / blksize;
		buf = ivus[i].ivu_buf;

		for (j = 0; j < numblks; ++j, ++blkno, buf += blksize) {
			icb = io_cache_lookup(ic, blkno);
			if (!icb) {
				if (nocache)
					continue;
				icb = io_cache_pop_lru(ic);
				icb->icb_blkno = blkno;
				io_cache_insert(ic, icb);
			}
			memcpy(icb->icb_buf, buf, blksize);
			if (nocache)
				io_cache_unsee(ic, icb);
			else
				io_cache_seen(ic, icb);
		}
	}

out:
	return ret;
}
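/*
 * A hypothetical caller sketch for the vectored read above.  It assumes
 * only what this file already shows: the io_vec_unit fields ivu_blkno,
 * ivu_buf and ivu_buflen, and buffers sized in multiples of
 * channel->io_blksize.  The function name and its arguments are
 * illustrative, not part of the real API.
 */
#if 0	/* illustrative only */
static errcode_t example_vec_read_two_ranges(io_channel *channel,
					     uint64_t blkno1, char *buf1,
					     uint64_t blkno2, char *buf2,
					     uint32_t numblks)
{
	struct io_vec_unit ivus[2];

	ivus[0].ivu_blkno = blkno1;
	ivus[0].ivu_buf = buf1;
	ivus[0].ivu_buflen = numblks * channel->io_blksize;

	ivus[1].ivu_blkno = blkno2;
	ivus[1].ivu_buf = buf2;
	ivus[1].ivu_buflen = numblks * channel->io_blksize;

	/* One call issues both ranges; the cache is refreshed afterwards */
	return io_cache_vec_read_blocks(channel, ivus, 2, false);
}
#endif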
/*
 * This relies on the fact that our cache is always up to date. If a
 * block is in the cache, the same thing is on disk. So here we'll write
 * a whole stream and update the cache as needed.
 */
static errcode_t io_cache_write_blocks(io_channel *channel, int64_t blkno,
					int count, const char *data,
					bool nocache)
{
	int i, completed = 0;
	errcode_t ret;
	struct io_cache *ic = channel->io_cache;
	struct io_cache_block *icb;

	/* Get the write out of the way */
	ret = unix_io_write_block_full(channel, blkno, count, data,
				       &completed);

	/*
	 * Now we sync up the cache with the data buffer. We have
	 * to sync up I/O that completed, even if the entire I/O did not.
	 *
	 * In the nocache case, we want to skip blocks that weren't in the
	 * cache, but we want to update blocks that were. Even though
	 * the caller specified "don't cache this", it's already in the
	 * cache. We don't want stale data.
	 */
	for (i = 0; i < completed; i++, data += channel->io_blksize) {
		icb = io_cache_lookup(ic, blkno + i);
		if (!icb) {
			if (nocache)
				continue;
			/*
			 * Steal the LRU buffer. We can't error here, so
			 * we can safely insert it before we copy the data.
			 */
			icb = io_cache_pop_lru(ic);
			icb->icb_blkno = blkno + i;
			io_cache_insert(ic, icb);
		}
		memcpy(icb->icb_buf, data, channel->io_blksize);
		if (nocache)
			io_cache_unsee(ic, icb);
		else
			io_cache_seen(ic, icb);
	}

	return ret;
}
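/*
 * The seen/unsee calls above are what make the nocache semantics work:
 * a "seen" block migrates to the hot end of the LRU, while an "unseen"
 * block is parked at the front so io_cache_pop_lru() steals it first.
 * Below is a minimal sketch of one way that could look, assuming a
 * kernel-style list_head LRU; the field names (ic_lru, icb_lru) are
 * illustrative and not the actual struct layout.
 */
#if 0	/* illustrative only */
static void io_cache_seen(struct io_cache *ic, struct io_cache_block *icb)
{
	/* Recently used blocks live at the tail, away from the thief */
	list_del(&icb->icb_lru);
	list_add_tail(&icb->icb_lru, &ic->ic_lru);
}

static void io_cache_unsee(struct io_cache *ic, struct io_cache_block *icb)
{
	/* Nocache blocks go to the front so they are stolen first */
	list_del(&icb->icb_lru);
	list_add(&icb->icb_lru, &ic->ic_lru);
}
#endif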
/*
 * This relies on the fact that our cache is always up to date. If a
 * block is in the cache, the same thing is on disk. Even if we re-read
 * the disk block, we don't need to update the cache. This allows us
 * to look for optimal I/O sizes; it's better to do one read for 1MB of
 * half-cached blocks than to read every other block.
 *
 * If the caller specifies "nocache", we still want to give them anything
 * we found in the cache, but we want cached blocks moved to the front
 * of the LRU. That way they get stolen first.
 */
static errcode_t io_cache_read_blocks(io_channel *channel, int64_t blkno,
				      int count, char *data, bool nocache)
{
	int i, good_blocks;
	errcode_t ret = 0;
	struct io_cache *ic = channel->io_cache;
	struct io_cache_block *icb;

	/*
	 * Here we check two things:
	 *
	 * 1) Are all the blocks cached?  If so, we can skip I/O.
	 * 2) If they are not all cached, we want to start our read at the
	 *    first uncached blkno.
	 */
	for (good_blocks = 0; good_blocks < count; good_blocks++) {
		icb = io_cache_lookup(ic, blkno + good_blocks);
		if (!icb)
			break;
	}

	/* Read any blocks not in the cache */
	if (good_blocks < count) {
		ret = unix_io_read_block(channel, blkno + good_blocks,
					 count - good_blocks,
					 data + (channel->io_blksize * good_blocks));
		if (ret)
			goto out;
	}

	/* Now we sync up the cache with the data buffer */
	for (i = 0; i < count; i++, data += channel->io_blksize) {
		icb = io_cache_lookup(ic, blkno + i);
		if (i < good_blocks) {
			/*
			 * We skipped reading this because it was in the
			 * cache.  Copy it to the data buffer.
			 */
			assert(icb);
			memcpy(data, icb->icb_buf, channel->io_blksize);
		} else if (!icb) {
			if (nocache)
				continue;
			/* Steal the LRU buffer */
			icb = io_cache_pop_lru(ic);
			icb->icb_blkno = blkno + i;
			io_cache_insert(ic, icb);

			/*
			 * We did I/O into the data buffer, now update
			 * the cache.
			 */
			memcpy(icb->icb_buf, data, channel->io_blksize);
		}

		/*
		 * What about if ((i >= good_blocks) && icb)?  That means
		 * we had the buffer in the cache, but we read it anyway
		 * to get a single I/O.  Our cache guarantees that the
		 * contents will match, so we just skip to marking the
		 * buffer seen.
		 */
		if (nocache)
			io_cache_unsee(ic, icb);
		else
			io_cache_seen(ic, icb);
	}

out:
	return ret;
}
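/*
 * Worked example of the run detection above, assuming 4KB blocks: a
 * read of count = 8 blocks starting at blkno = 100, with blocks
 * 100-102 already cached, stops the first loop at good_blocks = 3.
 * A single 20KB read then pulls blocks 103-107 straight into the
 * caller's buffer.  The sync-up loop copies 100-102 out of the cache
 * and, in the normal case, steals LRU buffers to cache 103-107 and
 * marks all eight blocks seen; with nocache set, 103-107 are left
 * uncached and 100-102 are pushed to the front of the LRU instead.
 */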