int ext4_block_cache_shake(struct ext4_blockdev *bdev) { int r = EOK; struct ext4_buf *buf; if (bdev->bc->dont_shake) return EOK; bdev->bc->dont_shake = true; while (!RB_EMPTY(&bdev->bc->lru_root) && ext4_bcache_is_full(bdev->bc)) { buf = ext4_buf_lowest_lru(bdev->bc); ext4_assert(buf); if (ext4_bcache_test_flag(buf, BC_DIRTY)) { r = ext4_block_flush_buf(bdev, buf); if (r != EOK) break; } ext4_bcache_drop_buf(bdev->bc, buf); } bdev->bc->dont_shake = false; return r; }
/**@brief Get a logical block from the block device, going through the
 *        block cache. If the block is already cached no physical read is
 *        performed. When the cache is full and write-back caching is
 *        enabled, one delayed-free, unreferenced buffer (the one with the
 *        lowest LRU id) is written out directly to make room first.
 * @param bdev block device descriptor
 * @param b    output block descriptor (lb_id/data filled on success)
 * @param lba  logical block address; must be < bdev->lg_bcnt
 * @return EOK on success; EIO if the device is not initialized; ERANGE if
 *         lba is out of range; ENOMEM if the cache gave no data buffer;
 *         otherwise the error from the direct write or physical read. */
int ext4_block_get(struct ext4_blockdev *bdev, struct ext4_block *b, uint64_t lba)
{
	uint64_t pba;
	uint32_t pb_cnt;
	uint32_t i;
	bool is_new;
	int r;

	ext4_assert(bdev && b);

	if(!(bdev->flags & EXT4_BDEV_INITIALIZED))
		return EIO;

	if(!(lba < bdev->lg_bcnt))
		return ERANGE;

	b->dirty = 0;
	b->lb_id = lba;

	/*If cache is full we have to flush it anyway :(*/
	if(ext4_bcache_is_full(bdev->bc) && bdev->cache_write_back){

		/* free_candidate == cnt means "none found". */
		uint32_t free_candidate = bdev->bc->cnt;
		uint32_t min_lru = 0xFFFFFFFF;

		/* Scan for the least-recently-used buffer whose free was
		 * delayed and which nobody references any more. */
		for (i = 0; i < bdev->bc->cnt; ++i) {
			/*Check if buffer free was delayed.*/
			if(!bdev->bc->free_delay[i])
				continue;

			/*Check reference counter.*/
			if(bdev->bc->refctr[i])
				continue;

			if(bdev->bc->lru_id[i] < min_lru){
				min_lru = bdev->bc->lru_id[i];
				free_candidate = i;
				continue;
			}
		}

		if(free_candidate < bdev->bc->cnt){
			/*Buffer free was delayed and have no reference. Flush it.*/
			r = ext4_blocks_set_direct(bdev,
					bdev->bc->data +
					bdev->bc->itemsize * free_candidate,
					bdev->bc->lba[free_candidate], 1);
			if(r != EOK)
				return r;

			/*No delayed anymore*/
			bdev->bc->free_delay[free_candidate] = 0;

			/*Reduce refered block count*/
			bdev->bc->ref_blocks--;
		}
	}

	/* Allocate (or find) a cache slot for this logical block. */
	r = ext4_bcache_alloc(bdev->bc, b, &is_new);
	if(r != EOK)
		return r;

	if(!is_new){
		/*Block is in cache. Read from physical device is not required*/
		return EOK;
	}

	if(!b->data)
		return ENOMEM;

	/* Translate the logical block to physical block address + count.
	 * NOTE(review): assumes lg_bsize is a multiple of ph_bsize —
	 * confirm against device setup. */
	pba = (lba * bdev->lg_bsize) / bdev->ph_bsize;
	pb_cnt = bdev->lg_bsize / bdev->ph_bsize;

	r = bdev->bread(bdev, b->data, pba, pb_cnt);

	if(r != EOK){
		/* Read failed: release the freshly allocated cache entry. */
		ext4_bcache_free(bdev->bc, b, 0);
		b->lb_id = 0;
		return r;
	}

	bdev->bread_ctr++;
	return EOK;
}