/* Mark the cached copy of SECTOR stale, if one is present.
   cache_find_block() hands back the entry with its modify_variables
   lock already held, so that lock must be released here. */
void
cache_invalidate_block (block_sector_t sector)
{
  struct cache_block *b = cache_find_block (sector);
  if (b == NULL)
    return;                     /* Sector not cached; nothing to do. */

  b->valid = 0;
  lock_release (&b->modify_variables);
}
/* Write SIZE bytes from BUFFER into the cached copy of sector BID,
   starting at byte OFFSET within the sector.  The write must lie
   entirely within a single sector.  The sector is loaded into the
   cache on a miss and marked dirty so it is flushed on eviction. */
void
cache_write (block_sector_t bid, const void *buffer, int offset, int size)
{
  ASSERT (offset < BLOCK_SECTOR_SIZE);
  ASSERT (offset + size <= BLOCK_SECTOR_SIZE);

  /* Find the cache slot holding BID; CACHE_WRITE presumably also
     registers this thread as a writer on a hit. */
  int cache_id = cache_find_block (bid, CACHE_WRITE);

  /* On a miss, load the sector and increment the writer count
     manually, since cache_add() does not do it for us. */
  if (cache_id == -1)
    {
      cache_id = cache_add (bid);
      cache[cache_id]->writer++;
    }
  ASSERT (cache_id != -1);

  /* Copy the caller's data into the cached sector. */
  memcpy (cache[cache_id]->kpage + offset, buffer, size);
  cache[cache_id]->accessed = true;
  cache[cache_id]->dirty = true;
  cache[cache_id]->writer--;

  if (CACHE_DEBUG)
    printf ("wrote cache %u: @offset %i from buffer %x with size %i\n",
            (unsigned int) cache_id, offset, (unsigned int) buffer, size);
}
/* Read SIZE bytes from sector BID, starting at byte OFFSET, into
   BUFFER.  The sector is loaded into the cache on a miss. */
void
cache_read (block_sector_t bid, void *buffer, int offset, int size)
{
  ASSERT (offset < BLOCK_SECTOR_SIZE);
  /* Keep the read inside one sector, mirroring cache_write();
     otherwise memcpy below would run past the cached page. */
  ASSERT (offset + size <= BLOCK_SECTOR_SIZE);

  int cache_id = cache_find_block (bid, CACHE_READ);

  /* On a miss, load the sector from disk and increment the reader
     count manually, since cache_add() does not do it for us. */
  if (cache_id == -1)
    {
      cache_id = cache_add (bid);
      ASSERT (cache_id != -1);
      cache[cache_id]->reader++;
      /* Read-ahead of the next sector is currently disabled. */
      /* TODO cache_readahead (bid + 1); */
    }

  /* Copy the requested span of the cached sector into BUFFER. */
  memcpy (buffer, cache[cache_id]->kpage + offset, size);

  cache[cache_id]->accessed = true;
  cache[cache_id]->reader--;

  if (CACHE_DEBUG)
    printf ("read cache %u\n", (unsigned int) cache_id);
}
/* Prefetch sector BID into the cache if it is not already resident. */
void
cache_readahead (block_sector_t bid)
{
  if (cache_find_block (bid, -1) != -1)
    return;                     /* Already cached; nothing to do. */

  cache_add (bid);
}
/* Pin the cache entry for sector SECT for shared access and return it.
   On a miss, a victim slot is evicted and SECT is loaded in its place.
   Both cache_find_block() and cache_evict_block() return with the
   entry's modify_variables lock held; we bump the accessor count under
   that lock, then drop it before handing the entry to the caller. */
struct cache_block *
cache_shared_pre (block_sector_t sect)
{
  struct cache_block *blk = cache_find_block (sect);

  /* Not resident: eviction brings SECT into the cache. */
  if (blk == NULL)
    blk = cache_evict_block (sect);

  /* Entry is valid and its lock is owned by this thread. */
  blk->accessors++;
  lock_release (&blk->modify_variables);
  return blk;
}
/* Bring sector SECT into the cache, evicting a victim slot if SECT is
   not already resident.  Returns the entry with its modify_variables
   lock held (callers such as cache_shared_pre() release it).
   eviction_lock serializes concurrent evictions; the find-after-acquire
   recheck covers the race where another thread loaded SECT first. */
struct cache_block * cache_evict_block(block_sector_t sect) {
  lock_acquire(&eviction_lock);
  /* Recheck under eviction_lock: SECT may have been loaded while we
     waited for the lock.  On a hit, cache_find_block() returns with
     the entry's lock held and we skip straight to the return. */
  struct cache_block * curr_block = cache_find_block(sect);
  if (curr_block == NULL) {
    /* Clock (second-chance) scan over the 64 slots: a slot with its
       use bit set gets a second chance (bit cleared, hand advances);
       any other slot — unused or invalid — becomes the victim, with
       its modify_variables lock still held after the break. */
    while (true) {
      if (clock_hand == 64) {
        clock_hand = 0;
      }
      curr_block = &cache[clock_hand];
      lock_acquire(&curr_block->modify_variables);
      if (curr_block->use && curr_block->valid) {
        curr_block->use = 0;
        lock_release(&curr_block->modify_variables);
        clock_hand++;
      } else {
        clock_hand++;
        break;
      }
    }
    /* Victim selected; we own its modify_variables lock.  Wait for
       in-flight accessors to drain before reusing the slot.
       NOTE(review): the original comment claimed the entry "has been
       marked invalid" here, but valid is only cleared below on the
       dirty path — confirm a clean valid victim cannot be found and
       pinned by a reader between the scan and the reload. */
    curr_block->evict_penders++;
    while (curr_block->accessors > 0) {
      cond_wait(&curr_block->need_to_evict, &curr_block->modify_variables);
    }
    curr_block->evict_penders--;
    /* Flush a dirty victim to its old sector before overwriting it;
       valid is cleared first so the entry cannot be matched mid-write. */
    if (curr_block->dirty && curr_block->valid) {
      curr_block->valid = 0;
      device_writes++;
      block_write(fs_device, curr_block->sect, curr_block->data);
      // cache_to_disk(curr_block); ACQUIRING LOCK NOT NECESSARY
      curr_block->dirty = 0;
    }
    /* Load SECT directly into the slot.  We still hold the entry's
       lock and eviction_lock, and the entry is (or should be — see
       NOTE above) invalid, so no other thread should touch it.
       Should not be a synchronization problem. */
    block_read(fs_device, sect, curr_block->data);
    curr_block->sect = sect;
    curr_block->valid = 1;
    curr_block->use = 0;
  }
  lock_release(&eviction_lock);
  /* Returned entry's modify_variables lock is still held. */
  return curr_block;
}