Example #1
File: cache.c Project: jnaneshm/614_hw4
/* unlink BLK from the hash table bucket chain in SET */
static void
unlink_htab_ent(struct cache_t *cp,		/* cache to update */
		struct cache_set_t *set,	/* set containing bkt chain */
		struct cache_blk_t *blk)	/* block to unlink */
{
  struct cache_blk_t *prev, *ent;
  int index = CACHE_HASH(cp, blk->tag);

  /* locate the block in the hash table bucket chain */
  for (prev=NULL,ent=set->hash[index];
       ent;
       prev=ent,ent=ent->hash_next)
    {
      if (ent == blk)
	break;
    }
  assert(ent);

  /* unlink the block from the hash table bucket chain */
  if (!prev)
    {
      /* head of hash bucket list */
      set->hash[index] = ent->hash_next;
    }
  else
    {
      /* middle or end of hash bucket list */
      prev->hash_next = ent->hash_next;
    }
  ent->hash_next = NULL;
}
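For reference, these per-set hash tables only exist when cp->hsize is non-zero. In stock SimpleScalar, CACHE_HASH folds the tag's bytes into a bucket index; a sketch of the macro as it appears in cache.h (hsize must be a power of two for the mask to work):

/* sketch of SimpleScalar's bucket hash (cache.h); hsize is a power of two */
#define CACHE_HASH(cp, key) \
  (((key >> 24) ^ (key >> 16) ^ (key >> 8) ^ key) & ((cp)->hsize - 1))

The librep and sawfish examples below define their own one- and two-argument CACHE_HASH macros; only the SimpleScalar form is sketched here.
Example #2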
static inline void
cache_invalidate_symbol (repv symbol)
{
    unsigned int hash = CACHE_HASH (symbol);
    if (ref_cache[hash].s != 0 && ref_cache[hash].n->symbol == symbol)
	ref_cache[hash].s = 0;
}
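Example #3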
static inline void
enter_cache (rep_struct *s, rep_struct_node *binding)
{
    unsigned int hash = CACHE_HASH (binding->symbol);
    int i, oldest_i = -1, oldest_age = INT_MAX;
    for (i = 0; i < CACHE_ASSOC; i++)
    {
	if (ref_cache[hash][i].s == 0)
	{
	    oldest_i = i;
	    break;
	}
	else if (ref_cache[hash][i].age < oldest_age)
	{
	    oldest_i = i;
	    oldest_age = ref_cache[hash][i].age;
	}
    }
    assert (oldest_i >= 0 && oldest_i < CACHE_ASSOC);
#ifdef DEBUG
    if (ref_cache[hash][oldest_i].s != 0)
    {
	if (ref_cache[hash][oldest_i].n->symbol == binding->symbol)
	    ref_cache_conflicts++;
	else
	    ref_cache_collisions++;
    }
#endif
    ref_cache[hash][oldest_i].s = s;
    ref_cache[hash][oldest_i].n = binding;
    ref_cache[hash][oldest_i].age = ++ref_age;
}
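enter_cache above indexes a small set-associative binding cache (the direct-mapped variant in Example #2 drops the inner dimension). The declarations are not part of this excerpt; a minimal sketch inferred from the accesses, with sizes assumed:

/* inferred layout of the binding cache (a sketch, not the upstream header) */
static struct {
    rep_struct *s;        /* structure owning the binding; 0 marks an empty slot */
    rep_struct_node *n;   /* cached symbol binding */
    unsigned int age;     /* LRU stamp, compared against ref_age */
} ref_cache[CACHE_SIZE][CACHE_ASSOC];
static unsigned int ref_age;    /* global clock for the age stamps */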
Example #4
/* flush the block containing ADDR from the cache CP, returns the latency of
   the block flush operation */
unsigned int				/* latency of flush operation */
cache_flush_addr(struct cache_t *cp,	/* cache instance to flush */
		 md_addr_t addr,	/* address of block to flush */
		 tick_t now)		/* time of cache flush */
{
  md_addr_t tag = CACHE_TAG(cp, addr);
  md_addr_t set = CACHE_SET(cp, addr);
  struct cache_blk_t *blk;
  int lat = cp->hit_latency; /* min latency to probe cache */

  fprintf(stderr, "flush address\n");

  if (cp->hsize)
    {
      /* highly-associative cache, access through the per-set hash tables */
      int hindex = CACHE_HASH(cp, tag);

      for (blk=cp->sets[set].hash[hindex];
	   blk;
	   blk=blk->hash_next)
	{
	  if (blk->tag == tag && (blk->status & CACHE_BLK_VALID))
	    break;
	}
    }
  else
    {
      /* low-associativity cache, linear search the way list */
      for (blk=cp->sets[set].way_head;
	   blk;
	   blk=blk->way_next)
	{
	  if (blk->tag == tag && (blk->status & CACHE_BLK_VALID))
	    break;
	}
    }

  if (blk)
    {
      cp->invalidations++;
      blk->status &= ~CACHE_BLK_VALID;

      /* blow away the last block to hit */
      cp->last_tagset = 0;
      cp->last_blk = NULL;

      if (blk->status & CACHE_BLK_DIRTY)
	{
	  /* write back the invalidated block */
          cp->writebacks++;
	  lat += cp->blk_access_fn(Write,
				   CACHE_MK_BADDR(cp, blk->tag, set),
				   cp->bsize, blk, now+lat);
	}
      /* move this block to tail of the way (LRU) list */
      update_way_list(&cp->sets[set], blk, Tail);
    }

  /* return latency of the operation */
  return lat;
}
Example #5
/* return non-zero if block containing address ADDR is contained in cache
 CP, this interface is used primarily for debugging and asserting cache
 invariants */
int /* non-zero if access would hit */
cache_probe(struct cache_t *cp, /* cache instance to probe */
md_addr_t addr) /* address of block to probe */
{
	md_addr_t tag = CACHE_TAG(cp, addr);
	md_addr_t set = CACHE_SET(cp, addr);
	struct cache_blk_t *blk;

	/* permissions are checked on cache misses */

	if (cp->hsize) {
		/* highly-associative cache, access through the per-set hash tables */
		int hindex = CACHE_HASH(cp, tag);

		for (blk = cp->sets[set].hash[hindex]; blk; blk = blk->hash_next) {
			if (blk->tag == tag && (blk->status & CACHE_BLK_VALID))
				return TRUE;
		}
	} else {
		/* low-associativity cache, linear search the way list */
		for (blk = cp->sets[set].way_head; blk; blk = blk->way_next) {
			if (blk->tag == tag && (blk->status & CACHE_BLK_VALID))
				return TRUE;
		}
	}

	/* cache block not found */
	return FALSE;
}
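Because cache_probe updates no LRU state and no statistics, it is safe to call from assertions. A hypothetical use, checking inclusion between two levels (dl1 and ul2 are the usual SimpleScalar cache instances; treat this as a sketch, not code from the project):

/* sketch: every block resident in dl1 should also be resident in ul2 */
assert(!cache_probe(dl1, addr) || cache_probe(ul2, addr));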
Example #6
repv
property_cache_ref (repv id, repv prop)
{
    unsigned int h, i;

    if (cache_vec == rep_NULL)
	return rep_NULL;

    h = CACHE_HASH (id, prop) * CACHE_ASSOC;

    DB (("prop ref: 0x%x,%s (%d) -> ", id, rep_STR (rep_SYM (prop)->name), h));

    for (i = h; i < h + CACHE_ASSOC; i++)
    {
	if (cache_ids[i] == id && cache_props[i] == prop)
	{
	    cache_hits++;
	    DB (("hit\n"));
	    cache_ages[i] = ++cache_clock;
	    return cache_values[i];
	}
    }

    DB (("miss\n"));
    cache_misses++;
    return rep_NULL;
}
Example #7
void
property_cache_invalidate (repv id, repv prop)
{
    unsigned int h, i;

    if (cache_vec == rep_NULL)
	return;

    h = CACHE_HASH (id, prop) * CACHE_ASSOC;

    for (i = h; i < h + CACHE_ASSOC; i++)
    {
	if (cache_ids[i] == id && cache_props[i] == prop)
	{
	    if (cache_updates[i] == 0)
	    {
		cache_ids[i] = 0;
		cache_props[i] = Qnil;
		cache_values[i] = Qnil;
	    }
	    else
		cache_updates[i]--;
	}
    }
}
Example #8
/* insert BLK onto the head of the hash table bucket chain in SET */
static void link_htab_ent(struct cache_t *cp, /* cache to update */
struct cache_set_t *set, /* set containing bkt chain */
struct cache_blk_t *blk) /* block to insert */
{
	int index = CACHE_HASH(cp, blk->tag);

	/* insert block onto the head of the bucket chain */
	blk->hash_next = set->hash[index];
	set->hash[index] = blk;
}
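link_htab_ent pairs with unlink_htab_ent from Example #1: a victim block must leave the bucket of its old tag before the tag changes and rejoin under the new one, which is exactly what the cache_access miss paths below do. A minimal sketch of the pairing:

/* sketch: re-hashing a victim block around a tag change */
if (cp->hsize)
  unlink_htab_ent(cp, &cp->sets[set], repl);  /* still hashed under old tag */
repl->tag = tag;                              /* adopt the new tag */
if (cp->hsize)
  link_htab_ent(cp, &cp->sets[set], repl);    /* hashed under new tag */
Example #9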
static inline void
cache_invalidate_symbol (repv symbol)
{
    unsigned int hash = CACHE_HASH (symbol);
    int i;
    for (i = 0; i < CACHE_ASSOC; i++)
    {
	if (ref_cache[hash][i].s != 0
	    && ref_cache[hash][i].n->symbol == symbol)
	{
	    ref_cache[hash][i].s = 0;
	}
    }
}
Example #10
void
property_cache_set (repv id, repv prop, repv value, int invals)
{
    unsigned int h, i, oldest, oldest_age;

    if (cache_vec == rep_NULL)
    {
	cache_vec = Fmake_vector (rep_MAKE_INT (CACHE_SIZE * 3), Qnil);
	rep_mark_static (&cache_vec);

	cache_ids = rep_VECT (cache_vec)->array;
	cache_props = cache_ids + CACHE_SIZE;
	cache_values = cache_props + CACHE_SIZE;
    }

    h = CACHE_HASH (id, prop) * CACHE_ASSOC;

    oldest_age = UINT_MAX;
    oldest = -1;

    for (i = h; i < h + CACHE_ASSOC; i++)
    {
	if (cache_ids[i] == id && cache_props[i] == prop)
	{
	    cache_values[i] = value;
	    cache_updates[i] += invals;
	    return;
	}

	if (cache_ages[i] <= oldest_age)
	{
	    oldest_age = cache_ages[i];
	    oldest = i;
	}
    }

    assert (oldest != -1);

    if (cache_ids[oldest] != 0)
	DB (("prop eject: 0x%x (%d)\n", cache_ids[oldest], oldest));

    cache_ids[oldest] = id;
    cache_props[oldest] = prop;
    cache_values[oldest] = value;
    cache_ages[oldest] = ++cache_clock;
    cache_updates[oldest] = invals;

    DB (("set: 0x%x,%s (%d)\n", id, rep_STR (rep_SYM (prop)->name), oldest));
}
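Note that the vector allocated above holds only the ids, props, and values (so the garbage collector scans them), while the ages and update counters must live in ordinary C storage. Assumed declarations, sketched from the accesses in Examples #6, #7, and #10:

/* assumed backing storage for the property cache (a sketch) */
static repv cache_vec = rep_NULL;             /* GC-visible backing vector */
static repv *cache_ids, *cache_props, *cache_values;
static unsigned int cache_ages[CACHE_SIZE];   /* LRU stamps */
static int cache_updates[CACHE_SIZE];         /* pending self-invalidations */
static unsigned int cache_clock, cache_hits, cache_misses;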
Example #11
static inline void
enter_cache (rep_struct *s, rep_struct_node *binding)
{
    unsigned int hash = CACHE_HASH (binding->symbol);
    if (ref_cache[hash].s != 0)
    {
#ifdef DEBUG
	if (ref_cache[hash].n->symbol == binding->symbol)
	    ref_cache_conflicts++;
	else
	    ref_cache_collisions++;
#endif
    }
    ref_cache[hash].s = s;
    ref_cache[hash].n = binding;
}
Example #12
static inline rep_struct_node *
lookup_cache (rep_struct *s, repv var)
{
    unsigned int hash = CACHE_HASH (var);
    if (ref_cache[hash].s == s && ref_cache[hash].n->symbol == var)
    {
#ifdef DEBUG
	ref_cache_hits++;
#endif
	return ref_cache[hash].n;
    }
    else
    {
#ifdef DEBUG
	ref_cache_misses++;
#endif
	return 0;
    }
}
Example #13
static inline rep_struct_node *
lookup_cache (rep_struct *s, repv var)
{
    unsigned int hash = CACHE_HASH (var);
    int i;
    for (i = 0; i < CACHE_ASSOC; i++)
    {
	if (ref_cache[hash][i].s == s && ref_cache[hash][i].n->symbol == var)
	{
#ifdef DEBUG
	    ref_cache_hits++;
#endif
	    ref_cache[hash][i].age++;
	    return ref_cache[hash][i].n;
	}
    }
#ifdef DEBUG
    ref_cache_misses++;
#endif
    return 0;
}
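The intended call pattern pairs this lookup with enter_cache: probe the cache first and fall back to the full search on a miss. A sketch (lookup_binding_slow is a hypothetical stand-in for the real slow path):

/* sketch of the usage; lookup_binding_slow is a hypothetical slow path */
rep_struct_node *n = lookup_cache (s, var);
if (n == 0)
{
    n = lookup_binding_slow (s, var);
    if (n != 0)
	enter_cache (s, n);
}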
Example #14
File: cache.cpp Project: swlpark/ece552
/* access a cache, perform a CMD operation on cache CP at address ADDR,
   places NBYTES of data at *P, returns latency of operation if initiated
   at NOW, places pointer to block user data in *UDATA, *P is untouched if
   cache blocks are not allocated (!CP->BALLOC), UDATA should be NULL if no
   user data is attached to blocks */
unsigned int				/* latency of access in cycles */
cache_access(struct cache_t *cp,	/* cache to access */
	     enum mem_cmd cmd,		/* access type, Read or Write */
	     md_addr_t addr,		/* address of access */
	     void *vp,			/* ptr to buffer for input/output */
	     int nbytes,		/* number of bytes to access */
	     tick_t now,		/* time of access */
	     byte_t **udata,		/* for return of user data ptr */
	     md_addr_t *repl_addr,	/* for address of replaced block */
	     int prefetch)		/* 1 if the access is a prefetch, 0 if it is not */
{
  byte_t *p = vp;
  md_addr_t tag = CACHE_TAG(cp, addr);
  md_addr_t set = CACHE_SET(cp, addr);
  md_addr_t bofs = CACHE_BLK(cp, addr);
  struct cache_blk_t *blk, *repl;
  int lat = 0;

  /* default replacement address */
  if (repl_addr)
    *repl_addr = 0;

  /* check alignments */
  if ((nbytes & (nbytes-1)) != 0 || (addr & (nbytes-1)) != 0)
    fatal("cache: access error: bad size or alignment, addr 0x%08x", addr);

  /* access must fit in cache block */
  /* FIXME:
     ((addr + (nbytes - 1)) > ((addr & ~cp->blk_mask) + (cp->bsize - 1))) */
  if ((addr + nbytes) > ((addr & ~cp->blk_mask) + cp->bsize))
    fatal("cache: access error: access spans block, addr 0x%08x", addr);

  /* permissions are checked on cache misses */

  /* check for a fast hit: access to same block */
  if (CACHE_TAGSET(cp, addr) == cp->last_tagset)
    {
      /* hit in the same block */
      blk = cp->last_blk;
      goto cache_fast_hit;
    }
    
  if (cp->hsize)
    {
      /* highly-associative cache, access through the per-set hash tables */
      int hindex = CACHE_HASH(cp, tag);

      for (blk=cp->sets[set].hash[hindex];
	   blk;
	   blk=blk->hash_next)
	{
	  if (blk->tag == tag && (blk->status & CACHE_BLK_VALID))
	    goto cache_hit;
	}
    }
  else
    {
      /* low-associativity cache, linear search the way list */
      for (blk=cp->sets[set].way_head;
	   blk;
	   blk=blk->way_next)
	{
	  if (blk->tag == tag && (blk->status & CACHE_BLK_VALID))
	    goto cache_hit;
	}
    }

  /* cache block not found */

  /* **MISS** */
  if (prefetch == 0 ) {

     cp->misses++;

     if (cmd == Read) {	
	cp->read_misses++;
     }
  }

  /* ECE552 Assignment 4 - BEGIN CODE */
  if (strcmp(cp->name, "dl1") == 0) {
    for(std::list<evicted_tag>::iterator it = evicted_blks[set].begin(); it != evicted_blks[set].end(); ++it)
    {
       if(it->tag == tag && it->prefetched) {
         //move element to the front of the list
         if(it != evicted_blks[set].begin()) {
           std::list<evicted_tag>::iterator tmp = it; 
           evicted_blks[set].splice(evicted_blks[set].begin(), evicted_blks[set], tmp, ++it);
         }
         cp->prefetch_misses++;
         break;
       }
    }
  }
  /* ECE552 Assignment 4 - END CODE */


  /* select the appropriate block to replace, and re-link this entry to
     the appropriate place in the way list */
  switch (cp->policy) {
  case LRU:
  case FIFO:
    repl = cp->sets[set].way_tail;
    update_way_list(&cp->sets[set], repl, Head);
    break;
  case Random:
    {
      int bindex = myrand() & (cp->assoc - 1);
      repl = CACHE_BINDEX(cp, cp->sets[set].blks, bindex);
    }
    break;
  default:
    panic("bogus replacement policy");
  }

  /* remove this block from the hash bucket chain, if hash exists */
  if (cp->hsize)
    unlink_htab_ent(cp, &cp->sets[set], repl);

  /* blow away the last block to hit */
  cp->last_tagset = 0;
  cp->last_blk = NULL;

  /* write back replaced block data */
  if (repl->status & CACHE_BLK_VALID)
    {
      cp->replacements++;

      if (repl_addr)
	*repl_addr = CACHE_MK_BADDR(cp, repl->tag, set);
 
      /* don't replace the block until outstanding misses are satisfied */
      lat += BOUND_POS(repl->ready - now);
 
      /* stall until the bus to next level of memory is available */
      lat += BOUND_POS(cp->bus_free - (now + lat));
 
      /* track bus resource usage */
      cp->bus_free = MAX(cp->bus_free, (now + lat)) + 1;

      if (repl->status & CACHE_BLK_DIRTY)
	{
	  /* write back the cache block */
	  cp->writebacks++;
	  lat += cp->blk_access_fn(Write,
				   CACHE_MK_BADDR(cp, repl->tag, set),
				   cp->bsize, repl, now+lat, 0);
	}
    }

  /* ECE552 Assignment 4 - BEGIN CODE */
  /* evicted cache_blk */

  if (strcmp(cp->name, "dl1") == 0) {
    if (evicted_blks[set].size() < cp->assoc) {
       evicted_blks[set].push_front({false, repl->tag});
    } else {
       evicted_blks[set].pop_back();
       evicted_blks[set].push_front({false, repl->tag});
    }
  }
  /* ECE552 Assignment 4 - END CODE */


  /* update block tags */
  repl->tag = tag;
  repl->status = CACHE_BLK_VALID;	/* dirty bit set on update */

  /* read data block */
  lat += cp->blk_access_fn(Read, CACHE_BADDR(cp, addr), cp->bsize,
			   repl, now+lat, prefetch);

  /* copy data out of cache block */
  if (cp->balloc)
    {
      CACHE_BCOPY(cmd, repl, bofs, p, nbytes);
    }

  /* update dirty status */
  if (cmd == Write)
    repl->status |= CACHE_BLK_DIRTY;

  /* get user block data, if requested and it exists */
  if (udata)
    *udata = repl->user_data;

  /* update block status */
  repl->ready = now+lat;

  /* link this entry back into the hash table */
  if (cp->hsize)
    link_htab_ent(cp, &cp->sets[set], repl);

  if (prefetch == 0) {	/* only regular cache accesses can generate a prefetch */
  	generate_prefetch(cp, addr);
  }

  /* return latency of the operation */
  return lat;


 cache_hit: /* slow hit handler */
  
  /* **HIT** */
  if (prefetch == 0) {

     cp->hits++;

     if (cmd == Read) {	
	   cp->read_hits++;
     }
  }
  /* ECE552 Assignment 4 - BEGIN CODE */
  if (blk->prefetched){
    if(blk->prefetch_used == 0) {
       blk->prefetch_used = 1;
       cp->prefetch_useful_cnt++;
    }
  }
  /* ECE552 Assignment 4 - END CODE */


  /* copy data out of cache block, if block exists */
  if (cp->balloc)
    {
      CACHE_BCOPY(cmd, blk, bofs, p, nbytes);
    }

  /* update dirty status */
  if (cmd == Write)
    blk->status |= CACHE_BLK_DIRTY;

  /* if LRU replacement and this is not the first element of list, reorder */
  if (blk->way_prev && cp->policy == LRU)
    {
      /* move this block to head of the way (MRU) list */
      update_way_list(&cp->sets[set], blk, Head);
    }

  /* tag is unchanged, so hash links (if they exist) are still valid */

  /* record the last block to hit */
  cp->last_tagset = CACHE_TAGSET(cp, addr);
  cp->last_blk = blk;

  /* get user block data, if requested and it exists */
  if (udata)
    *udata = blk->user_data;

  if (prefetch == 0) {	/* only regular cache accesses can generate a prefetch */
	generate_prefetch(cp, addr);
  }


  /* return first cycle data is available to access */
  return (int) MAX(cp->hit_latency, (blk->ready - now));

 cache_fast_hit: /* fast hit handler */
  
  /* **FAST HIT** */
  if (prefetch == 0) {
     
     cp->hits++;

     if (cmd == Read) {	
        cp->read_hits++;
     }
  }
  /* ECE552 Assignment 4 - BEGIN CODE */
  if (blk->prefetched){
    if(blk->prefetch_used == 0) {
       blk->prefetch_used = 1;
       cp->prefetch_useful_cnt++;
    }
  }
  /* ECE552 Assignment 4 - END CODE */


  /* copy data out of cache block, if block exists */
  if (cp->balloc)
    {
      CACHE_BCOPY(cmd, blk, bofs, p, nbytes);
    }

  /* update dirty status */
  if (cmd == Write)
    blk->status |= CACHE_BLK_DIRTY;

  /* this block hit last, no change in the way list */

  /* tag is unchanged, so hash links (if they exist) are still valid */

  /* get user block data, if requested and it exists */
  if (udata)
    *udata = blk->user_data;

  /* record the last block to hit */
  cp->last_tagset = CACHE_TAGSET(cp, addr);
  cp->last_blk = blk;

  if (prefetch == 0) {	/* only regular cache accesses can generate a prefetch */
     generate_prefetch(cp, addr);
  }

  /* return first cycle data is available to access */
  return (int) MAX(cp->hit_latency, (blk->ready - now));
}
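The dl1 bookkeeping above keeps a short list of recently evicted tags per set. The element type and container are not in this excerpt; from the brace initializers and the calls, they would look roughly like this (a sketch; the array size is an assumption, and in the project it would match dl1's set count):

// inferred shape of the eviction bookkeeping (sketch; size is an assumption)
#include <list>
struct evicted_tag {
  bool prefetched;    // was the evicted block originally filled by a prefetch?
  md_addr_t tag;      // tag of the evicted block
};
static std::list<evicted_tag> evicted_blks[512];  // one list per dl1 set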
Example #15
/* access a cache, perform a CMD operation on cache CP at address ADDR,
   places NBYTES of data at *P, returns latency of operation if initiated
   at NOW, places pointer to block user data in *UDATA, *P is untouched if
   cache blocks are not allocated (!CP->BALLOC), UDATA should be NULL if no
   user data is attached to blocks */
unsigned int				/* latency of access in cycles */
cache_access(struct cache_t *cp,	/* cache to access */
	     enum mem_cmd cmd,		/* access type, Read or Write */
	     md_addr_t addr,		/* address of access */
	     void *vp,			/* ptr to buffer for input/output */
	     int nbytes,		/* number of bytes to access */
	     tick_t now,		/* time of access */
	     byte_t **udata,		/* for return of user data ptr */
	     md_addr_t *repl_addr)	/* for address of replaced block */
{
  byte_t *p = vp;
  md_addr_t tag = CACHE_TAG(cp, addr);
  md_addr_t set = CACHE_SET(cp, addr);
  md_addr_t bofs = CACHE_BLK(cp, addr);
  struct cache_blk_t *blk, *repl;
  int lat = 0;

  if(cp->isL2){
    if(set > 512){
      fprintf(stderr, "Houston we have a problem, set = %d\n", set);
      scanf("%d", &lat);
    }
  }

  int pointerLat = 0;

  /* default replacement address */
  if (repl_addr)
    *repl_addr = 0;

  /* check alignments */
  if ((nbytes & (nbytes-1)) != 0 || (addr & (nbytes-1)) != 0)
    fatal("cache: access error: bad size or alignment, addr 0x%08x", addr);

  /* access must fit in cache block */
  /* FIXME:
     ((addr + (nbytes - 1)) > ((addr & ~cp->blk_mask) + (cp->bsize - 1))) */
  if ((addr + nbytes) > ((addr & ~cp->blk_mask) + cp->bsize))
    fatal("cache: access error: access spans block, addr 0x%08x", addr);

  /* permissions are checked on cache misses */

  /* check for a fast hit: access to same block */
  if (CACHE_TAGSET(cp, addr) == cp->last_tagset)
    {
      /* hit in the same block */
      blk = cp->last_blk;
      goto cache_fast_hit;
    }



  /* FP-JS: loc stores the last set traversed through the linked list;
     set itself is kept so we still know where the head of the list is
     for replacement */
  unsigned int loc = set; 
  /*FP-BC Modified cache hit checker for new cache structure*/
  if(cp->isL2)
  {
       /*FP-BC continue through each linked set with data*/
       while(cp->sets[loc].usageCtr)
       {

      //if(cp->isL2)
      //fprintf(stderr, "ptr = %d, loc = %d", cp->sets[loc].fwdPtr, loc);
          if (cp->hsize)
          {
              /* highly-associative cache, access through the per-set hash tables */
              int hindex = CACHE_HASH(cp, tag);

              for (blk=cp->sets[loc].hash[hindex]; blk; blk=blk->hash_next)
              {
                  if (blk->tag == tag && (blk->status & CACHE_BLK_VALID)){
                    //fprintf(stderr, "Hit!");
                    goto cache_hit;
                  }
              }
          }

          else
          {
              /* low-associativity cache, linear search the way list */
              for (blk=cp->sets[loc].way_head; blk; blk=blk->way_next)
              {
                  if (blk->tag == tag && (blk->status & CACHE_BLK_VALID)){
                       //                 fprintf(stderr, "Hit!");
                    goto cache_hit;
                  }
              }
          }

          /*FP-BC If the current set has a pointer to another set,
            follow it and check again for a hit*/
          if(cp->sets[loc].fwdPtr){
            loc = cp->sets[loc].fwdPtr;
            pointerLat+=1;
          }
          else
            break;
       }
  }

  /*FP-BC Original cache hit checker*/
  else
  {
      if (cp->hsize)
      {
          /* highly-associative cache, access through the per-set hash tables */
          int hindex = CACHE_HASH(cp, tag);

          for (blk=cp->sets[set].hash[hindex]; blk; blk=blk->hash_next)
          {
              if (blk->tag == tag && (blk->status & CACHE_BLK_VALID))
                goto cache_hit;
          }
      }
      else
      {
          /* low-associativity cache, linear search the way list */
          for (blk=cp->sets[set].way_head; blk; blk=blk->way_next)
          {
              if (blk->tag == tag && (blk->status & CACHE_BLK_VALID))
                goto cache_hit;
          }
      }
  }

  /* cache block not found */

  /* **MISS** */
  cp->misses++;

    enum cache_policy policy = cp->policy;

  /* select the appropriate block to replace, and re-link this entry to
     the appropriate place in the way list */
  if(cp->isL2){
    if(cp->sets[loc].fullBit == 0){
      //fprintf(stderr, "FIFO\n" );
      policy = FIFO; //use FIFO to fill the set
      cp->sets[loc].usageCtr++;
      if(loc != set)
        cp->sets[set].usageCtr++;
      if(cp->sets[loc].usageCtr == cp->assoc){
        cp->sets[loc].fullBit = 1; // set full if adding to the set reached its assoc
      }
      set = loc;
    }
    else if(cp->fullFlag == 0){
      int numSets = cp->sets[set].usageCtr/cp->assoc;
      if(numSets < 5){ //only add a line if the linked list is less than 5 length
        //fprintf(stderr, "adding a pointer. FSR = %d, loc = %d\n", cp->FSR, cp->nsets, numSets, loc);
        policy = FIFO;
        cp->sets[loc].usageCtr++;
        if(loc != set)
          cp->sets[set].usageCtr++;
        cp->sets[loc].fwdPtr = cp->FSR;
        if(loc == cp->FSR){
          fprintf(stderr, "FSR = %d loc = %d\n", cp->FSR,  loc);
         // scanf("%s", &loc);
        }
        set = cp->FSR;
        cp->FSR = 1 + (cp->FSR);
        if(cp->FSR >= cp->nsets*2){
          cp->fullFlag = 1;
        }
      }
      else {
        //fprintf(stderr, "more than 5 pointers\n");
        int times = rand() % numSets;
        int i = 0;
        for(i = 0; i < times; i++){
          if(cp->sets[set].fwdPtr != 0){
            set = cp->sets[set].fwdPtr;
                        pointerLat+=1;
          }
        }
      }
    }
    else{ //else everything is full so randomly select a set
      //fprintf(stderr, "evicting a set\n");
      int numSets = cp->sets[set].usageCtr/cp->assoc;
      //fprintf(stderr, "numSets is %d\n", numSets);
      int times = myrand();
      times = times % numSets;
      //fprintf(stderr, "times is %d\n", times);
      int i = 0;
      for(i = 0; i < times; i++){
        if(cp->sets[set].fwdPtr != 0){
          set = cp->sets[set].fwdPtr;
                      pointerLat+=1;
        }
      }
    }
  }
  switch (policy) {
  case LRU:
  case FIFO:
    repl = cp->sets[set].way_tail;
    update_way_list(&cp->sets[set], repl, Head);
    break;
  case Random:
    {
      //fprintf(stderr, "here\n" );
      int bindex = myrand();
      bindex = bindex & (cp->assoc - 1);
      repl = CACHE_BINDEX(cp, cp->sets[set].blks, bindex);
      //fprintf(stderr, "exiting\n" );
    }
    break;
  default:
    panic("bogus replacement policy");
  }

  /* remove this block from the hash bucket chain, if hash exists */
  if (cp->hsize)
    unlink_htab_ent(cp, &cp->sets[set], repl);

  /* blow away the last block to hit */
  cp->last_tagset = 0;
  cp->last_blk = NULL;

  /* write back replaced block data */
  if (repl->status & CACHE_BLK_VALID)
    {
      cp->replacements++;

      if (repl_addr)
	*repl_addr = CACHE_MK_BADDR(cp, repl->tag, set);

      /* don't replace the block until outstanding misses are satisfied */
      lat += BOUND_POS(repl->ready - now);

      /* stall until the bus to next level of memory is available */
      lat += BOUND_POS(cp->bus_free - (now + lat));

      /* track bus resource usage */
      cp->bus_free = MAX(cp->bus_free, (now + lat)) + 1;

      if (repl->status & CACHE_BLK_DIRTY)
	{
	  /* write back the cache block */
	  cp->writebacks++;
	  lat += cp->blk_access_fn(Write,
				   CACHE_MK_BADDR(cp, repl->tag, set),
				   cp->bsize, repl, now+lat);
	}
    }

  /* update block tags */
  repl->tag = tag;
  repl->status = CACHE_BLK_VALID;	/* dirty bit set on update */

  /* read data block */
  lat += cp->blk_access_fn(Read, CACHE_BADDR(cp, addr), cp->bsize,
			   repl, now+lat);

  /* copy data out of cache block */
  if (cp->balloc)
    {
      CACHE_BCOPY(cmd, repl, bofs, p, nbytes);
    }



  /* update dirty status */
  if (cmd == Write)
    repl->status |= CACHE_BLK_DIRTY;

  /* get user block data, if requested and it exists */
  if (udata)
    *udata = repl->user_data;

  /* update block status */
  repl->ready = now+lat;

  /* link this entry back into the hash table */
  if (cp->hsize)
    link_htab_ent(cp, &cp->sets[set], repl);

  /* return latency of the operation */
  return lat+pointerLat;


 cache_hit: /* slow hit handler */

  /* **HIT** */
  cp->hits++;

  /* copy data out of cache block, if block exists */
  if (cp->balloc)
    {
      CACHE_BCOPY(cmd, blk, bofs, p, nbytes);
    }

  /* update dirty status */
  if (cmd == Write)
    blk->status |= CACHE_BLK_DIRTY;

  /* if LRU replacement and this is not the first element of list, reorder */
  if (blk->way_prev && cp->policy == LRU)
    {
      /* move this block to head of the way (MRU) list */
      update_way_list(&cp->sets[set], blk, Head);
    }

  /* tag is unchanged, so hash links (if they exist) are still valid */

  /* record the last block to hit */
  cp->last_tagset = CACHE_TAGSET(cp, addr);
  cp->last_blk = blk;

  /* get user block data, if requested and it exists */
  if (udata){
    *udata = blk->user_data;
    
  }

  /* return first cycle data is available to access */
  return (int) MAX(cp->hit_latency+pointerLat, pointerLat+(blk->ready - now));

 cache_fast_hit: /* fast hit handler */
  /* **FAST HIT** */
  cp->hits++;

  /* copy data out of cache block, if block exists */
  if (cp->balloc)
    {
      CACHE_BCOPY(cmd, blk, bofs, p, nbytes);
    }

  /* update dirty status */
  if (cmd == Write)
    blk->status |= CACHE_BLK_DIRTY;

  /* this block hit last, no change in the way list */

  /* tag is unchanged, so hash links (if they exist) are still valid */

  /* get user block data, if requested and it exists */
  if (udata)
    *udata = blk->user_data;

  /* record the last block to hit */
  cp->last_tagset = CACHE_TAGSET(cp, addr);
  cp->last_blk = blk;

  /* return first cycle data is available to access */
  return (int) MAX(pointerLat+cp->hit_latency, pointerLat+(blk->ready - now));
}
Example #16
File: cache.cpp Project: swlpark/ece552
/* ECE552 Assignment 4 - BEGIN CODE */
void fetch_cache_blk (struct cache_t *cp, md_addr_t addr) {
  md_addr_t tag = CACHE_TAG(cp, addr);
  md_addr_t set = CACHE_SET(cp, addr);
  md_addr_t bofs = CACHE_BLK(cp, addr);

  int lat = 0;
  struct cache_blk_t *blk, *repl;

  //check if the block already exists in cache
  if (cp->hsize) {
      /* highly-associative cache, access through the per-set hash tables */
      int hindex = CACHE_HASH(cp, tag);

      for (blk=cp->sets[set].hash[hindex]; blk; blk=blk->hash_next){
	  if (blk->tag == tag && (blk->status & CACHE_BLK_VALID))
	    return;
      }
  } else {
      /* low-associativity cache, linear search the way list */
      for (blk=cp->sets[set].way_head; blk; blk=blk->way_next) {
	  if (blk->tag == tag && (blk->status & CACHE_BLK_VALID))
	    return;
      }
  }
  switch (cp->policy) {
  case LRU:
  case FIFO:
    repl = cp->sets[set].way_tail;
    update_way_list(&cp->sets[set], repl, Head);
    break;
  case Random:
    {
      int bindex = myrand() & (cp->assoc - 1);
      repl = CACHE_BINDEX(cp, cp->sets[set].blks, bindex);
    }
    break;
  default:
    panic("bogus replacement policy");
  }

  /* remove this block from the hash bucket chain, if hash exists */
  if (cp->hsize)
    unlink_htab_ent(cp, &cp->sets[set], repl);

  /* evicted cache_blk */
  if (evicted_blks[set].size() < cp->assoc) {
     evicted_blks[set].push_front({true, repl->tag});
  } else {
     evicted_blks[set].pop_back();
     evicted_blks[set].push_front({true, repl->tag});
  }

  /* write back replaced block data */
  if (repl->status & CACHE_BLK_VALID) {
      cp->replacements++;
      if (repl->status & CACHE_BLK_DIRTY)
      {
        /* write back the cache block */
        cp->writebacks++;
        lat += cp->blk_access_fn(Write,
      			   CACHE_MK_BADDR(cp, repl->tag, set),
      			   cp->bsize, repl, 0, 0);
      }
  }
  /* update block tags */
  repl->tag = tag;
  repl->status = CACHE_BLK_VALID;	/* dirty bit set on update */
  repl->prefetched = 1;
  repl->prefetch_used = 0;

  /* read data block */
  cp->prefetch_cnt += 1;
  lat += cp->blk_access_fn(Read, CACHE_BADDR(cp, addr), cp->bsize,
			   repl, 0, 0);	/* fill time not tracked for prefetches */

  /* update block status */
  repl->ready = 0;	/* tick_t is integral; 0, not NULL */

  /* link this entry back into the hash table */
  if (cp->hsize)
     link_htab_ent(cp, &cp->sets[set], repl);

}
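Example #14 calls generate_prefetch on every demand access, and this helper gives the prefetcher a way to install a block without touching the hit/miss statistics. The prefetcher itself is not in this excerpt; a hypothetical next-line version built on fetch_cache_blk might look like this (the real assignment presumably uses a stride table instead):

/* hypothetical next-line prefetcher on top of fetch_cache_blk (a sketch) */
void generate_prefetch(struct cache_t *cp, md_addr_t addr)
{
  md_addr_t next = (addr & ~cp->blk_mask) + cp->bsize;  /* next block's base */
  fetch_cache_blk(cp, next);
}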
Example #17
File: cache.c Project: kuthulas/PDP
/* access a cache, perform a CMD operation on cache CP at address ADDR,
   places NBYTES of data at *P, returns latency of operation if initiated
   at NOW, places pointer to block user data in *UDATA, *P is untouched if
   cache blocks are not allocated (!CP->BALLOC), UDATA should be NULL if no
   user data is attached to blocks */
unsigned int				/* latency of access in cycles */
cache_access(struct cache_t *cp,	/* cache to access */
	     enum mem_cmd cmd,		/* access type, Read or Write */
	     md_addr_t addr,		/* address of access */
	     void *vp,			/* ptr to buffer for input/output */
	     int nbytes,		/* number of bytes to access */
	     tick_t now,		/* time of access */
	     byte_t **udata,		/* for return of user data ptr */
	     md_addr_t *repl_addr)	/* for address of replaced block */
{
  byte_t *p = vp;
  md_addr_t tag = CACHE_TAG(cp, addr);
  md_addr_t set = CACHE_SET(cp, addr);
  md_addr_t bofs = CACHE_BLK(cp, addr);

  struct cache_blk_t *blk, *repl;
  struct pdp_fifo_node *fnode, *tnode;
  int lat = 0;

  /* default replacement address */
  if (repl_addr)
    *repl_addr = 0;

  /* check alignments */
  if ((nbytes & (nbytes-1)) != 0 || (addr & (nbytes-1)) != 0)
    fatal("cache: access error: bad size or alignment, addr 0x%08x", addr);

  /* access must fit in cache block */
  /* FIXME:
     ((addr + (nbytes - 1)) > ((addr & ~cp->blk_mask) + (cp->bsize - 1))) */
  if ((addr + nbytes) > ((addr & ~cp->blk_mask) + cp->bsize))
    fatal("cache: access error: access spans block, addr 0x%08x", addr);

  /* permissions are checked on cache misses */

  /* PDP distance decrement on set access */
  if(cp->policy == PDP){
    for (blk=cp->sets[set].way_head;
      blk;
      blk=blk->way_next)
    {
      if (blk->rpd > 0) blk->rpd--;
    }
    /* PDP counter update */
    cp->PDP_Nt++;
    if((cp->PDP_Nt % 50000) == 0) compute_pd(cp);
  }

  /* check for a fast hit: access to same block */
  if (CACHE_TAGSET(cp, addr) == cp->last_tagset)
    {
      /* hit in the same block */
      blk = cp->last_blk;
      goto cache_fast_hit;
    }
    
  if (cp->hsize)
    {
      /* highly-associative cache, access through the per-set hash tables */
      int hindex = CACHE_HASH(cp, tag);

      for (blk=cp->sets[set].hash[hindex];
	   blk;
	   blk=blk->hash_next)
	{
	  if (blk->tag == tag && (blk->status & CACHE_BLK_VALID))
	    goto cache_hit;
	}
    }
  else
    {
      /* low-associativity cache, linear search the way list */
      for (blk=cp->sets[set].way_head;
	   blk;
	   blk=blk->way_next)
	{
	  if (blk->tag == tag && (blk->status & CACHE_BLK_VALID))
	    goto cache_hit;
	}
    }

  /* cache block not found */

  /* **MISS** */
  cp->misses++;

  /* select the appropriate block to replace, and re-link this entry to
     the appropriate place in the way list */
  switch (cp->policy) {
  case LRU:
  case FIFO:
    repl = cp->sets[set].way_tail;
    update_way_list(&cp->sets[set], repl, Head);
    break;
  case Random:
    {
      int bindex = myrand() & (cp->assoc - 1);
      repl = CACHE_BINDEX(cp, cp->sets[set].blks, bindex);
    }
    break;
  case PDP:
  {
    int bindex, nindex=-1, iindex=1, rindex=1, max_irpd=-1, max_rrpd=-1;
    for (bindex=cp->assoc-1, blk=cp->sets[set].way_head;
      blk;
      bindex--, blk=blk->way_next)
    {
     // if(blk->rpd!=0) printf("BLK: %d, RPD: %d\n", bindex, blk->rpd);
      /* victim is the first-found unprotected line */
      if(blk->rpd == 0) {
        nindex = bindex;
        break;
      }
      
      /* victim selection if there are no unprotected lines */
      /* replace inserted line with highest rpd, if none found replace reused line with highest rpd */
      if(blk->rpd > max_irpd){
        if(blk->reused == 0){
          max_irpd = blk->rpd;
          iindex = bindex;
        }
      }
      if(blk->rpd > max_rrpd){
        if(blk->reused == 1){
          max_rrpd = blk->rpd;
          rindex = bindex;
        }
      }
    }

    if(nindex==-1) {
      nindex = ((max_irpd == -1) ? rindex : iindex);
      repl = cp->sets[set].way_tail;
    }
    else repl = CACHE_BINDEX(cp, cp->sets[set].blks, nindex);
    update_way_list(&cp->sets[set], repl, Head);
    repl->rpd = PDP_PD - 1; /* PDP distance decrement on block replacement */
    repl->reused = 0;
  }
    break;
  default:
    panic("bogus replacement policy");
  }

  /* remove this block from the hash bucket chain, if hash exists */
  if (cp->hsize)
    unlink_htab_ent(cp, &cp->sets[set], repl);

  /* blow away the last block to hit */
  cp->last_tagset = 0;
  cp->last_blk = NULL;

  /* write back replaced block data */

  if (repl->status & CACHE_BLK_VALID)
    {
      cp->replacements++;

      if (repl_addr)
	*repl_addr = CACHE_MK_BADDR(cp, repl->tag, set);
 
      /* don't replace the block until outstanding misses are satisfied */
      lat += BOUND_POS(repl->ready - now);
 
      /* stall until the bus to next level of memory is available */
      lat += BOUND_POS(cp->bus_free - (now + lat));
 
      /* track bus resource usage */
      cp->bus_free = MAX(cp->bus_free, (now + lat)) + 1;

      if (repl->status & CACHE_BLK_DIRTY)
	{
	  /* write back the cache block */
	  cp->writebacks++;
	  lat += cp->blk_access_fn(Write,
				   CACHE_MK_BADDR(cp, repl->tag, set),
				   cp->bsize, repl, now+lat);
	}
    }

  /* update block tags */
  repl->tag = tag;
  repl->status = CACHE_BLK_VALID;	/* dirty bit set on update */
  /* read data block */
  lat += cp->blk_access_fn(Read, CACHE_BADDR(cp, addr), cp->bsize,
			   repl, now+lat);

  /* copy data out of cache block */
  if (cp->balloc)
    {
      CACHE_BCOPY(cmd, repl, bofs, p, nbytes);
    }

  /* update dirty status */
  if (cmd == Write)
    repl->status |= CACHE_BLK_DIRTY;

  /* get user block data, if requested and it exists */
  if (udata)
    *udata = repl->user_data;

  /* update block status */
  repl->ready = now+lat;
  
  /* link this entry back into the hash table */
  if (cp->hsize)
    link_htab_ent(cp, &cp->sets[set], repl);

  /* return latency of the operation */
  return lat;


 cache_hit: /* slow hit handler */
  
  /* **HIT** */
  cp->hits++;

  /* copy data out of cache block, if block exists */
  if (cp->balloc)
    {
      CACHE_BCOPY(cmd, blk, bofs, p, nbytes);
    }

  /* update dirty status */
  if (cmd == Write)
    blk->status |= CACHE_BLK_DIRTY;

  /* if LRU replacement and this is not the first element of list, reorder */
  if (blk->way_prev && ((cp->policy == LRU) || (cp->policy == PDP)))
    {
      /* move this block to head of the way (MRU) list */
      update_way_list(&cp->sets[set], blk, Head);
    }

  /* PDP reuse routine */
    if(cp->policy == PDP){
      blk->rpd = PDP_PD - 1;
      blk->reused = 1;

      /* RD sampler and PDP counter updates */
      int rd;

      for (rd=0, fnode=cp->sets[set].fifo_head;
      fnode;
       fnode=fnode->next)
      {
        if(fnode->address == addr){
          //printf("%d ", rd);
          cp->PDP_Ni[rd]++;
          break;
        }
        rd++;
      }

      /* Push the address to head of FIFO */
      struct pdp_fifo_node * pnode = (struct pdp_fifo_node *)malloc(sizeof(struct pdp_fifo_node));
      pnode->address = addr;
      tnode = cp->sets[set].fifo_head;
      if(tnode) tnode->prev = pnode;
      pnode->next = tnode;
      pnode->prev = NULL;
      cp->sets[set].fifo_head = pnode;
      if(!tnode) cp->sets[set].fifo_tail = cp->sets[set].fifo_head;

      /* truncate fifo on overflow */
      if(cp->sets[set].nfifo == PDP_PD_MAX){
        fnode = cp->sets[set].fifo_tail;
        if(fnode){
          if(fnode->prev){
            fnode->prev->next = NULL;
            cp->sets[set].fifo_tail = fnode->prev;
          }
          free(fnode);
        }
      }
      else cp->sets[set].nfifo++;
    }

  /* tag is unchanged, so hash links (if they exist) are still valid */

  /* record the last block to hit */
  cp->last_tagset = CACHE_TAGSET(cp, addr);
  cp->last_blk = blk;

  /* get user block data, if requested and it exists */
  if (udata)
    *udata = blk->user_data;

  /* return first cycle data is available to access */
  return (int) MAX(cp->hit_latency, (blk->ready - now));

 cache_fast_hit: /* fast hit handler */
  
  /* **FAST HIT** */
  cp->hits++;

  /* copy data out of cache block, if block exists */
  if (cp->balloc)
    {
      CACHE_BCOPY(cmd, blk, bofs, p, nbytes);
    }

  /* update dirty status */
  if (cmd == Write)
    blk->status |= CACHE_BLK_DIRTY;

  /* this block hit last, no change in the way list */

  /* PDP reuse routine */
    if(cp->policy == PDP){
      blk->rpd = PDP_PD - 1;
      blk->reused = 1;

      /* previously hit block, so distance is zero; skipping fifo update */
          cp->PDP_Ni[0]++;
    }

  /* tag is unchanged, so hash links (if they exist) are still valid */

  /* get user block data, if requested and it exists */
  if (udata)
    *udata = blk->user_data;

  /* record the last block to hit */
  cp->last_tagset = CACHE_TAGSET(cp, addr);
  cp->last_blk = blk;

  /* return first cycle data is available to access */
  return (int) MAX(cp->hit_latency, (blk->ready - now));
}
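The RD sampler above maintains a per-set doubly linked FIFO of recently hit addresses. The node type is not shown in this excerpt; inferred from the pointer handling, it would be roughly:

/* inferred RD-sampler node layout (a sketch) */
struct pdp_fifo_node {
  md_addr_t address;                  /* block address recorded on a hit */
  struct pdp_fifo_node *prev, *next;  /* doubly linked FIFO links */
};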
Example #18
/* access a cache, perform a CMD operation on cache CP at address ADDR,
   places NBYTES of data at *P, returns latency of operation if initiated
   at NOW, places pointer to block user data in *UDATA, *P is untouched if
   cache blocks are not allocated (!CP->BALLOC), UDATA should be NULL if no
   user data is attached to blocks */
unsigned int				/* latency of access in cycles */
cache_access(struct cache_t *cp,	/* cache to access */
	     enum mem_cmd cmd,		/* access type, Read or Write */
	     md_addr_t addr,		/* address of access */
	     void *vp,			/* ptr to buffer for input/output */
	     int nbytes,		/* number of bytes to access */
	     tick_t now,		/* time of access */
	     byte_t **udata,		/* for return of user data ptr */
	     md_addr_t *repl_addr)	/* for address of replaced block */
{
  byte_t *p = vp;
  md_addr_t tag = CACHE_TAG(cp, addr);
  md_addr_t set = CACHE_SET(cp, addr);
  md_addr_t bofs = CACHE_BLK(cp, addr);
  struct cache_blk_t *blk, *repl;
  int lat = 0;

  /* default replacement address */
  if (repl_addr)
    *repl_addr = 0;

  /* check alignments */
  if ((nbytes & (nbytes-1)) != 0 || (addr & (nbytes-1)) != 0)
    fatal("cache: access error: bad size or alignment, addr 0x%08x", addr);

  /* access must fit in cache block */
  /* FIXME:
     ((addr + (nbytes - 1)) > ((addr & ~cp->blk_mask) + (cp->bsize - 1))) */
  if ((addr + nbytes) > ((addr & ~cp->blk_mask) + cp->bsize))
    fatal("cache: access error: access spans block, addr 0x%08x", addr);

  /* permissions are checked on cache misses */

  /* check for a fast hit: access to same block */
  if (CACHE_TAGSET(cp, addr) == cp->last_tagset)
    {
      /* hit in the same block */
      blk = cp->last_blk;
      goto cache_fast_hit;
    }
    
  if (cp->hsize)
    {
      /* highly-associative cache, access through the per-set hash tables */
      int hindex = CACHE_HASH(cp, tag);

      for (blk=cp->sets[set].hash[hindex];
	   blk;
	   blk=blk->hash_next)
	{
	  if (blk->tag == tag && (blk->status & CACHE_BLK_VALID))
	    goto cache_hit;
	}
    }
  else
    {
      /* low-associativity cache, linear search the way list */
      for (blk=cp->sets[set].way_head;
	   blk;
	   blk=blk->way_next)
	{
	  if (blk->tag == tag && (blk->status & CACHE_BLK_VALID))
	    goto cache_hit;
	}
    }

  /* cache block not found */

  /* **MISS** */
  cp->misses++;

  /* select the appropriate block to replace, and re-link this entry to
     the appropriate place in the way list */
  switch (cp->policy) {
  case LRU:
  case FIFO:
    repl = cp->sets[set].way_tail;
    update_way_list(&cp->sets[set], repl, Head);
    break;
	case DIP:
		repl = cp->sets[set].way_tail;
		enum list_loc_t where;		/* insert location */
		if ( cp->sets[set].DIP_set_type == LRU_set ) {
			where = Head;
			// update PSEL to bias BIP
			int max_PSEL = (1 << cp->width_PSEL) - 1;
			if ( cp->PSEL < max_PSEL ) {
				cp->PSEL ++;
			}
		}
		else if ( cp->sets[set].DIP_set_type == BIP_set ) {
			if ( cp->BIPCTR == 0 ) {
				// use LRU policy, MRU insertion
				where = Head;
			}
			else {
				// use LIP policy, LRU insertion
				where = Tail;
			}
			// update BIPCTR in a non-saturating way
//			int max_BIPCTR = 1 << cp->width_BIPCTR - 1;
//			if ( cp->BIPCTR < max_BIPCTR ) {
//					cp->BIPCTR ++;
//			}
//			else {
//					cp->BIPCTR = 0;
//			}
			// update PSEL to bias LRU
			if ( cp->PSEL > 0 ) {
				cp->PSEL --;
			}
		}
		else {
			// most significant bit of PSEL counter
			int MSB_PSEL = cp->PSEL >> (cp->width_PSEL - 1);
			if ( MSB_PSEL == 1 ) {
				// use BIP
				if ( cp->BIPCTR == 0 ) {
					// use LRU policy, MRU insertion
					where = Head;
				}
				else {
					// use LIP policy, LRU insertion
					where = Tail;
				}
				// need to update BIPCTR ?
			}
			else {
				// use LRU
				where = Head;
			}
		}
    update_way_list(&cp->sets[set], repl, where);
		// update BIPCTR in a non-saturating way
		int max_BIPCTR = (1 << cp->width_BIPCTR) - 1;
		if ( cp->BIPCTR < max_BIPCTR ) {
				cp->BIPCTR ++;
		}
		else {
				cp->BIPCTR = 0;
		}
		break;
  case Random:
    {
      int bindex = myrand() & (cp->assoc - 1);
      repl = CACHE_BINDEX(cp, cp->sets[set].blks, bindex);
    }
    break;
  default:
    panic("bogus replacement policy");
  }

  /* remove this block from the hash bucket chain, if hash exists */
  if (cp->hsize)
    unlink_htab_ent(cp, &cp->sets[set], repl);

  /* blow away the last block to hit */
  cp->last_tagset = 0;
  cp->last_blk = NULL;

  /* write back replaced block data */
  if (repl->status & CACHE_BLK_VALID)
    {
      cp->replacements++;

      if (repl_addr)
	*repl_addr = CACHE_MK_BADDR(cp, repl->tag, set);
 
      /* don't replace the block until outstanding misses are satisfied */
      lat += BOUND_POS(repl->ready - now);
 
      /* stall until the bus to next level of memory is available */
      lat += BOUND_POS(cp->bus_free - (now + lat));
 
      /* track bus resource usage */
      cp->bus_free = MAX(cp->bus_free, (now + lat)) + 1;

      if (repl->status & CACHE_BLK_DIRTY)
	{
	  /* write back the cache block */
	  cp->writebacks++;
	  lat += cp->blk_access_fn(Write,
				   CACHE_MK_BADDR(cp, repl->tag, set),
				   cp->bsize, repl, now+lat);
	}
    }

  /* update block tags */
  repl->tag = tag;
  repl->status = CACHE_BLK_VALID;	/* dirty bit set on update */

  /* read data block */
  lat += cp->blk_access_fn(Read, CACHE_BADDR(cp, addr), cp->bsize,
			   repl, now+lat);

  /* copy data out of cache block */
  if (cp->balloc)
    {
      CACHE_BCOPY(cmd, repl, bofs, p, nbytes);
    }

  /* update dirty status */
  if (cmd == Write)
    repl->status |= CACHE_BLK_DIRTY;

  /* get user block data, if requested and it exists */
  if (udata)
    *udata = repl->user_data;

  /* update block status */
  repl->ready = now+lat;

  /* link this entry back into the hash table */
  if (cp->hsize)
    link_htab_ent(cp, &cp->sets[set], repl);

  /* return latency of the operation */
  return lat;


 cache_hit: /* slow hit handler */
  
  /* **HIT** */
  cp->hits++;

  /* copy data out of cache block, if block exists */
  if (cp->balloc)
    {
      CACHE_BCOPY(cmd, blk, bofs, p, nbytes);
    }

  /* update dirty status */
  if (cmd == Write)
    blk->status |= CACHE_BLK_DIRTY;

  /* if LRU replacement and this is not the first element of list, reorder */
  if (blk->way_prev && cp->policy == LRU)
    {
      /* move this block to head of the way (MRU) list */
      update_way_list(&cp->sets[set], blk, Head);
    }

  /* if DIP replacement and this is not the first element of list, reorder */
  if (blk->way_prev && cp->policy == DIP)
    {
      /* move this block to head of the way (MRU) list */
      update_way_list(&cp->sets[set], blk, Head);
    }

  /* tag is unchanged, so hash links (if they exist) are still valid */

  /* record the last block to hit */
  cp->last_tagset = CACHE_TAGSET(cp, addr);
  cp->last_blk = blk;

  /* get user block data, if requested and it exists */
  if (udata)
    *udata = blk->user_data;

  /* return first cycle data is available to access */
  return (int) MAX(cp->hit_latency, (blk->ready - now));

 cache_fast_hit: /* fast hit handler */
  
  /* **FAST HIT** */
  cp->hits++;

  /* copy data out of cache block, if block exists */
  if (cp->balloc)
    {
      CACHE_BCOPY(cmd, blk, bofs, p, nbytes);
    }

  /* update dirty status */
  if (cmd == Write)
    blk->status |= CACHE_BLK_DIRTY;

  /* this block hit last, no change in the way list */

  /* tag is unchanged, so hash links (if they exist) are still valid */

  /* get user block data, if requested and it exists */
  if (udata)
    *udata = blk->user_data;

  /* record the last block to hit */
  cp->last_tagset = CACHE_TAGSET(cp, addr);
  cp->last_blk = blk;

  /* return first cycle data is available to access */
  return (int) MAX(cp->hit_latency, (blk->ready - now));
}
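Example #19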
/* access a cache, perform a CMD operation on cache CP at address ADDR,
   places NBYTES of data at *P, returns latency of operation if initiated
   at NOW, places pointer to block user data in *UDATA, *P is untouched if
   cache blocks are not allocated (!CP->BALLOC), UDATA should be NULL if no
   user data is attached to blocks */
unsigned int				/* latency of access in cycles */
cache_access(struct cache_t *cp,	/* cache to access */
	     enum mem_cmd cmd,		/* access type, Read or Write */
	     md_addr_t addr,		/* address of access */
	     void *vp,			/* ptr to buffer for input/output */
	     int nbytes,		/* number of bytes to access */
	     tick_t now,		/* time of access */
	     byte_t **udata,		/* for return of user data ptr */
	     md_addr_t *repl_addr)	/* for address of replaced block */
{
  byte_t *p = vp;
  md_addr_t tag = CACHE_TAG(cp, addr);
  md_addr_t set = CACHE_SET(cp, addr);
  md_addr_t bofs = CACHE_BLK(cp, addr);
  struct cache_blk_t *blk, *repl;
  int lat = 0;
  int victim;	//DRRIP-Used in while loop to check if we found the victim block or not
  int RRPV_counter;

  /* default replacement address */
  if (repl_addr)
    *repl_addr = 0;

  /* check alignments */
  if ((nbytes & (nbytes-1)) != 0 || (addr & (nbytes-1)) != 0)
    fatal("cache: access error: bad size or alignment, addr 0x%08x", addr);

  /* access must fit in cache block */
  /* FIXME:
     ((addr + (nbytes - 1)) > ((addr & ~cp->blk_mask) + (cp->bsize - 1))) */
  if ((addr + nbytes) > ((addr & ~cp->blk_mask) + cp->bsize))
    fatal("cache: access error: access spans block, addr 0x%08x", addr);

  /* permissions are checked on cache misses */

  /* check for a fast hit: access to same block */
  if (CACHE_TAGSET(cp, addr) == cp->last_tagset)
    {
      /* hit in the same block */
      blk = cp->last_blk;	//DRRIP-No need to make RRPV=0 here as the block is accessed previously, so in RRIP-HP it is already 0
      goto cache_fast_hit;
    }
    
  if (cp->hsize)
    {
      /* highly-associative cache, access through the per-set hash tables */
      int hindex = CACHE_HASH(cp, tag);

      for (blk=cp->sets[set].hash[hindex];
	   blk;
	   blk=blk->hash_next)
	{
	  if (blk->tag == tag && (blk->status & CACHE_BLK_VALID))
	    goto cache_hit;
	}
    }
  else
    {
      /* low-associativity cache, linear search the way list */
      for (blk=cp->sets[set].way_head;
	   blk;
	   blk=blk->way_next)
	{
	  if (blk->tag == tag && (blk->status & CACHE_BLK_VALID))
		{
			blk->RRPV=0;	//DRRIP-Implementing Re-Reference, when we encounter hit then make RRPV=0
			//printf("Its a hit and now RRPV=%d\n", blk->RRPV);
	    		goto cache_hit;
		}
	}
    }

  /* cache block not found */

  /* **MISS** */
  cp->misses++;

  /* select the appropriate block to replace, and re-link this entry to
     the appropriate place in the way list */

  switch (cp->policy) {
  case LRU:
  case FIFO:
    repl = cp->sets[set].way_tail;
    update_way_list(&cp->sets[set], repl, Head);
    break;
  case DRRIP:
	RRPV_counter=1<<(cp->RRPV_bits);
	//printf("Max choices=%d\n",RRPV_counter);
	victim=0;
	while(victim==0)	//DRRIP-We keep on looking till the time we don't find the victim block
	{
		//DRRIP-Traversing code copied from hit policy line 559
      		for (blk=cp->sets[set].way_head;blk;blk=blk->way_next)	//DRRIP-Resolves the tie breaker automatically
		{       
			//printf("Value of RRPVin a set=%d\n",blk->RRPV);
			if(blk->RRPV==RRPV_counter-1)                 //DRRIP-To check if we have a block on set with RRPV=3
			{
				victim=1;
				repl=blk;		//DRRIP-Address of the victim block assigned to the block to be inserted
				break;
			}
		}
		if(victim==0)	//DRRIP-In case we were unable to find a blk with RRPV=3
		{
			 for (blk=cp->sets[set].way_head;blk;blk=blk->way_next)  //DRRIP-Traverse the blocks and increment RRPV by 1
				blk->RRPV++;
                }
		if(victim==1)	//DRRIP-Work is done, now get out and update the parameters of inserted block
			break;
	}
	//DRRIP-Now that we know this is the block to replace, populate repl with the new data, tag, and RRPV value
    	//printf("Outside the while and victim=%d\n",victim);
      
	//DRRIP-Update the RRPV value of the new inserted block for RRIP
  	if(set==0 || set%1024==0 || set%33==0)
	{
	//	printf("Inside SRRIP with original RRPV=%d\n",repl->RRPV);
		repl->RRPV=RRPV_counter-2;
	//	printf("Inside SRRIP with new RRPV=%d\n",repl->RRPV);
		if(cp->PSEL<1023)
			cp->PSEL++;
	//	printf("Inside srrip with psel=%d\n",cp->PSEL);
		break;
	}
  	else if(set%31==0)
	{
		if(cp->throttle1==31)
		{
	//		printf("Inside BRRIP infrequent case with original RRPV=%d\n",repl->RRPV);
			repl->RRPV=RRPV_counter-2;
	//		printf("Inside BRRIP infrequent case with new RRPV=%d\n",repl->RRPV);
			cp->throttle1=0;
		}
		else
		{
	//		printf("Inside BRRIP majority case with original RRPV=%d\n",repl->RRPV);
			repl->RRPV=RRPV_counter-1;
	//		printf("Inside BRRIP majority case with new RRPV=%d\n",repl->RRPV);
			cp->throttle1++;
		}
		if(cp->PSEL>0)
			cp->PSEL--;
	//	printf("Value of throttle1=%d\n",cp->throttle1);
		break;
	}
  	else
	{
	//	printf("In follower set with PSEL=%d\n",cp->PSEL);
		if(cp->PSEL<511)
		{
	//		printf("In follower SRRIP with original RRPV=%d\n",repl->RRPV);
			repl->RRPV=RRPV_counter-2;
	//		printf("In follower SRRIP with new RRPV=%d\n",repl->RRPV);
			break;
		
		}
		else
		{
			if(cp->throttle2==31)
			{
	//			printf("Inside follower BRRIP infrequent case with original RRPV=%d\n",repl->RRPV);
				repl->RRPV=RRPV_counter-2;
	//			printf("Inside follower  BRRIP infrquent case with new RRPV=%d\n",repl->RRPV);
				cp->throttle2=0;
			}
			else
			{
	//			printf("Inside follower BRRIP majority case with original RRPV=%d\n",repl->RRPV);
				repl->RRPV=RRPV_counter-1;
	//			printf("Inside follower BRRIP majority case with new RRPV=%d\n",repl->RRPV);
				cp->throttle2++;
			}
	//		printf("Value of throttle2=%d\n",cp->throttle2);
			break;
		}
	}		//End of DRRIP
  case Random:
    {
      int bindex = myrand() & (cp->assoc - 1);
      repl = CACHE_BINDEX(cp, cp->sets[set].blks, bindex);
    }
    break;
  default:
    panic("bogus replacement policy");
  }

  /* remove this block from the hash bucket chain, if hash exists */
  if (cp->hsize)
    unlink_htab_ent(cp, &cp->sets[set], repl);

  /* blow away the last block to hit */
  cp->last_tagset = 0;
  cp->last_blk = NULL;

  /* write back replaced block data */
  if (repl->status & CACHE_BLK_VALID)
    {
      cp->replacements++;

      if (repl_addr)
	*repl_addr = CACHE_MK_BADDR(cp, repl->tag, set);
 
      /* don't replace the block until outstanding misses are satisfied */
      lat += BOUND_POS(repl->ready - now);
 
      /* stall until the bus to next level of memory is available */
      lat += BOUND_POS(cp->bus_free - (now + lat));
 
      /* track bus resource usage */
      cp->bus_free = MAX(cp->bus_free, (now + lat)) + 1;

      if (repl->status & CACHE_BLK_DIRTY)
	{
	  /* write back the cache block */
	  cp->writebacks++;
	  lat += cp->blk_access_fn(Write,
				   CACHE_MK_BADDR(cp, repl->tag, set),
				   cp->bsize, repl, now+lat);
	}
    }

  /* update block tags */
  repl->tag = tag;
  repl->status = CACHE_BLK_VALID;	/* dirty bit set on update */
  
  /* read data block */
  lat += cp->blk_access_fn(Read, CACHE_BADDR(cp, addr), cp->bsize,
			   repl, now+lat);

  /* copy data out of cache block */
  if (cp->balloc)
    {
      CACHE_BCOPY(cmd, repl, bofs, p, nbytes);
    }

  /* update dirty status */
  if (cmd == Write)
    repl->status |= CACHE_BLK_DIRTY;

  /* get user block data, if requested and it exists */
  if (udata)
    *udata = repl->user_data;

  /* update block status */
  repl->ready = now+lat;

  /* link this entry back into the hash table */
  if (cp->hsize)
    link_htab_ent(cp, &cp->sets[set], repl);

   /* return latency of the operation */
  return lat;
  

 cache_hit: /* slow hit handler */
  
  /* **HIT** */
  cp->hits++;

  /* copy data out of cache block, if block exists */
  if (cp->balloc)
    {
      CACHE_BCOPY(cmd, blk, bofs, p, nbytes);
    }

  /* update dirty status */
  if (cmd == Write)
    blk->status |= CACHE_BLK_DIRTY;

  /* if LRU replacement and this is not the first element of list, reorder */
  if (blk->way_prev && cp->policy == LRU)
    {
      /* move this block to head of the way (MRU) list */
      update_way_list(&cp->sets[set], blk, Head);
    }

  /* tag is unchanged, so hash links (if they exist) are still valid */

  /* record the last block to hit */
  cp->last_tagset = CACHE_TAGSET(cp, addr);
  cp->last_blk = blk;

  /* get user block data, if requested and it exists */
  if (udata)
    *udata = blk->user_data;

  /* return first cycle data is available to access */
  return (int) MAX(cp->hit_latency, (blk->ready - now));

 cache_fast_hit: /* fast hit handler */
  
  /* **FAST HIT** */
  cp->hits++;

  /* copy data out of cache block, if block exists */
  if (cp->balloc)
    {
      CACHE_BCOPY(cmd, blk, bofs, p, nbytes);
    }

  /* update dirty status */
  if (cmd == Write)
    blk->status |= CACHE_BLK_DIRTY;

  /* this block hit last, no change in the way list */

  /* tag is unchanged, so hash links (if they exist) are still valid */

  /* get user block data, if requested and it exists */
  if (udata)
    *udata = blk->user_data;

  /* record the last block to hit */
  cp->last_tagset = CACHE_TAGSET(cp, addr);
  cp->last_blk = blk;

  /* return first cycle data is available to access */
  return (int) MAX(cp->hit_latency, (blk->ready - now));
}
Example #20
unsigned int				/* latency of access in cycles */
cache_access(struct cache_t *cp,	/* cache to access */
	     enum mem_cmd cmd,		/* access type, Read or Write */
	     md_addr_t addr,		/* address of access */
	     void *vp,			/* ptr to buffer for input/output */
	     int nbytes,		/* number of bytes to access */
	     tick_t now,		/* time of access */
	     byte_t **udata,		/* for return of user data ptr */
	     md_addr_t *repl_addr)	/* for address of replaced block */
{
  acheck++;
  byte_t *p = vp;
  md_addr_t tag;

  /* in the second (rehash) probe, use the pseudo-associative tag function */
  if (pseudo_check == 1)
    tag = CACHE_TAG_PSEUDOASSOC(cp, addr);
  else
    tag = CACHE_TAG(cp, addr);
  md_addr_t set = CACHE_SET(cp, addr);
  md_addr_t bofs = CACHE_BLK(cp, addr);

  /* alternate (rehash) address: flip the low set-index bit */
  md_addr_t addr1 = addr;
  addr1 ^= 1 << (cp->set_shift);
  struct cache_blk_t *blk, *repl;
  int lat = 0;

  /* default replacement address */
  if (repl_addr)
    *repl_addr = 0;

  /* check alignments */
  if ((nbytes & (nbytes-1)) != 0 || (addr & (nbytes-1)) != 0)
    fatal("cache: access error: bad size or alignment, addr 0x%08x", addr);

  /* access must fit in cache block */
  /* FIXME:
     ((addr + (nbytes - 1)) > ((addr & ~cp->blk_mask) + (cp->bsize - 1))) */
  if ((addr + nbytes) > ((addr & ~cp->blk_mask) + cp->bsize))
    fatal("cache: access error: access spans block, addr 0x%08x", addr);

  /* permissions are checked on cache misses */

  /* check for a fast hit: access to same block */
  if (CACHE_TAGSET(cp, addr) == cp->last_tagset)
    {
      /* hit in the same block */
      blk = cp->last_blk;
      goto cache_fast_hit;
    }

  if (cp->hsize)
    {
      /* highly-associative cache, access through the per-set hash tables */
      int hindex = CACHE_HASH(cp, tag);

      for (blk=cp->sets[set].hash[hindex];
	   blk;
	   blk=blk->hash_next)
	{
	  if (blk->tag == tag && (blk->status & CACHE_BLK_VALID))
	    goto cache_hit;
	}
    }
  else
    {
      /* low-associativity cache, linear search the way list */
      for (blk=cp->sets[set].way_head;
	   blk;
	   blk=blk->way_next)
	{
	  if (blk->tag == tag && (blk->status & CACHE_BLK_VALID))
	    goto cache_hit;
	}
    }

  /* cache block not found */

  /* **MISS** */

  /* select the appropriate block to replace, and re-link this entry to
     the appropriate place in the way list */
  switch (cp->policy) {
  case LRU:
  case FIFO:
    repl = cp->sets[set].way_tail;
    update_way_list(&cp->sets[set], repl, Head);
    break;
  case Random:
    {
      int bindex = myrand() & (cp->assoc - 1);
      repl = CACHE_BINDEX(cp, cp->sets[set].blks, bindex);
    }
    break;
  default:
    panic("bogus replacement policy");
  }

  if (pseudo_check == 1)
    {
      /* rehash_bit == 1 means the block at the head of this set already
	 lives at its rehash location */
      if (cp->sets[set].way_head->rehash_bit == 1)
	{
	  /* both probes missed: record whether this fill belongs to the
	     rehash location, then finish the miss */
	  cp->sets[set].way_head->rehash_bit = (hash_check == 1) ? 1 : 0;
	  goto cache_missfinal;
	}

      if (cp->sets[set].way_head->rehash_bit == 0)
	{
	  if (hash_check == 1)
	    {
	      /* filling the alternate set: mark it rehashed and remember the
		 block so the primary probe can swap with it afterwards */
	      cp->sets[set].way_head->rehash_bit = 1;
	      temp = cp->sets[set].way_head;
	      goto cache_missfinal;
	    }
	  else
	    {
	      /* primary probe missed: retry at the alternate (rehash) set */
	      hash_check = 1;
	      cache_access(cp, cmd, addr1, NULL, nbytes, now, NULL, NULL);
	    }
	}

      /* swap the primary block with the one just installed in the alternate
	 set (saved in temp by the recursive call above) */
      temp1 = cp->sets[set].way_head;
      {
	md_addr_t swap_tag = temp1->tag;
	unsigned int swap_status = temp1->status;
	tick_t swap_ready = temp1->ready;

	temp1->tag = temp->tag;
	if (temp->status != (CACHE_BLK_VALID | CACHE_BLK_DIRTY))
	  temp1->status = temp->status;
	temp1->ready = temp->ready;

	temp->tag = swap_tag;
	temp->status = swap_status;
	temp->ready = swap_ready;
      }
    }
 
 cache_missfinal:
  hash_check = 0;
  cp->misses++;

  /* remove this block from the hash bucket chain, if hash exists */
  if (cp->hsize)
    unlink_htab_ent(cp, &cp->sets[set], repl);

  /* blow away the last block to hit */
  cp->last_tagset = 0;
  cp->last_blk = NULL;

  /* write back replaced block data */
  if (repl->status & CACHE_BLK_VALID)
    {
      cp->replacements++;

      if (repl_addr)
	*repl_addr = CACHE_MK_BADDR(cp, repl->tag, set);
 
      /* don't replace the block until outstanding misses are satisfied */
      lat += BOUND_POS(repl->ready - now);
 
      /* stall until the bus to next level of memory is available */
      lat += BOUND_POS(cp->bus_free - (now + lat));
 
      /* track bus resource usage */
      cp->bus_free = MAX(cp->bus_free, (now + lat)) + 1;

      if (repl->status & CACHE_BLK_DIRTY)
	{
	  /* write back the cache block */
	  cp->writebacks++;
	  lat += cp->blk_access_fn(Write,
				   CACHE_MK_BADDR(cp, repl->tag, set),
				   cp->bsize, repl, now+lat);
	}
    }

  /* update block tags */
  repl->tag = tag;
  repl->status = CACHE_BLK_VALID;	/* dirty bit set on update */

  /* read data block */
  lat += cp->blk_access_fn(Read, CACHE_BADDR(cp, addr), cp->bsize,
			   repl, now+lat);

  /* copy data out of cache block */
  if (cp->balloc)
    {
      CACHE_BCOPY(cmd, repl, bofs, p, nbytes);
    }

  /* update dirty status */
  if (cmd == Write)
    repl->status |= CACHE_BLK_DIRTY;

  /* get user block data, if requested and it exists */
  if (udata)
    *udata = repl->user_data;

  /* update block status */
  repl->ready = now+lat;

  /* link this entry back into the hash table */
  if (cp->hsize)
    link_htab_ent(cp, &cp->sets[set], repl);

  /* return latency of the operation */
  return lat;


 cache_hit: /* slow hit handler */
  
  /* **HIT** */
  cp->hits++;

  /* copy data out of cache block, if block exists */
  if (cp->balloc)
    {
      CACHE_BCOPY(cmd, blk, bofs, p, nbytes);
    }

  /* update dirty status */
  if (cmd == Write)
    blk->status |= CACHE_BLK_DIRTY;

  /* if LRU replacement and this is not the first element of list, reorder */
  if (blk->way_prev && cp->policy == LRU)
    {
      /* move this block to head of the way (MRU) list */
      update_way_list(&cp->sets[set], blk, Head);
    }

  /* tag is unchanged, so hash links (if they exist) are still valid */

  /* record the last block to hit */
  cp->last_tagset = CACHE_TAGSET(cp, addr);
  cp->last_blk = blk;

  /* get user block data, if requested and it exists */
  if (udata)
    *udata = blk->user_data;

  /* return first cycle data is available to access */
  return (int) MAX(cp->hit_latency, (blk->ready - now));

 cache_fast_hit: /* fast hit handler */
  
  /* **FAST HIT** */
  cp->hits++;

  /* copy data out of cache block, if block exists */
  if (cp->balloc)
    {
      CACHE_BCOPY(cmd, blk, bofs, p, nbytes);
    }

  /* update dirty status */
  if (cmd == Write)
    blk->status |= CACHE_BLK_DIRTY;

  /* this block hit last, no change in the way list */

  /* tag is unchanged, so hash links (if they exist) are still valid */

  /* get user block data, if requested and it exists */
  if (udata)
    *udata = blk->user_data;

  /* record the last block to hit */
  cp->last_tagset = CACHE_TAGSET(cp, addr);
  cp->last_blk = blk;

  /* return first cycle data is available to access */
  return (int) MAX(cp->hit_latency, (blk->ready - now));
}
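
Example #20 implements a pseudo-associative (column-associative) cache: on a primary miss it re-probes the partner set whose index differs in one bit, using the hash_check global to distinguish the two probes and per-block rehash bits to remember which blocks live at their alternate location. The address transformation is just an XOR on the set-index field; a standalone sketch of it follows (set_shift and the sample address here are illustrative):

#include <stdio.h>

/* flip the low set-index bit of an address, as addr1 is computed above;
   set_shift is the number of block-offset bits */
static unsigned long
rehash_addr(unsigned long addr, int set_shift)
{
  return addr ^ (1UL << set_shift);
}

int main(void)
{
  /* with 64-byte blocks (set_shift = 6), 0x1234 and 0x1274 map to
     partner sets and serve as each other's rehash location */
  printf("0x%lx <-> 0x%lx\n", 0x1234UL, rehash_addr(0x1234UL, 6));
  return 0;
}
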
Example #21
0
/* access a cache, perform a CMD operation on cache CP at address ADDR,
   places NBYTES of data at *P, returns latency of operation if initiated
   at NOW, places pointer to block user data in *UDATA, *P is untouched if
   cache blocks are not allocated (!CP->BALLOC), UDATA should be NULL if no
   user data is attached to blocks */
unsigned int				/* latency of access in cycles */
cache_access(struct cache_t *cp,	/* cache to access */
	     enum mem_cmd cmd,		/* access type, Read or Write */
	     md_addr_t addr,		/* address of access */
	     void *vp,			/* ptr to buffer for input/output */
	     int nbytes,		/* number of bytes to access */
	     tick_t now,		/* time of access */
	     byte_t **udata,		/* for return of user data ptr */
	     md_addr_t *repl_addr)	/* for address of replaced block */
{
  byte_t *p = vp;
  md_addr_t tag = CACHE_TAG(cp, addr);
  md_addr_t set = CACHE_SET(cp, addr);
  md_addr_t bofs = CACHE_BLK(cp, addr);
  struct cache_blk_t *blk, *repl;
  int lat = 0;
  int possible_real_miss = 0;
  int low_leak_penalty_flag = 0;
  int decay_caused_miss = FALSE;	/* TRUE if it's a decay-caused miss */

  /* default replacement address */
  if (repl_addr)
    *repl_addr = 0;

  /* check alignments */
  if ((nbytes & (nbytes-1)) != 0 || (addr & (nbytes-1)) != 0)
    fatal("cache: access error: bad size or alignment, addr 0x%08x", addr);

  /* access must fit in cache block */
  /* FIXME:     
	((addr + (nbytes - 1)) > ((addr & ~cp->blk_mask) + (cp->bsize - 1))) */
  if ((addr + nbytes) > ((addr & ~cp->blk_mask) + cp->bsize))
    fatal("cache: access error: access spans block, addr 0x%08x", addr);

  /* permissions are checked on cache misses */

  /* check for a fast hit: access to same block */
  if (CACHE_TAGSET(cp, addr) == cp->last_tagset)
    {
      /* hit in the same block */
      blk = cp->last_blk;
      goto cache_fast_hit;
    }
    
  if (cp->hsize)
    {
      /* highly-associative cache, access through the per-set hash tables */
      int hindex = CACHE_HASH(cp, tag);

      for (blk=cp->sets[set].hash[hindex];
	   blk;
	   blk=blk->hash_next)
	{
	  if ((blk->status & CACHE_BLK_DECAYED) && cache_leak_is_ctrlled())
	    low_leak_penalty_flag = 1;
	  if (blk->tag == tag)
	    {
	      /* Leakage: induced misses only in state-losing ctrl techniques */
	      if ((blk->status & CACHE_BLK_DECAYED) && cache_leak_ctrl_is_state_losing())
		{
		  decay_caused_miss = TRUE;
		  induced_decay_misses++;
		  break;
		}
	      else if ((blk->status & CACHE_BLK_DECAYED) && (blk->status & CACHE_BLK_VALID)
		       && cache_leak_is_ctrlled())
		{
		  /*
		   * Leakage: update stats.
		   * In state-preserving ctrl, a mode switch to high happens
		   * on a hit to a decayed block too.
		   */
		  mode_switch_l2h_incr ();
		  /*
		   * Leakage throughout the cache is assumed uniform. To model
		   * the settling time of the leakage current, lines are
		   * assumed to be turned off after 'switch_cycles_l2h/2';
		   * settling is taken to be a linear function of time.
		   */
		  low_leak_ratio_dcr(1.0/(cp->nsets * cp->assoc), get_switch_cycles_l2h()/2);
		  goto cache_hit;
		}
	      else if (blk->status & CACHE_BLK_VALID)
		goto cache_hit;
	    }
	  else if (blk->status & CACHE_BLK_DECAYED)
	    possible_real_miss = 1;
	}
    }
  else
    {
      /* low-associativity cache, linear search the way list */
      for (blk=cp->sets[set].way_head;
	   blk;
	   blk=blk->way_next)
	{
	  if ((blk->status & CACHE_BLK_DECAYED) && cache_leak_is_ctrlled())
	    low_leak_penalty_flag = 1;
	  if (blk->tag == tag)
	    {
	      /* Leakage: induced misses only in state-losing ctrl techniques */
	      if ((blk->status & CACHE_BLK_DECAYED) && cache_leak_ctrl_is_state_losing())
		{
		  decay_caused_miss = TRUE;
		  if (cp == decayed_cache)
		    {
		      induced_decay_misses++;
		      break;
		    }
		}
	      else if ((blk->status & CACHE_BLK_DECAYED) && (blk->status & CACHE_BLK_VALID)
		       && cache_leak_is_ctrlled())
		{
		  /*
		   * Leakage: update stats.
		   * In state-preserving ctrl, a mode switch to high happens
		   * on a hit to a decayed block too.
		   */
		  mode_switch_l2h_incr ();
		  /*
		   * Leakage throughout the cache is assumed uniform. To model
		   * the settling time of the leakage current, lines are
		   * assumed to be turned off after 'switch_cycles_l2h/2';
		   * settling is taken to be a linear function of time.
		   */
		  low_leak_ratio_dcr(1.0/(cp->nsets * cp->assoc), get_switch_cycles_l2h()/2);
		  goto cache_hit;
		}
	      else if (blk->status & CACHE_BLK_VALID)
		goto cache_hit;
	    }
	  else if (blk->status & CACHE_BLK_DECAYED)
	    possible_real_miss = 1;
	}
    }

  /* cache block not found */

  /* **MISS** */
  cp->misses++;

  if (cmd == Write)
    cp->write_misses++;
  else
    cp->read_misses++;

  if (cp == decayed_cache && !decay_caused_miss && possible_real_miss)
    real_decay_misses++;

  /* select the appropriate block to replace, and re-link this entry to
     the appropriate place in the way list */
  switch (cp->policy) {
  case LRU:
  case FIFO:
    repl = cp->sets[set].way_tail;

/* FIXMEHZG: replacement policy: choose invalid block first, does this diff from LRU?  */
#if defined(cache_decay)
	if(b_decay_enabled)
	{
		int k, found=0;
		for (blk=cp->sets[set].blks, k=0; k< cp->assoc; blk++, k++)
		{
			/* invalid block has highest priority to be evicted */
			if (!(blk->status & CACHE_BLK_VALID))	
			{
				repl = blk;
				found=1;
				break;
			}
		}
		/* Leakage: if an invalid blk can't be found, find a shutdown one */
		if (!found && cache_leak_ctrl_is_state_losing())
			for (blk=cp->sets[set].blks, k=0; k< cp->assoc; blk++, k++)
			{
				/* failing that, a decayed (shut-down) block is the next-best victim */
				if (blk->status & CACHE_BLK_DECAYED)	
				{
					repl = blk;
					break;
				}
			}
	}
#endif /* defined(cache_decay) */

    update_way_list(&cp->sets[set], repl, Head);
    break;
  case Random:
    {
      int bindex = myrand() & (cp->assoc - 1);
      repl = CACHE_BINDEX(cp, cp->sets[set].blks, bindex);
    }
    break;
  default:
    panic("bogus replacement policy");
  }

  /* remove this block from the hash bucket chain, if hash exists */
  if (cp->hsize)
    unlink_htab_ent(cp, &cp->sets[set], repl);

  /* blow away the last block to hit */
  cp->last_tagset = 0;
  cp->last_blk = NULL;

  /* Leakage: pay the wake-up penalty when a decayed line was probed in a
     leakage-controlled cache (latency hiding assumed) */
  if (low_leak_penalty_flag == 1 && cache_leak_is_ctrlled())
    lat += get_low_leak_penalty();


  /* Leakage: update stats */
  /* a mode switch to high happens if the block to be evicted is decayed */
  if (repl->status & CACHE_BLK_DECAYED)
  {
  	mode_switch_l2h_incr ();
	/*
	 * leakage throughout the cache assumed uniform. Also to model 
	 * the effect of settling time of leakage current, the lines 
	 * are assumed to be turned off after 'switch_cycles_l2h/2'. 
	 * The assumption is that settling is a linear function of time.
	 */
	low_leak_ratio_dcr(1.0/(cp->nsets * cp->assoc), get_switch_cycles_l2h()/2);
  }

  /* write back replaced block data */
  if (repl->status & CACHE_BLK_VALID)
    {
      cp->replacements++;

      if (repl_addr)
		*repl_addr = CACHE_MK_BADDR(cp, repl->tag, set);
 
      /* don't replace the block until outstanding misses are satisfied */
      lat += BOUND_POS(repl->ready - now);
 
      /* stall until the bus to next level of memory is available */
      lat += BOUND_POS(cp->bus_free - (now + lat));
 
      /* track bus resource usage */
      cp->bus_free = MAX(cp->bus_free, (now + lat)) + 1;

      if (repl->status & CACHE_BLK_DIRTY)
	{
	  /* write back the cache block */
	  cp->writebacks++;


	  lat += cp->blk_access_fn(Write,
				   CACHE_MK_BADDR(cp, repl->tag, set),
				   cp->bsize, repl, now+lat);
	}
    }/* if */

  if (b_decay_profile_enabled)
    update_cache_block_stats_when_miss(cp, repl, cmd, decay_caused_miss);

  /* update block tags */
  repl->tag = tag;
  repl->status = CACHE_BLK_VALID;	/* dirty bit set on update */
  repl->status &= ~CACHE_BLK_DECAYED;	/* not decayed	*/

  /* read data block */ 
  lat += cp->blk_access_fn(Read, CACHE_BADDR(cp, addr), cp->bsize, repl, now+lat);

  /* copy data out of cache block */
  if (cp->balloc)
    {
      CACHE_BCOPY(cmd, repl, bofs, p, nbytes);
    }

  /* update dirty status */
  if (cmd == Write)
    {
      repl->status |= CACHE_BLK_DIRTY;
      repl->time_dirty = sim_cycle;
    }

  /* get user block data, if requested and it exists */
  if (udata)
    *udata = repl->user_data;

  /* update block status */
  repl->ready = now+lat;

  /* link this entry back into the hash table */
  if (cp->hsize)
    link_htab_ent(cp, &cp->sets[set], repl);

  /* return latency of the operation */
  return lat;


 cache_hit: /* slow hit handler */

  /* Leakage: for hits in low leak mode */

  if ((blk->status & CACHE_BLK_DECAYED) && cache_leak_is_ctrlled())
    {
      blk->status &= ~CACHE_BLK_DECAYED;
      /* latency hiding assumed */
      if (blk->ready < now + get_low_leak_penalty())
	blk->ready = now + get_low_leak_penalty() + cp->hit_latency;
    }

  if (b_decay_profile_enabled)
    update_cache_block_stats_when_hit(cp, blk, cmd);

  /* **HIT** */
  cp->hits++;

  /* copy data out of cache block, if block exists */
  if (cp->balloc)
    {
      CACHE_BCOPY(cmd, blk, bofs, p, nbytes);
    }

  /* update dirty status */
  if (cmd == Write)
    {
      blk->status |= CACHE_BLK_DIRTY;
      blk->time_dirty = sim_cycle;
    }

  /* if LRU replacement and this is not the first element of list, reorder */
  if (blk->way_prev && cp->policy == LRU)
    {
      /* move this block to head of the way (MRU) list */
      update_way_list(&cp->sets[set], blk, Head);
    }

  /* tag is unchanged, so hash links (if they exist) are still valid */

  /* record the last block to hit */
  cp->last_tagset = CACHE_TAGSET(cp, addr);
  cp->last_blk = blk;

  /* get user block data, if requested and it exists */
  if (udata)
    *udata = blk->user_data;

  /* return first cycle data is available to access */
  return (int) MAX(cp->hit_latency, (blk->ready - now));

 cache_fast_hit: /* fast hit handler */

  /* Leakage: for hits in low leak mode */

  if (blk->status & CACHE_BLK_DECAYED)
    fatal("can't have decayed block in fast_hit");

  if (b_decay_profile_enabled)
    update_cache_block_stats_when_hit(cp, blk, cmd);
  
  /* **FAST HIT** */
  cp->hits++;

  /* copy data out of cache block, if block exists */
  if (cp->balloc)
    {
      CACHE_BCOPY(cmd, blk, bofs, p, nbytes);
    }

  /* update dirty status */
  if (cmd == Write)
    {
      blk->status |= CACHE_BLK_DIRTY;
      blk->time_dirty = sim_cycle;
    }

  /* this block hit last, no change in the way list */

  /* tag is unchanged, so hash links (if they exist) are still valid */

  /* get user block data, if requested and it exists */
  if (udata)
    *udata = blk->user_data;

  /* record the last block to hit */
  cp->last_tagset = CACHE_TAGSET(cp, addr);
  cp->last_blk = blk;

  /* return first cycle data is available to access */
  return (int) MAX(cp->hit_latency, (blk->ready - now));
}/* cache_access */
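
Example #21 charges each re-activated line an equal 1/(nsets * assoc) share of the cache's leakage via low_leak_ratio_dcr. A toy standalone model of that bookkeeping follows; the function names and the 128-set, 4-way geometry are illustrative, not the project's code:

#include <stdio.h>

/* fraction of the cache currently in low-leakage (decayed) mode;
   waking a line removes its share */
static double low_leak_ratio = 1.0;

static void
wake_line(int nsets, int assoc)
{
  low_leak_ratio -= 1.0 / (nsets * assoc);
  if (low_leak_ratio < 0.0)
    low_leak_ratio = 0.0;
}

int main(void)
{
  int i;
  for (i = 0; i < 3; i++)
    wake_line(128, 4);	/* wake three lines of a 128-set, 4-way cache */
  printf("low-leak fraction: %.4f\n", low_leak_ratio);	/* prints 0.9941 */
  return 0;
}
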
Example #22
0
File: cache.c  Project: jnaneshm/614_hw4
/* access a cache, perform a CMD operation on cache CP at address ADDR,
   places NBYTES of data at *P, returns latency of operation if initiated
   at NOW, places pointer to block user data in *UDATA, *P is untouched if
   cache blocks are not allocated (!CP->BALLOC), UDATA should be NULL if no
   user data is attached to blocks */
unsigned int				/* latency of access in cycles */
cache_access(struct cache_t *cp,	/* cache to access */
	     enum mem_cmd cmd,		/* access type, Read or Write */
	     md_addr_t addr,		/* address of access */
	     void *vp,			/* ptr to buffer for input/output */
	     int nbytes,		/* number of bytes to access */
	     tick_t now,		/* time of access */
	     byte_t **udata,		/* for return of user data ptr */
	     md_addr_t *repl_addr,	/* for address of replaced block */
 	     tick_t *mem_ready)	/* ptr to mem_ready of ruu_station */
{
  byte_t *p = vp;
  md_addr_t tag = CACHE_TAG(cp, addr);
  md_addr_t set = CACHE_SET(cp, addr);
  md_addr_t bofs = CACHE_BLK(cp, addr);
  md_addr_t blk_addr = CACHE_TAGSET(cp, addr);
  struct cache_blk_t *blk, *repl;
  int lat = 0;
  int i, mshr_hit = -1;

  /* default replacement address */
  if (repl_addr)
    *repl_addr = 0;

  /* check alignments */
  if ((nbytes & (nbytes-1)) != 0 || (addr & (nbytes-1)) != 0)
    fatal("cache: access error: bad size or alignment, addr 0x%08x", addr);

  /* access must fit in cache block */
  /* FIXME:
     ((addr + (nbytes - 1)) > ((addr & ~cp->blk_mask) + (cp->bsize - 1))) */
  if ((addr + nbytes) > ((addr & ~cp->blk_mask) + cp->bsize))
    fatal("cache: access error: access spans block, addr 0x%08x", addr);

  /* permissions are checked on cache misses */

  /* check for a fast hit: access to same block */
  if (CACHE_TAGSET(cp, addr) == cp->last_tagset)
    {
      /* hit in the same block */
      blk = cp->last_blk;
      goto cache_fast_hit;
    }
    
  if (cp->hsize)
    {
      /* highly-associative cache, access through the per-set hash tables */
      int hindex = CACHE_HASH(cp, tag);

      for (blk=cp->sets[set].hash[hindex];
	   blk;
	   blk=blk->hash_next)
	{
	  if (blk->tag == tag && (blk->status & CACHE_BLK_VALID))
	    goto cache_hit;
	}
    }
  else
    {
      /* low-associativity cache, linear search the way list */
      for (blk=cp->sets[set].way_head;
	   blk;
	   blk=blk->way_next)
	{
	  if (blk->tag == tag && (blk->status & CACHE_BLK_VALID))
	    goto cache_hit;
	}
    }

  /* cache block not found */

  /* **MISS** */
  cp->misses++;

  /* MSHR: search the miss registers first; an entry whose ready time has
     passed is free and can take this miss, otherwise the access must stall */
  if (cp->num_mshr>0) {
     cp->mshr_accesses++;
     for (i = 0; i < cp->num_mshr; i++) {
        if (cp->mshr[i].ready <= now) {
           /* we have an empty mshr, so we can proceed with the miss */
           mshr_hit = i;
           cp->mshr_misses++;
           break;
        }
     }

     if (mshr_hit == -1) { /* no empty mshr, so stall! */
       cp->mshr_full++;
       if (mem_ready != NULL)
         *mem_ready = cp->ready;
       return MSHR_FULL;
     }
  }
 
//printf("B4 repl\n");
  /* select the appropriate block to replace, and re-link this entry to
     the appropriate place in the way list */
  switch (cp->policy) {
  case LRU:
  case FIFO:
    repl = cp->sets[set].way_tail;
    update_way_list(&cp->sets[set], repl, Head);
    break;
  case Random:
    {
      int bindex = myrand() & (cp->assoc - 1);
      repl = CACHE_BINDEX(cp, cp->sets[set].blks, bindex);
    }
    break;
  default:
    panic("bogus replacement policy");
  }

  /* remove this block from the hash bucket chain, if hash exists */
  if (cp->hsize)
    unlink_htab_ent(cp, &cp->sets[set], repl);

  /* blow away the last block to hit */
  cp->last_tagset = 0;
  cp->last_blk = NULL;

  /* write back replaced block data */
  if (repl->status & CACHE_BLK_VALID)
    {
      cp->replacements++;

      if (repl_addr)
	*repl_addr = CACHE_MK_BADDR(cp, repl->tag, set);
 
      /* don't replace the block until outstanding misses are satisfied */
      lat += BOUND_POS(repl->ready - now);
 
      /* stall until the bus to next level of memory is available */
      lat += BOUND_POS(cp->bus_free - (now + lat));
 
      /* track bus resource usage */
      cp->bus_free = MAX(cp->bus_free, (now + lat)) + 1;

      if (repl->status & CACHE_BLK_DIRTY)
	{
	  /* write back the cache block */
	  cp->writebacks++;
	  lat += cp->blk_access_fn(Write,
				   CACHE_MK_BADDR(cp, repl->tag, set),
				   cp->bsize, repl, now+lat);
	}
    }
  /* update block tags */
  repl->tag = tag;
  repl->status = CACHE_BLK_VALID;	/* dirty bit set on update */

  /* read data block */
  lat += cp->blk_access_fn(Read, CACHE_BADDR(cp, addr), cp->bsize,
			   repl, now+lat);

  /* copy data out of cache block */
  if (cp->balloc)
    {
      CACHE_BCOPY(cmd, repl, bofs, p, nbytes);
    }

  /* update dirty status */
  if (cmd == Write)
    repl->status |= CACHE_BLK_DIRTY;

  /* get user block data, if requested and it exists */
  if (udata)
    *udata = repl->user_data;

  /* update block status */
  repl->ready = now+lat;

  /* link this entry back into the hash table */
  if (cp->hsize)
    link_htab_ent(cp, &cp->sets[set], repl);

  /* populate the allocated mshr entry, then recompute cp->ready as the
     earliest time any mshr frees up */
  if (cp->num_mshr>0 && mshr_hit!=-1) {
     cp->mshr[mshr_hit].ready = repl->ready;
     cp->mshr[mshr_hit].block_addr = blk_addr;
     cp->mshr[mshr_hit].target_num = 1;
     for (i = 0, cp->ready = cp->mshr[0].ready; i < cp->num_mshr; i++) {
        if (cp->mshr[i].ready < cp->ready)
           cp->ready = cp->mshr[i].ready;
     }
  }
  /* return latency of the operation */
  return lat;


 cache_hit: /* slow hit handler */
  
  /* **HIT** */
  cp->hits++;

 /* mshr: check for secondary miss */
  if (cp->num_mshr>0) {
     /* is this a secondary miss? */
     if (blk->ready > now) {
        /* search for matching mshr */
    	cp->mshr_accesses++;     
        for (i = 0; i < cp->num_mshr; i++) {
           if (cp->mshr[i].block_addr == blk_addr && cp->mshr[i].ready > now) {
              if (cp->mshr[i].target_num < 4) {
                 mshr_hit = i;
                 cp->mshr[i].target_num++;
                 cp->mshr_hits++;
                 break;
              }
              else {
                 /* target space full, so stall! */
                 if(mem_ready!=NULL)*mem_ready = cp->mshr[i].ready;
                 cp->mshr_target_full++;
                 return MSHR_TARGET_FULL;
              }
           }
       }
    }
  }
  /* copy data out of cache block, if block exists */
  if (cp->balloc)
    {
      CACHE_BCOPY(cmd, blk, bofs, p, nbytes);
    }

  /* update dirty status */
  if (cmd == Write)
    blk->status |= CACHE_BLK_DIRTY;

  /* if LRU replacement and this is not the first element of list, reorder */
  if (blk->way_prev && cp->policy == LRU)
    {
      /* move this block to head of the way (MRU) list */
      update_way_list(&cp->sets[set], blk, Head);
    }

  /* tag is unchanged, so hash links (if they exist) are still valid */

  /* record the last block to hit */
  cp->last_tagset = CACHE_TAGSET(cp, addr);
  cp->last_blk = blk;

  /* get user block data, if requested and it exists */
  if (udata)
    *udata = blk->user_data;

  /* return first cycle data is available to access */
  return (int) MAX(cp->hit_latency, (blk->ready - now));

 cache_fast_hit: /* fast hit handler */
  
  /* **FAST HIT** */
  cp->hits++;
 /* mshr: check for secondary miss */
  if (cp->num_mshr>0) {
     /* is this a secondary miss? */
     if (blk->ready > now) {
        cp->mshr_accesses++;     
        /* search for matching mshr */
        for (i = 0; i < cp->num_mshr; i++) {
           if (cp->mshr[i].block_addr == blk_addr && cp->mshr[i].ready > now) {
              if (cp->mshr[i].target_num < 4) {
                 mshr_hit = i;
                 cp->mshr[i].target_num++;
                 cp->mshr_hits++;
                 break;
              }
              else {
                 /* target space full, so stall! */
                 if(mem_ready!=NULL)*mem_ready = cp->mshr[i].ready;
                 cp->mshr_target_full++;
                 return MSHR_TARGET_FULL;
              }
           }
       }
    }
  }
  /* copy data out of cache block, if block exists */
  if (cp->balloc)
    {
      CACHE_BCOPY(cmd, blk, bofs, p, nbytes);
    }

  /* update dirty status */
  if (cmd == Write)
    blk->status |= CACHE_BLK_DIRTY;

  /* this block hit last, no change in the way list */

  /* tag is unchanged, so hash links (if they exist) are still valid */

  /* get user block data, if requested and it exists */
  if (udata)
    *udata = blk->user_data;

  /* record the last block to hit */
  cp->last_tagset = CACHE_TAGSET(cp, addr);
  cp->last_blk = blk;

  /* return first cycle data is available to access */
  return (int) MAX(cp->hit_latency, (blk->ready - now));
}
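
Example #22 indexes an MSHR array whose declaration is not shown. From the fields it touches, each entry needs roughly the following shape; this is an inference from the usage above, not the project's actual definition (tick_t and md_addr_t come from the simulator headers, and MSHR_FULL / MSHR_TARGET_FULL are sentinel return codes defined elsewhere):

/* one entry per outstanding miss; an entry is free once its ready time
   has passed, and up to four accesses can merge into one miss */
struct mshr_t {
  tick_t ready;		/* cycle at which the miss data returns */
  md_addr_t block_addr;	/* block address of the outstanding miss */
  int target_num;	/* accesses merged into this miss (capped at 4) */
};
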
Example #23
0
/* access a cache, perform a CMD operation on cache CP at address ADDR,
   places NBYTES of data at *P, returns latency of operation if initiated
   at NOW, places pointer to block user data in *UDATA, *P is untouched if
   cache blocks are not allocated (!CP->BALLOC), UDATA should be NULL if no
   user data is attached to blocks */
unsigned int				/* latency of access in cycles */
cache_access(struct cache_t *cp,	/* cache to access */
	     enum mem_cmd cmd,		/* access type, Read or Write */
	     md_addr_t addr,		/* address of access */
	     void *vp,			/* ptr to buffer for input/output */
	     int nbytes,		/* number of bytes to access */
	     tick_t now,		/* time of access */
	     byte_t **udata,		/* for return of user data ptr */
	     md_addr_t *repl_addr)	/* for address of replaced block */
{
  byte_t *p = vp;
  /* tag and set identify the address being accessed */
  md_addr_t tag = CACHE_TAG(cp, addr);
  md_addr_t set = CACHE_SET(cp, addr);
  md_addr_t bofs = CACHE_BLK(cp, addr);

  struct cache_blk_t *blk, *repl;
  int lat = 0;

  //printf("mother f****r\n");
  //sprintf(buf, "%s.inv_rate", name);

  /* default replacement address */
  if (repl_addr)
    *repl_addr = 0;

  /* check alignments */
  if ((nbytes & (nbytes-1)) != 0 || (addr & (nbytes-1)) != 0)
    fatal("cache: access error: bad size or alignment, addr 0x%08x", addr);

  /* access must fit in cache block */
  /* FIXME:
     ((addr + (nbytes - 1)) > ((addr & ~cp->blk_mask) + (cp->bsize - 1))) */
  if ((addr + nbytes) > ((addr & ~cp->blk_mask) + cp->bsize))
    fatal("cache: access error: access spans block, addr 0x%08x", addr);

  /* permissions are checked on cache misses */

  /* check for a fast hit: access to same block */
  if (CACHE_TAGSET(cp, addr) == cp->last_tagset)
    {
      /* hit in the same block */
      blk = cp->last_blk;
      goto cache_fast_hit;
    }
    
  if (cp->hsize)
    {
      /* highly-associative cache, access through the per-set hash tables */
      int hindex = CACHE_HASH(cp, tag);

      for (blk=cp->sets[set].hash[hindex];
	   blk;
	   blk=blk->hash_next)
	{
	  if (blk->tag == tag && (blk->status & CACHE_BLK_VALID))
	    goto cache_hit;
	}
    }
  else
    {
      /* low-associativity cache, linear search the way list */
      for (blk=cp->sets[set].way_head;
	   blk;
	   blk=blk->way_next)
	{
	  if (blk->tag == tag && (blk->status & CACHE_BLK_VALID))
	    goto cache_hit;
	}
    }

  /* cache block not found */

  /* **MISS** */
  cp->misses++;

  /* select the appropriate block to replace, and re-link this entry to
     the appropriate place in the way list */
  switch (cp->policy) {
  case LRU:
  case FIFO:
    repl = cp->sets[set].way_tail;
    /* the tail of the way list is the victim; moving it to the head means the
       newly fetched block, which overwrites the victim in place, lands in the
       MRU position */
    update_way_list(&cp->sets[set], repl, Head);
    break;
  case Random:
    {
      int bindex = myrand() & (cp->assoc - 1);
      repl = CACHE_BINDEX(cp, cp->sets[set].blks, bindex);
    }
    break;
  case Pseudo:
    {
      /* walk the PLRU decision tree from the root: the state bit at node i
	 selects a child (1 -> node 2i+2, 0 -> node 2i+1), and the victim way
	 index accumulates the complement of each bit visited */
      int bindex = 0;
      int PLRU_state = cp->sets[set].PLRU_STATE;
      int width_bindex = 0;
      for (width_bindex = 0; ; width_bindex++) {
	  if ((cp->assoc >> width_bindex) == 1)
	      break;		/* width_bindex = log2(assoc) tree levels */
      }
      int i = 0;
      int j = 0;
      for (j = 0; j < width_bindex; j++) {
	  int bit = (PLRU_state >> i) & 0x1;
	  if (bit == 1)
	      i = i*2 + 2;	/* right child */
	  else
	      i = i*2 + 1;	/* left child */
	  bit = bit ^ 0x1;
	  bindex = (bindex << 1) | bit;
      }
      repl = CACHE_BINDEX(cp, cp->sets[set].blks, bindex);
      cp->sets[set].PLRU_STATE = update_PLRU_state(cp->assoc, bindex, PLRU_state);
    }
    break;
  case MRU:
	  repl=cp->sets[set].way_head;
	  update_way_list(&cp->sets[set],repl,Tail);
	  break;
  case LIP:
	  repl=cp->sets[set].way_tail;
	  break;
  default:
    panic("bogus replacement policy");
  }

  /* remove this block from the hash bucket chain, if hash exists */
  if (cp->hsize)
    unlink_htab_ent(cp, &cp->sets[set], repl);

  /* blow away the last block to hit */
  cp->last_tagset = 0;
  cp->last_blk = NULL;
  cp->last_blk_addr=0;

  /* write back replaced block data */
  if (repl->status & CACHE_BLK_VALID)
    {
      cp->replacements++;

      if (repl_addr)
	*repl_addr = CACHE_MK_BADDR(cp, repl->tag, set);

	  /* repl->tag and set form the block address of the evicted victim;
	     cp->last_blk_addr records it so a victim cache can be consulted:
	     on an L1 miss the victim cache is scanned, and on a victim-cache
	     hit the L1 victim and the hit block are exchanged */
	  cp->last_blk_addr = CACHE_MK_BADDR(cp, repl->tag, set);
 
      /* don't replace the block until outstanding misses are satisfied */
      lat += BOUND_POS(repl->ready - now);
 
      /* stall until the bus to next level of memory is available */
      lat += BOUND_POS(cp->bus_free - (now + lat));
 
      /* track bus resource usage */
      cp->bus_free = MAX(cp->bus_free, (now + lat)) + 1;

      if (repl->status & CACHE_BLK_DIRTY)
	{
	  /* write back the cache block */
	  cp->writebacks++;
	  //repl is the block to be replaced
	  lat += cp->blk_access_fn(Write,
				   CACHE_MK_BADDR(cp, repl->tag, set),
				   cp->bsize, repl, now+lat);
	}
    }

  /* update block tags */
  /* tag belongs to the requested address, so overwriting repl->tag replaces
     the victim with the desired block */
  repl->tag = tag;
  repl->status = CACHE_BLK_VALID;	/* dirty bit set on update */

  /* read data block */
  /* with a victim cache, the fill would be serviced from the victim cache
     first and only then from the next level; that hook belongs in the
     dl1_access_fn callback */
  lat += cp->blk_access_fn(Read, CACHE_BADDR(cp, addr), cp->bsize,
			   repl, now+lat);

  /* copy data out of cache block */
  if (cp->balloc)
    {
      CACHE_BCOPY(cmd, repl, bofs, p, nbytes);
    }

  /* update dirty status */
  if (cmd == Write)
    repl->status |= CACHE_BLK_DIRTY;

  /* get user block data, if requested and it exists */
  if (udata)
    *udata = repl->user_data;

  /* update block status */
  repl->ready = now+lat;

  /* link this entry back into the hash table */
  if (cp->hsize)
    link_htab_ent(cp, &cp->sets[set], repl);

  /* return latency of the operation */
  return lat;


 cache_hit: /* slow hit handler */
  
  /* **HIT** */
  cp->hits++;

  /* copy data out of cache block, if block exists */
  if (cp->balloc)
    {
      CACHE_BCOPY(cmd, blk, bofs, p, nbytes);
    }

  /* update dirty status */
  if (cmd == Write)
    blk->status |= CACHE_BLK_DIRTY;

  /* if LRU replacement and this is not the first element of list, reorder */
  if (blk->way_prev && cp->policy == LRU)
    {
      /* move this block to head of the way (MRU) list */
      update_way_list(&cp->sets[set], blk, Head);
    }

  /* tag is unchanged, so hash links (if they exist) are still valid */

  /* record the last block to hit */
  cp->last_tagset = CACHE_TAGSET(cp, addr);
  cp->last_blk = blk;

  /* get user block data, if requested and it exists */
  if (udata)
    *udata = blk->user_data;

  /* return first cycle data is available to access */
  return (int) MAX(cp->hit_latency, (blk->ready - now));

 cache_fast_hit: /* fast hit handler */
  
  /* **FAST HIT** */
  cp->hits++;

  /* copy data out of cache block, if block exists */
  if (cp->balloc)
    {
      CACHE_BCOPY(cmd, blk, bofs, p, nbytes);
    }

  /* update dirty status */
  if (cmd == Write)
    blk->status |= CACHE_BLK_DIRTY;

  /* this block hit last, no change in the way list */

  /* tag is unchanged, so hash links (if they exist) are still valid */

  /* get user block data, if requested and it exists */
  if (udata)
    *udata = blk->user_data;

  /* record the last block to hit */
  cp->last_tagset = CACHE_TAGSET(cp, addr);
  cp->last_blk = blk;

  /* return first cycle data is available to access */
  return (int) MAX(cp->hit_latency, (blk->ready - now));
}
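
Example #23 calls update_PLRU_state without showing it. One plausible implementation, consistent with the victim walk in the Pseudo case above (where a state bit of 1 at node i sends the walk to child 2i+2 and each emitted way-index bit is the complement of the bit visited), sets every node on the accessed way's path so the next walk steers away from that way. This is a sketch under that assumed convention, not the project's code:

/* after an access to way windex, update the PLRU tree bits so the next
   victim walk avoids that way; assoc must be a power of two */
static int
update_PLRU_state(int assoc, int windex, int state)
{
  int levels = 0, node = 0, level;

  while ((assoc >> levels) != 1)
    levels++;				/* log2(assoc) tree levels */

  for (level = levels - 1; level >= 0; level--) {
      int w = (windex >> level) & 0x1;	/* accessed way's bit at this level */
      if (w)
	  state |= (1 << node);		/* walk emits !w here, avoiding this way */
      else
	  state &= ~(1 << node);
      /* descend toward the accessed way: way-bit w lives under the child
	 the walk reaches with state bit !w */
      node = w ? (node*2 + 1) : (node*2 + 2);
  }
  return state;
}
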