예제 #1
0
uint64_t MESITopCC::processInval(Address lineAddr, uint32_t lineId, InvType type, bool* reqWriteback, uint64_t cycle, uint32_t srcId) {
    // Anything other than a FWD must be propagated down: the children may
    // hold copies that need to be invalidated or downgraded.
    if (type != FWD) {
        return sendInvalidates(lineAddr, lineId, type, reqWriteback, cycle, srcId);
    }
    // FWD: the hierarchy must be inclusive for this to be answerable locally,
    // so we are guaranteed to have the line and only the local latency applies.
    assert(!nonInclusiveHack); //dsm: ask me if you see this failing and don't know why
    return cycle;
}
예제 #2
0
uint64_t MESITopCC::processEviction(Address wbLineAddr, uint32_t lineId, bool* reqWriteback, uint64_t cycle, uint32_t srcId) {
    // Inclusive operation: evicting our copy forces every child copy out too.
    if (!nonInclusiveHack) {
        return sendInvalidates(wbLineAddr, lineId, INV, reqWriteback, cycle, srcId);
    }
    // Non-inclusive hack: children may keep their copies; just drop our
    // directory entry for this line, no invalidation traffic.
    array[lineId].clear();
    return cycle;
}
예제 #3
0
// Inclusive MESI directory handler for an access from child `childId`.
// Updates the sharer bit-vector and exclusive flag of array[lineId], and
// writes the MESI state the requesting child should adopt into *childState.
// Returns the completion cycle; invalidations/downgrades sent to other
// children can push it past `cycle`, and sendInvalidates sets
// *inducedWriteback when a child with dirty data was forced to write back.
uint64_t MESITopCC::processAccess(Address lineAddr, uint32_t lineId, AccessType type, uint32_t childId, bool haveExclusive,
                                  MESIState* childState, bool* inducedWriteback, uint64_t cycle, uint32_t srcId, uint32_t flags) {
    Entry* e = &array[lineId];
    uint64_t respCycle = cycle;
    switch (type) {
        case PUTX: //child writes back dirty data
            assert(e->isExclusive());
            if (flags & MemReq::PUTX_KEEPEXCL) {
                assert(e->sharers[childId]);
                assert(*childState == M);
                *childState = E; //they don't hold dirty data anymore
                break; //don't remove from sharer set. It'll keep exclusive perms.
            }
            //note NO break in general
            //(intentional fallthrough: a plain PUTX also removes the child
            //from the sharer set, exactly like PUTS)
        case PUTS: //child evicts a clean copy: remove it from the sharer set
            assert(e->sharers[childId]);
            e->sharers[childId] = false;
            e->numSharers--;
            *childState = I;
            break;
        case GETS:
            if (e->isEmpty() && haveExclusive && !(flags & MemReq::NOEXCL)) {
                //Give in E state
                e->exclusive = true;
                e->sharers[childId] = true;
                e->numSharers = 1;
                *childState = E;
            } else {
                //Give in S state
                assert(e->sharers[childId] == false);

                if (e->isExclusive()) {
                    //Downgrade the exclusive sharer
                    respCycle = sendInvalidates(lineAddr, lineId, INVX, inducedWriteback, cycle, srcId);
                }

                assert_msg(!e->isExclusive(), "Can't have exclusivity here. isExcl=%d excl=%d numSharers=%d", e->isExclusive(), e->exclusive, e->numSharers);

                e->sharers[childId] = true;
                e->numSharers++;
                e->exclusive = false; //dsm: Must set, we're explicitly non-exclusive
                *childState = S;
            }
            break;
        case GETX:
            assert(haveExclusive); //the current cache better have exclusive access to this line

            // If child is in sharers list (this is an upgrade miss), take it out
            if (e->sharers[childId]) {
                assert_msg(!e->isExclusive(), "Spurious GETX, childId=%d numSharers=%d isExcl=%d excl=%d", childId, e->numSharers, e->isExclusive(), e->exclusive);
                e->sharers[childId] = false;
                e->numSharers--;
            }

            // Invalidate all other copies
            respCycle = sendInvalidates(lineAddr, lineId, INV, inducedWriteback, cycle, srcId);

            // Set current sharer, mark exclusive
            e->sharers[childId] = true;
            e->numSharers++;
            e->exclusive = true;

            assert(e->numSharers == 1);

            *childState = M; //give in M directly
            break;

        default: panic("!?");
    }

    return respCycle;
}
// Flexclusive top coherence controller access handler. Behavior is selected
// by the cluster state `cs`:
//  - cs == EX: exclusive-LLC behavior — child states are set directly and
//    no directory entry is maintained (the array entry is unused).
//  - otherwise: inclusive MESI-directory-like behavior, but the sharer
//    vector is rebuilt from `valid_children` on every access rather than
//    trusted incrementally.
// NOTE(review): presumably `isValid` indicates whether this line's data is
// actually present/valid at this level — confirm against the caller.
uint64_t flexclusive_MESITopCC::processAccess(
    Address lineAddr, uint32_t lineId, AccessType type, uint32_t childId,
    bool haveExclusive, MESIState* childState, bool* inducedWriteback,
    uint64_t cycle, uint32_t srcId, uint32_t flags, bool isValid, CLUState cs) {
  if (cs == EX) {
    uint64_t respCycle = cycle;

    // lineId == -1: no array entry for this line (exclusive mode does not
    // track lines held only by inner levels); only fetches may see this.
    if ((int)lineId == -1) {
      assert(type == GETS || type == GETX);
      if (!(flags & MemReq::INNER_COPY)) {  // i.e. if line was not found in
                                            // inner levels in case of excl llc
        assert(search_inner_banks(lineAddr, childId) == 0);
        if (type == GETS) {
          assert(*childState == I);
          *childState = E;  // as line is gonna come in E state, as levels below
                            // excl are excl
          // if shared cache, we don't know which core owns the copy though
        } else
          *childState = M;
        return respCycle;
      } else {
        // An inner bank holds a copy: downgrade it for a GETS, invalidate it
        // for a GETX, then grant the requested state.
        assert(search_inner_banks(lineAddr, childId) == 1);
        if (type == GETS) {
          respCycle = sendInvalidates(
              lineAddr, lineId, INVX, inducedWriteback, cycle, srcId,
              childId);  // sets inner level copies to S state
          *childState = S;
        } else {
          respCycle = sendInvalidates(
              lineAddr, lineId, INV, inducedWriteback, cycle, srcId,
              childId);  // sets inner level copies to I state
          *childState = M;
        }
        return respCycle;
      }
    }

    // Entry* e = &array[lineId]; //not needed for exclusive cache

    switch (type) {
      case PUTX:
      case PUTS:
        *childState = I;  // if data should not be duplicated in
        // any child of the child cache
        // then we should not be cycling the data
        // So we need the duplicate bit to enable this
        // decision but oh well
        // that is for the flexclusive cache
        break;

      case GETS:
        assert(*childState == I);
        // assert_msg(search_inner_banks(lineAddr, childId) == 0, "Haveexclusive
        // is %d", haveExclusive);
        // this assertion is not correct
        *childState = E;  // could also be M
        // we just specified E
        // need to change this for accuracy
        break;
      case GETX:
        // assert(search_inner_banks(lineAddr, childId) == 0); //this assertion
        // is not correct
        assert(haveExclusive);  // the current cache better have exclusive
                                // access to this line

        *childState = M;  // give in M directly
        break;

      default:
        panic("!?");
    }

    return respCycle;

  } else {
    Entry* e = &array[lineId];
    uint64_t respCycle = cycle;

    // Rebuild the sharer vector from scratch: clear all bits, then mark every
    // valid child except the requester as a sharer.
    // NOTE(review): this discards whatever sharer state was tracked before;
    // presumably valid_children is authoritative in this mode — confirm.
    e->numSharers=0;

    uint32_t i,c;

    for(i=0; i<(uint32_t)children.size(); i++){
        e->sharers[i] = false;
    }

    for (i = 0; i < (uint32_t)valid_children.size(); i++) {
      c = valid_children[i];
      if (c == childId) {
        continue;
      }
      e->sharers[c] = true;
      e->numSharers++;
    }

    switch (type) {
      case PUTX:
        //assert(e->isExclusive());
        // info ("doing a PUTS due to eviction");
        if (flags & MemReq::PUTX_KEEPEXCL){ //never executed
          assert(e->sharers[childId]);
          assert(*childState == M);
          *childState = E;  // they don't hold dirty data anymore
          break;  // don't remove from sharer set. It'll keep exclusive perms.
        }
      // note NO break in general
      // (intentional fallthrough into PUTS)

      case PUTS:
        // info ("doing a PUTS due to eviction");
        //assert(e->sharers[childId]);
        //e->sharers[childId] = false;
        //e->numSharers--;
        *childState = I;
        break;

      case GETS:
        if ((flags & MemReq::INNER_COPY) && isValid) {
          // if there was an inner copy then what do we do ?
          // we should INVX it
          // and then put it as a sharer in the TCC

          // info ("Found flag !");

          // assert(e->isEmpty()); //need not be empty
          // assert(e->sharers[0] == false);
          // assert(e->sharers[1] == false);
          // assert(e->sharers[2] == false);
          // assert(e->sharers[3] == false);
          e->exclusive = false;  // it is in shared state
          e->numSharers = 0;

          // put everything except child ID as sharer
          uint32_t i;
          assert(e->sharers[childId] == false);
          uint32_t c;
          for (i = 0; i < (uint32_t)valid_children.size(); i++) {
            c = valid_children[i];
            if (c == childId) {
              continue;
            }
            e->sharers[c] = true;
            e->numSharers++;
          }

          // assert (e->sharers[childId] == false);

          // NOTE(review): this declaration shadows the outer respCycle; the
          // inner one is what the `return` below yields, so behavior is
          // correct, but the shadowing is fragile — consider renaming.
          uint64_t respCycle;
          respCycle = sendInvalidates(lineAddr, lineId, INVX, inducedWriteback,
                                      cycle, srcId, true);

          // assert_msg( e->sharers[1-childId] == true, "child Id is %d,
          // e->sharers[0] is %d,  e->sharers[1] is %d ", childId,
          // (int)e->sharers[0], (int)e->sharers[1] );
          // not true because the number of sharers may not change if search
          // inner banks gave false positive
          e->sharers[childId] = true;  // set the final directory state

          // e->numSharers = 2; //2 core procesor

          e->numSharers++;  // add the child as a sharer

          assert_msg(e->numSharers > 1,
                     "The sharers at %d %d %d %d, childId is %d",
                     (int)e->sharers[0], (int)e->sharers[1], (int)e->sharers[2],
                     (int)e->sharers[3],
                     childId);  // otherwise something is wrong
          // because the inner level search
          // did a false positive and thus childstate should
          // be E
          *childState = S;
          return respCycle;
        }

        if (e->isEmpty() && haveExclusive && !(flags & MemReq::NOEXCL)) {
          // Give in E state
          //assert(e->sharers[childId] == false);
          e->exclusive = true;
          e->sharers[childId] = true;
          e->numSharers = 1;
          *childState = E;

        } else {
          // Give in S state

          assert(*childState == I);
          //assert_msg(e->sharers[childId] == false,
                     //"haveExclusive is %d, numsharers is %d, childstate is %d",
                     //haveExclusive, e->numSharers, *childState);

          //info("Here");

          //if (e->isExclusive()) {
          // Downgrade path: a single sharer under exclusive ownership must
          // drop to S before we add the requester.
          if((e->numSharers == 1) && haveExclusive){
            // Downgrade the exclusive sharer
            assert(e->sharers[childId]);
            respCycle = sendInvalidates(lineAddr, lineId, INVX,
                                        inducedWriteback, cycle, srcId, false);
          }

          //assert_msg(
              //!e->isExclusive(),
              //"Can't have exclusivity here. isExcl=%d excl=%d numSharers=%d",
              //e->isExclusive(), e->exclusive, e->numSharers);

          e->sharers[childId] = true;
          e->numSharers++;
          e->exclusive = false;  // dsm: Must set, we're explicitly
                                 // non-exclusive
          *childState = S;

          // assert(e->numSharers == 1);
        }

        assert(*childState != I);
        assert(e->sharers[childId] == true);
        break;
      case GETX:
        assert(haveExclusive);  // the current cache better have exclusive
                                // access to this line
           // Rebuild the sharer vector again (same procedure as at function
           // entry) before deciding how to invalidate other copies.
           e->numSharers =0;
          for(i=0; i<(uint32_t)children.size(); i++){
           e->sharers[i] = false;
          }
           for (i = 0; i < (uint32_t)valid_children.size(); i++) {
            c = valid_children[i];
            if (c == childId) continue;  // should not happen
            e->sharers[c] = true;
            e->numSharers++;
          }
        if ((flags & MemReq::INNER_COPY) && isValid) {
          // if there was an inner copy then what do we do ?
          // we should INVX it
          // and then put it as a sharer in the TCC
          // assert(e->sharers[0] == false);
          // assert(e->sharers[1] == false);
          // assert(e->sharers[2] == false);
          // assert(e->sharers[3] == false);

          e->exclusive = false;  // it is in shared state
          e->numSharers = 0;

          // NOTE(review): these declarations shadow the outer i and c.
          uint32_t i;
          e->sharers[childId] = false;
          uint32_t c;
          for (i = 0; i < (uint32_t)valid_children.size(); i++) {
            c = valid_children[i];
            if (c == childId) continue;  // should not happen
            e->sharers[c] = true;
            e->numSharers++;
          }

          // assert (e->sharers[childId] == false);

          // NOTE(review): shadows the outer respCycle (see GETS path above);
          // the shadowed copy is the one returned, so behavior is correct.
          uint64_t respCycle;
          respCycle = sendInvalidates(lineAddr, lineId, INV, inducedWriteback,
                                      cycle, srcId, true);

          // After invalidating everyone else, the requester becomes the sole,
          // exclusive sharer.
          e->exclusive = true;
          e->numSharers = 1;
          e->sharers[childId] = true;

          *childState = M;

          return respCycle;
        }

        // If child is in sharers list (this is an upgrade miss), take it out
        if (e->sharers[childId]) {
          assert_msg(
              e->numSharers == 1,
              "Spurious GETX, childId=%d numSharers=%d isExcl=%d excl=%d",
              childId, e->numSharers, e->isExclusive(), e->exclusive);
          e->sharers[childId] = false;
          e->numSharers--;
        }

        // Invalidate all other copies
        respCycle = sendInvalidates(lineAddr, lineId, INV, inducedWriteback,
                                    cycle, srcId, false);

        // Set current sharer, mark exclusive
        e->sharers[childId] = true;
        e->numSharers++;
        e->exclusive = true;

        assert(e->numSharers == 1);

        *childState = M;  // give in M directly
        break;

      default:
        panic("!?");
    }
    return respCycle;
  }
}
예제 #5
0
// Exclusive-LLC top coherence controller access handler. An exclusive cache
// keeps no copy of lines held above it, so requests may arrive with no
// matching way (lineId == -1); in that case the child state is granted
// directly, after downgrading/invalidating any inner-bank copies when the
// INNER_COPY flag is set. Returns the cycle at which the response is ready.
uint64_t exclusive_MESITopCC::processAccess(Address lineAddr, uint32_t lineId, AccessType type, uint32_t childId, bool haveExclusive,
                                  MESIState* childState, bool* inducedWriteback, uint64_t cycle, uint32_t srcId, uint32_t flags) {

    uint64_t doneCycle = cycle;

    if ((int) lineId == -1) {
        // Only fetches can miss in the array this way.
        assert( type == GETS || type == GETX );
        if (flags & MemReq::INNER_COPY) {
            // Some inner bank holds a copy: drop it to S for a GETS (INVX),
            // or to I for a GETX (INV), then grant the matching state.
            assert(search_inner_banks(lineAddr, childId) == 1);
            InvType downgrade = (type == GETS)? INVX : INV;
            doneCycle = sendInvalidates(lineAddr, lineId, downgrade, inducedWriteback, cycle, srcId, childId);
            *childState = (type == GETS)? S : M;
        } else {
            // Line was not found in inner levels; since levels below the
            // exclusive LLC are exclusive too, the child gets it outright.
            // (For a shared cache we would not know which core owns the copy.)
            assert(search_inner_banks(lineAddr, childId) == 0);
            if (type == GETS) {
                assert (*childState == I);
                *childState = E; // could come in E; the owner is unambiguous here
            } else {
                *childState = M;
            }
        }
        return doneCycle;
    }

    //Entry* e = &array[lineId]; //not needed for exclusive cache

    switch (type) {
        case PUTX:
        case PUTS:
            // Writeback into an exclusive cache: the child gives up its copy
            // (no duplication between this level and its children).
            *childState = I;
            break;

        case GETS:
            assert (*childState == I);
            // Prefetch fills do not change the child's state; a demand GETS
            // is granted E (could also be M; kept as E for now).
            if (!(flags & MemReq::PREFETCH)) {
                *childState = E;
            }
            break;

        case GETX:
            assert(haveExclusive); //the current cache better have exclusive access to this line
            // Only demand requests get M; prefetch handling only works for
            // L3 prefetches.
            if (!(flags & MemReq::PREFETCH)) {
                *childState = M; //give in M directly
            }
            break;

        default: panic("!?");
    }

    return doneCycle;
}