Example 1
0
/* TODO: add noreturn attribute. */
/*
 * libitm ABI entry point: roll back the current transaction.
 * Fetches the transaction descriptor via TX_GET_ABI and delegates to
 * stm_rollback() with an explicit-abort reason.  The source-location
 * argument (__src) is not used here.
 * NOTE(review): stm_rollback() presumably does not return (longjmp) —
 * confirm before adding the noreturn attribute mentioned above.
 */
void _ITM_CALL_CONVENTION _ITM_rollbackTransaction(TX_ARGS
                              const _ITM_srcLocation *__src)
{
  /* TODO check exactly the purpose of this function */
  TX_GET_ABI;
  stm_rollback(tx, STM_ABORT_EXPLICIT);
}
Example 2
0
/* TODO: add noreturn attribute. */
/*
 * libitm ABI entry point: abort the current transaction.
 * Translates the libitm abort reason into the corresponding TinySTM
 * rollback reason, then delegates to stm_rollback().
 */
void _ITM_CALL_CONVENTION _ITM_abortTransaction(TX_ARGS
                              _ITM_abortReason __reason)
{
  TX_GET_ABI;
  switch (__reason) {
  case userAbort:
    /* __tm_abort was invoked. */
    __reason = STM_ABORT_NO_RETRY;
    break;
  case userRetry:
    /* __tm_retry was invoked. */
    __reason = STM_ABORT_EXPLICIT;
    break;
  default:
    /* Any other reason is passed through unchanged. */
    break;
  }
  stm_rollback(tx, __reason);
}
Example 3
0
/*
 * Catch signal (to emulate non-faulting load).
 * If the fault happened inside a restartable transaction, abort it
 * (stm_rollback() transfers control back via longjmp); otherwise there
 * is no recovery point and the process is terminated.
 */
static void signal_catcher(int sig)
{
  stm_tx_t *tx = stm_get_tx();

  /* A fault might only occur upon a load concurrent with a free (read-after-free) */
  PRINT_DEBUG("Caught signal: %d\n", sig);

  if (tx != NULL && (tx->attr == NULL || !tx->attr->no_retry)) {
    /* Restartable: account for the abort and longjmp out of the handler */
    tx->aborts_invalid_memory++;
    stm_rollback(tx, STM_ABORT_SIGNAL);
    return;
  }

  /* There is not much we can do: execution will restart at faulty load */
  fprintf(stderr, "Error: invalid memory accessed and no longjmp destination\n");
  exit(1);
}
Example 4
0
File: stm.c — Project: HPDCS/stmF2C2
/*
 * Catch signal (to emulate non-faulting load).
 * A faulting transactional load is turned into a transaction abort when
 * the current thread is inside an active, restartable transaction;
 * otherwise the process exits.
 */
static void
signal_catcher(int sig)
{
  stm_tx_t *tx = tls_get_tx();
  sigset_t mask;

  /* A fault might only occur upon a load concurrent with a free (read-after-free) */
  PRINT_DEBUG("Caught signal: %d\n", sig);

  /* TODO: TX_KILLED should be also allowed */
  if (tx == NULL || tx->attr.no_retry || GET_STATUS(tx->status) != TX_ACTIVE) {
    /* There is not much we can do: execution will restart at faulty load */
    fprintf(stderr, "Error: invalid memory accessed and no longjmp destination\n");
    exit(1);
  }

  /* stm_rollback() longjmps instead of returning from this handler, so
   * the kernel will never unblock the signal for us: do it by hand. */
  sigemptyset(&mask);
  sigaddset(&mask, sig);
  pthread_sigmask(SIG_UNBLOCK, &mask, NULL);

  /* Will cause a longjmp */
  stm_rollback(tx, STM_ABORT_SIGNAL);
}
Example 5
0
/*
 * Abort the transaction whose descriptor is passed explicitly.
 * The caller-supplied reason is combined with STM_ABORT_EXPLICIT before
 * delegating to stm_rollback().
 */
_CALLCONV void
stm_abort_tx(stm_tx_t *tx, int reason)
{
    stm_rollback(tx, STM_ABORT_EXPLICIT | reason);
}
Example 6
0
/*
 * Called by the CURRENT thread to abort a transaction.
 * The descriptor is obtained via TX_GET; the caller-supplied reason is
 * combined with STM_ABORT_EXPLICIT before rolling back.
 */
_CALLCONV void
stm_abort(int reason)
{
    TX_GET;
    stm_rollback(tx, STM_ABORT_EXPLICIT | reason);
}
Example 7
0
/*
 * Set the CURRENT transaction as irrevocable.
 *
 * tx     - transaction descriptor
 * serial - non-zero requests serial-irrevocable mode (all other threads
 *          are quiesced); -1 is a special value used internally when
 *          re-acquiring irrevocability (see the IS_ACTIVE check below).
 *
 * Returns 1 once the transaction is irrevocable, 0 when it must first be
 * rolled back (stm_rollback() longjmps and the transaction retries with
 * tx->irrevocable already set).  The low 3 bits of tx->irrevocable track
 * acquisition progress (1 = requested, 2 = global lock held,
 * 3 = irrevocable); bit 0x08 flags serial mode.
 */
static INLINE int
int_stm_set_irrevocable(stm_tx_t *tx, int serial)
{
#ifdef IRREVOCABLE_ENABLED
# if CM == CM_MODULAR
    stm_word_t t;
# endif /* CM == CM_MODULAR */

    if (!IS_ACTIVE(tx->status) && serial != -1) {
        /* Request irrevocability outside of a transaction or in abort handler (for next execution) */
        tx->irrevocable = 1 + (serial ? 0x08 : 0);
        return 0;
    }

    /* Are we already in irrevocable mode? */
    if ((tx->irrevocable & 0x07) == 3) {
        return 1;
    }

    if (tx->irrevocable == 0) {
        /* Acquire irrevocability for the first time */
        tx->irrevocable = 1 + (serial ? 0x08 : 0);
#ifdef HYBRID_ASF
        /* TODO: we shouldn't use pthread_mutex/cond since it could use syscall. */
        if (tx->software == 0) {
            asf_abort(ASF_RETRY_IRREVOCABLE);
            return 0;
        }
#endif /* HYBRID_ASF */
        /* Try acquiring global lock */
        if (_tinystm.irrevocable == 1 || ATOMIC_CAS_FULL(&_tinystm.irrevocable, 0, 1) == 0) {
            /* Transaction will acquire irrevocability after rollback */
            stm_rollback(tx, STM_ABORT_IRREVOCABLE);
            return 0;
        }
        /* Success: remember we have the lock */
        tx->irrevocable++;
        /* Try validating transaction */
#if DESIGN == WRITE_BACK_ETL
        if (!stm_wbetl_validate(tx)) {
            stm_rollback(tx, STM_ABORT_VALIDATE);
            return 0;
        }
#elif DESIGN == WRITE_BACK_CTL
        if (!stm_wbctl_validate(tx)) {
            stm_rollback(tx, STM_ABORT_VALIDATE);
            return 0;
        }
#elif DESIGN == WRITE_THROUGH
        if (!stm_wt_validate(tx)) {
            stm_rollback(tx, STM_ABORT_VALIDATE);
            return 0;
        }
#elif DESIGN == MODULAR
        /* BUG FIX: roll back when validation FAILS.  The previous
         * condition was missing the negations and aborted on successful
         * validation — the other DESIGN branches above all test
         * !..._validate(tx). */
        if ((tx->attr.id == WRITE_BACK_CTL && !stm_wbctl_validate(tx))
                || (tx->attr.id == WRITE_THROUGH && !stm_wt_validate(tx))
                || (tx->attr.id != WRITE_BACK_CTL && tx->attr.id != WRITE_THROUGH && !stm_wbetl_validate(tx))) {
            stm_rollback(tx, STM_ABORT_VALIDATE);
            return 0;
        }
#endif /* DESIGN == MODULAR */

# if CM == CM_MODULAR
        /* We might still abort if we cannot set status (e.g., we are being killed) */
        t = tx->status;
        if (GET_STATUS(t) != TX_ACTIVE || ATOMIC_CAS_FULL(&tx->status, t, t + (TX_IRREVOCABLE - TX_ACTIVE)) == 0) {
            stm_rollback(tx, STM_ABORT_KILLED);
            return 0;
        }
# endif /* CM == CM_MODULAR */
        if (serial && tx->w_set.nb_entries != 0) {
            /* TODO: or commit the transaction when we have the irrevocability. */
            /* Don't mix transactional and direct accesses => restart with direct accesses */
            stm_rollback(tx, STM_ABORT_IRREVOCABLE);
            return 0;
        }
    } else if ((tx->irrevocable & 0x07) == 1) {
        /* Acquire irrevocability after restart (no need to validate) */
        while (_tinystm.irrevocable == 1 || ATOMIC_CAS_FULL(&_tinystm.irrevocable, 0, 1) == 0)
            ;
        /* Success: remember we have the lock */
        tx->irrevocable++;
    }
    assert((tx->irrevocable & 0x07) == 2);

    /* Are we in serial irrevocable mode? */
    if ((tx->irrevocable & 0x08) != 0) {
        /* Stop all other threads */
        if (stm_quiesce(tx, 1) != 0) {
            /* Another thread is quiescing and we are active (trying to acquire irrevocability) */
            assert(serial != -1);
            stm_rollback(tx, STM_ABORT_IRREVOCABLE);
            return 0;
        }
    }

    /* We are in irrevocable mode */
    tx->irrevocable++;

#else /* ! IRREVOCABLE_ENABLED */
    fprintf(stderr, "Irrevocability is not supported in this configuration\n");
    exit(-1);
#endif /* ! IRREVOCABLE_ENABLED */
    return 1;
}
Example 8
0
/*
 * Store a word-sized value (return write set entry or NULL).
 *
 * Encounter-time locking (ETL) write path: the versioned lock covering
 * `addr` is acquired here, at write time, and the store is buffered in
 * the transaction's write set.
 *
 * `mask` selects which bits of the word are written: 0 means "take
 * ownership of the location but store nothing", ~0 is a full-word
 * store, any other value merges `value` into the current contents.
 *
 * Returns the write-set entry covering `addr`.  On conflict the
 * transaction is rolled back via stm_rollback() — presumably a longjmp,
 * so the `return NULL` statements after rollback are defensive and not
 * normally reached (TODO confirm against stm_rollback()).
 */
static inline w_entry_t *stm_write(stm_tx_t *tx, volatile stm_word_t *addr, stm_word_t value, stm_word_t mask)
{
  volatile stm_word_t *lock;
  stm_word_t l, version;
  w_entry_t *w;
  w_entry_t *prev = NULL;

  PRINT_DEBUG2("==> stm_write(t=%p[%lu-%lu],a=%p,d=%p-%lu,m=0x%lx)\n",
               tx, (unsigned long)tx->start, (unsigned long)tx->end, addr, (void *)value, (unsigned long)value, (unsigned long)mask);

  assert(IS_ACTIVE(tx->status));

  if (tx->ro) {
    /* Disable read-only and abort */
    assert(tx->attr != NULL);
    /* Update attributes to inform the caller */
    tx->attr->read_only = 0;
    tx->aborts_ro++;
    stm_rollback(tx, STM_ABORT_RO_WRITE);
    return NULL;
  }

  /* Get reference to lock */
  lock = GET_LOCK(addr);

  /* Try to acquire lock */
 restart:
  l = ATOMIC_LOAD_ACQ(lock);
 restart_no_load:
  if (LOCK_GET_OWNED(l)) {
    /* Locked */
    if (l == LOCK_UNIT) {
      /* Data modified by a unit store: should not last long => retry */
      goto restart;
    }
    /* Do we own the lock? */
    w = (w_entry_t *)LOCK_GET_ADDR(l);
    /* Simply check if address falls inside our write set (avoids non-faulting load) */
    if (tx->w_set.entries <= w && w < tx->w_set.entries + tx->w_set.nb_entries) {
      /* Yes */
      if (mask == 0) {
        /* No need to insert new entry or modify existing one */
        return w;
      }
      prev = w;
      /* Did we previously write the same address? */
      while (1) {
        if (addr == prev->addr) {
          /* No need to add to write set */
          if (mask != ~(stm_word_t)0) {
            /* Partial store: merge under the mask; if the existing entry
             * never stored anything (mask 0), fetch the base value from
             * memory first */
            if (prev->mask == 0)
              prev->value = ATOMIC_LOAD(addr);
            value = (prev->value & ~mask) | (value & mask);
          }
          prev->value = value;
          prev->mask |= mask;
          return prev;
        }
        if (prev->next == NULL) {
          /* Remember last entry in linked list (for adding new entry) */
          break;
        }
        prev = prev->next;
      }
      /* Get version from previous write set entry (all entries in linked list have same version) */
      version = prev->version;
      /* Must add to write set */
      if (tx->w_set.nb_entries == tx->w_set.size)
        stm_allocate_ws_entries(tx, 1);
      w = &tx->w_set.entries[tx->w_set.nb_entries];
      goto do_write;
    }
    /* Conflict: CM kicks in */
    tx->c_lock = lock;
    /* Abort */
    tx->aborts_locked_write++;
    stm_rollback(tx, STM_ABORT_WW_CONFLICT);
    return NULL;
  }
  /* Not locked */
  /* Handle write after reads (before CAS) */
  version = LOCK_GET_TIMESTAMP(l);
 acquire:
  if (version > tx->end) {
    /* We might have read an older version previously */
    if (!tx->can_extend || stm_has_read(tx, lock) != NULL) {
      /* Read version must be older (otherwise, tx->end >= version) */
      /* Not much we can do: abort */
      tx->aborts_validate_write++;
      stm_rollback(tx, STM_ABORT_VAL_WRITE);
      return NULL;
    }
  }
  /* Acquire lock (ETL) */
  if (tx->w_set.nb_entries == tx->w_set.size)
    stm_allocate_ws_entries(tx, 1);
  w = &tx->w_set.entries[tx->w_set.nb_entries];
  /* CAS installs a pointer to our (not-yet-filled) write-set entry in the
   * lock word; on failure someone else changed the lock => start over */
  if (ATOMIC_CAS_FULL(lock, l, LOCK_SET_ADDR_WRITE((stm_word_t)w)) == 0)
    goto restart;
  /* We own the lock here (ETL) */
do_write:
  /* Add address to write set */
  w->addr = addr;
  w->mask = mask;
  w->lock = lock;
  if (mask == 0) {
    /* Do not write anything */
#ifndef NDEBUG
    w->value = 0;
#endif /* ! NDEBUG */
  } else
  {
    /* Remember new value */
    if (mask != ~(stm_word_t)0)
      value = (ATOMIC_LOAD(addr) & ~mask) | (value & mask);
    w->value = value;
  }
  w->version = version;
  w->next = NULL;
  if (prev != NULL) {
    /* Link new entry in list */
    prev->next = w;
  }
  tx->w_set.nb_entries++;
  tx->w_set.has_writes++;


  return w;
}
Example 9
0
/*
 * Load a word-sized value (invisible read).
 *
 * The read is "invisible" to writers: no per-reader state is published.
 * Consistency is obtained by the lock/value/lock re-read protocol below
 * and by recording the (lock, version) pair in the read set of update
 * transactions so later validation can detect overwrites.
 *
 * Returns the loaded value; on unrecoverable conflict the transaction is
 * rolled back via stm_rollback() (the trailing `return 0` statements are
 * defensive and not normally reached).
 */
static inline stm_word_t stm_read_invisible(stm_tx_t *tx, volatile stm_word_t *addr)
{
  volatile stm_word_t *lock;
  stm_word_t l, l2, value, version;
  r_entry_t *r;
  w_entry_t *w;

  PRINT_DEBUG2("==> stm_read_invisible(t=%p[%lu-%lu],a=%p)\n", tx, (unsigned long)tx->start, (unsigned long)tx->end, addr);

  assert(IS_ACTIVE(tx->status));


  /* Get reference to lock */
  lock = GET_LOCK(addr);

  /* Note: we could check for duplicate reads and get value from read set */

  /* Read lock, value, lock */
 restart:
  l = ATOMIC_LOAD_ACQ(lock);
 restart_no_load:
  if (LOCK_GET_WRITE(l)) {
    /* Locked */
    if (l == LOCK_UNIT) {
      /* Data modified by a unit store: should not last long => retry */
      goto restart;
    }
    /* Do we own the lock? */
    w = (w_entry_t *)LOCK_GET_ADDR(l);
    /* Simply check if address falls inside our write set (avoids non-faulting load) */
    if (tx->w_set.entries <= w && w < tx->w_set.entries + tx->w_set.nb_entries) {
      /* Yes: did we previously write the same address? */
      while (1) {
        if (addr == w->addr) {
          /* Yes: get value from write set (or from memory if mask was empty) */
          value = (w->mask == 0 ? ATOMIC_LOAD(addr) : w->value);
          break;
        }
        if (w->next == NULL) {
          /* No: get value from memory */
          value = ATOMIC_LOAD(addr);
          break;
        }
        w = w->next;
      }
      /* No need to add to read set (will remain valid) */
      return value;
    }
    /* Conflict: CM kicks in (we could also check for duplicate reads and get value from read set) */
    tx->c_lock = lock;
    /* Abort */
    tx->aborts_locked_read++;
    stm_rollback(tx, STM_ABORT_RW_CONFLICT);
    return 0;
  } else {
    /* Not locked */
    /* Sample value, then re-read the lock: if the lock word changed in
     * between, the value may be inconsistent => reprocess the new lock */
    value = ATOMIC_LOAD_ACQ(addr);
    l2 = ATOMIC_LOAD_ACQ(lock);
    if (l != l2) {
      l = l2;
      goto restart_no_load;
    }
    /* Check timestamp */
    version = LOCK_GET_TIMESTAMP(l);
    /* Valid version? */
    if (version > tx->end) {
      /* No: try to extend first (except for read-only transactions: no read set) */
      if (tx->ro || !tx->can_extend || !stm_extend(tx)) {
        /* Not much we can do: abort */
        tx->aborts_validate_read++;
        stm_rollback(tx, STM_ABORT_VAL_READ);
        return 0;
      }
      /* Verify that version has not been overwritten (read value has not
       * yet been added to read set and may have not been checked during
       * extend) */
      l = ATOMIC_LOAD_ACQ(lock);
      if (l != l2) {
        l = l2;
        goto restart_no_load;
      }
      /* Worked: we now have a good version (version <= tx->end) */
    }
  }
  /* We have a good version: add to read set (update transactions) and return value */

  if (!tx->ro) {
    /* Add address and version to read set */
    if (tx->r_set.nb_entries == tx->r_set.size)
      stm_allocate_rs_entries(tx, 1);
    r = &tx->r_set.entries[tx->r_set.nb_entries++];
    r->version = version;
    r->lock = lock;
  }
  return value;
}
Example 10
0
/*
 * Called by the CURRENT thread to abort a transaction.
 * The descriptor is obtained via TX_GET; the caller-supplied reason is
 * combined with STM_ABORT_EXPLICIT before rolling back.
 */
void stm_abort(TXPARAMS int reason)
{
  TX_GET;
  stm_rollback(tx, STM_ABORT_EXPLICIT | reason);
}
Example 11
0
/*
 * Called by the CURRENT thread to commit a transaction.
 *
 * For nested transactions only the outermost commit does any work
 * (flat nesting).  Read-only transactions (empty write set) commit
 * without validation.  Update transactions fetch a commit timestamp,
 * validate the read set if the clock advanced since tx->start, then
 * write back buffered values and release their locks with the new
 * timestamp.
 *
 * Returns 1 on success; 0 is not reached on the failure path because
 * stm_rollback() presumably longjmps (TODO confirm).
 */
int stm_commit(TXPARAM)
{
  w_entry_t *w;
  stm_word_t t;
  int i;
  TX_GET;

  PRINT_DEBUG("==> stm_commit(%p[%lu-%lu])\n", tx, (unsigned long)tx->start, (unsigned long)tx->end);

  /* Decrement nesting level */
  if (--tx->nesting > 0)
    return 1;

  assert(IS_ACTIVE(tx->status));

  /* A read-only transaction can commit immediately */
  if (tx->w_set.nb_entries == 0)
    goto end;


  /* Update transaction */

  /* Get commit timestamp (may exceed VERSION_MAX by up to MAX_THREADS) */
  t = FETCH_INC_CLOCK + 1;

  /* Try to validate (only if a concurrent transaction has committed since tx->start) */
  if (tx->start != t - 1 && !stm_validate(tx)) {
    /* Cannot commit */
    tx->aborts_validate_commit++;
    stm_rollback(tx, STM_ABORT_VALIDATE);
    return 0;
  }

  /* Install new versions, drop locks and set new timestamp */
  w = tx->w_set.entries;
  for (i = tx->w_set.nb_entries; i > 0; i--, w++) {
    /* mask == 0 entries only marked ownership: nothing to write back */
    if (w->mask != 0)
      ATOMIC_STORE(w->addr, w->value);
    /* Only drop lock for last covered address in write set */
    if (w->next == NULL)
      ATOMIC_STORE_REL(w->lock, LOCK_SET_TIMESTAMP(t));
  }

 end:
  tx->retries = 0;



  /* Callbacks */
  if (nb_commit_cb != 0) {
    int cb;
    for (cb = 0; cb < nb_commit_cb; cb++)
      commit_cb[cb].f(TXARGS commit_cb[cb].arg);
  }


  /* Set status to COMMITTED */
  SET_STATUS(tx->status, TX_COMMITTED);

  return 1;
}