Example #1
int main(int argc, char *argv[]) {
	int size = argc > 1 ? atoi(argv[1]) : 10000;
	volatile int* a = (volatile int *) malloc(size * sizeof(int));
	printf("Array size:    %d\n", size);

	int failures = 0;
	int attempts = 0;

	int loops = argc > 2 ? atoi(argv[2]) : 100000;
	int i;
	for (i = 0; i < loops; i++) {
		retry: attempts++;
		if (_xbegin() == _XBEGIN_STARTED) {
			for (int i = 0; i < size; i++) {
				a[i]++;
			}
			_xend();
		} else {
			failures++;
//			goto retry;
		}
	}

	free((void*) a);

	printf("Loops:         %d\n", loops);
	printf("Attempts:      %d\n", attempts);
	printf("Failures:      %d (Rate: %.2f%%)\n", failures,
			(float) failures * 100 / attempts);

	return 0;
}
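This snippet assumes <immintrin.h>, <stdio.h>, <stdlib.h> and a compiler invoked with RTM support (e.g. gcc -mrtm). Executing _xbegin() on a CPU without RTM raises #UD, so in practice a feature check belongs in front of it; a minimal sketch using GCC/Clang's <cpuid.h>, not part of the original example:

#include <cpuid.h>

/* Returns non-zero when the CPU advertises RTM (CPUID.(EAX=07H,ECX=0):EBX bit 11). */
static int cpu_has_rtm(void) {
	unsigned eax, ebx, ecx, edx;
	if (!__get_cpuid_count(7, 0, &eax, &ebx, &ecx, &edx))
		return 0;
	return (ebx >> 11) & 1;
}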
Example #2
File: mt.c Project: yoanlcq/FATE
static void fe_mt_tsx_lock_x86rtm(fe_mt_tsx *tsx) {

    static const char *TAG = "fe_mt";

    /* Currently, we prevent nested transactions. 
     * FIXME Prove it is actually required. */
    if(_xtest()) 
        return;

    unsigned status;
retry:
    status = _xbegin();
    if(status == _XBEGIN_STARTED)
        return;
    if(status & _XABORT_RETRY)
        goto retry;

    fe_logv("x86 RTM transaction aborted."
              "Falling back to regular lock. Reason :\n\t");
    if(status & _XABORT_EXPLICIT)
        fe_logv(TAG, "Explicit abort. Code : %d\n", _XABORT_CODE(status)); 
    if(status & _XABORT_CONFLICT)
        fe_logv(TAG, "Memory conflict with another thread.\n");
    if(status & _XABORT_CAPACITY)
        fe_logv(TAG, "Too much memory used by the transaction.\n");
    if(status & _XABORT_DEBUG)
        fe_logv(TAG, "Debug trap.\n");
    if(status & _XABORT_NESTED)
        fe_logv(TAG, "Abort in an inner nested transaction.\n");

    fe_mt_tsx_lock_fallback(tsx);
}
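The lock path above falls back to fe_mt_tsx_lock_fallback() when the transaction cannot run, so the unlock path has to tell the two cases apart. The project's actual unlock routine is not shown here; a plausible counterpart, sketched with an assumed fe_mt_tsx_unlock_fallback() helper, would be:

static void fe_mt_tsx_unlock_x86rtm(fe_mt_tsx *tsx) {
    /* Still inside a transaction: commit it. Otherwise the fallback
     * lock was taken in fe_mt_tsx_lock_x86rtm() and must be released. */
    if(_xtest())
        _xend();
    else
        fe_mt_tsx_unlock_fallback(tsx); /* assumed counterpart of fe_mt_tsx_lock_fallback() */
}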
Example #3
int main()
{
  unsigned status;
  if ((status = _xbegin()) == _XBEGIN_STARTED) {
    _xend();
  } else {
    _xabort(_XABORT_CONFLICT);
  }
}
Example #4
  TransactionalScope::TransactionalScope(spinlock_t &fallback_mutex, bool writeAccess) : 
    // initializer list
    spinlock(fallback_mutex) 
  {
  unsigned int xact_status;
  threadstate_t &ts = tstate;
  
  ts.txCount++;

  // we are already executing transactionally, continue.
  if (_xtest()) return;

  do {
    xact_status = _xbegin();

    if (xact_status == _XBEGIN_STARTED) {
      
      if ( *(reinterpret_cast<int*>(&fallback_mutex)) == 0 ) { 
	return;
      } else { 
	_xabort(0xFF); 
      }
    
    } else { 
      /** We have aborted. */
      ++ts.totalAborts;
      ++ts.successiveAborts;

      // if we xaborted because the lock was held, acquire the lock
      if ((xact_status & _XABORT_EXPLICIT) && _XABORT_CODE(xact_status) == 0xFF) {
	ts.maxAborts = 1;
	ts.maxTxLen = 1;
	break;
      }

      // if xabort:retry or xabort:conflict is set, retry
      if (xact_status & (_XABORT_RETRY | _XABORT_CONFLICT)) {
	ts.maxTxLen = 1;
      }

      // if we used too much buffer space inside the transaction, halve the max transaction length
      if ((xact_status & _XABORT_CAPACITY)) {
	ts.maxTxLen = 1;
      }
      _mm_pause();
    }
  } while (ts.successiveAborts < ts.maxAborts);

  ts.fallbackTaken++;

  // Fallback to lock
  if (writeAccess) { 
    spinlock.lock(); 
  } else { 
    spinlock.lock_read();
  }
}
Example #5
int main(void)
{
	int status;
	if ((status = _xbegin()) == _XBEGIN_STARTED) {
		if (_xtest())
			_xabort(1);
		_xend();
	} else
		printf("aborted %x, %d", status, _XABORT_CODE(status));
	return 0;
}
Example #6
int main(void)
{
	unsigned status;
	if ((status = _xbegin()) == 0) { 
		TXN_ASSERT_ONLY(!foo);
	} else { 
		txn_assert_abort_hook(status);
	}	
	f2();
	return 0;
}
Example #7
int HTM_start(htm_thread_data_t *self) {
	unsigned int status = 0;
	status = _xbegin();
	if (status != _XBEGIN_STARTED)
	{
		self->last_htm_abort = status;
		HTM_status_collect(self, status);
		return 0;
	}
	return 1;
}
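HTM_start() returns 1 when the transaction is running and 0 after recording the abort status. The matching commit helper is not shown; under the same conventions it would presumably be little more than a wrapper around _xend() (a sketch, name assumed):

void HTM_commit(htm_thread_data_t *self) {
	/* Only legal while a transaction is active; _xend() outside one faults. */
	_xend();
	(void)self; /* commit-side statistics, if any, would be recorded here */
}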
Example #8
int run_seq_write(volatile type *array[], int size) {
	if (_xbegin() == _XBEGIN_STARTED) {
		for (int i = 0; i < size; i++) {
			(*array)[i]++; // write
		}
		_xend();
		return 1;
	} else {
		return 0;
	}
}
Example #9
int run_seq_read(volatile type *array[], int size) {
	type dummy;
	if (_xbegin() == _XBEGIN_STARTED) {
		for (int i = 0; i < size; i++) {
			dummy = (*array)[i]; // read - GCC option O0 needed!
		}
		_xend();
		return 1;
	} else {
		return 0;
	}
}
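Since run_seq_write() and run_seq_read() return 1 on commit and 0 on abort, measuring a commit rate is a one-liner per iteration. A minimal driver, assuming type is a plain int typedef and the usual includes:

#include <stdio.h>
#include <stdlib.h>

typedef int type;

int main(void) {
	enum { SIZE = 1024, LOOPS = 10000 };
	volatile type *array = calloc(SIZE, sizeof(type));
	int commits = 0;
	for (int i = 0; i < LOOPS; i++)
		commits += run_seq_write(&array, SIZE);
	printf("Commit rate: %.2f%%\n", (float) commits * 100 / LOOPS);
	free((void *) array);
	return 0;
}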
Example #10
/*
 * [lyj] Disable the interpretation of "deleted" field.
 */
void fraser_insert(sl_intset_t *set, uint32_t v, bool lin)
{
    sl_node_t *NEW, *new_next, *pred, *succ, *succs[LEVELMAX], *preds[LEVELMAX];
    uint32_t i;
    uint32_t status;
   // uint32_t attempts = 0;
    NEW = sl_new_simple_node(v, get_rand_level(), lin);
retry:
    fraser_search(set, v, preds, succs);
    for (i = 0; i < NEW->toplevel; i++)
        NEW->nexts[i] = succs[i];
    /* Node is visible once inserted at lowest level */
    if (!ATOMIC_CAS_MB(&preds[0]->nexts[0], succs[0], NEW))
        goto retry;
    
//retry_HTM:
    status = _xbegin();
    if(status == _XBEGIN_STARTED)
    {
        for (i = 1; i < NEW->toplevel; i++) {
            if(preds[i]->nexts[i] == succs[i])
                preds[i]->nexts[i] = NEW;
            else
                _xabort(66);
        }
        _xend();
        return;
    }/*else
    {
        if ((status & _XABORT_EXPLICIT) && _XABORT_CODE(status) == 66) {
        }
        else if (++attempts < MAX_ATTEMPT_NUM) {
            goto retry_HTM;
        }
    }*/
    
    for (i = 1; i < NEW->toplevel; i++) {
        while (1) {
            pred = preds[i];
            succ = succs[i];
            /* Update the forward pointer if it is stale */
            new_next = NEW->nexts[i];
            if ((new_next != succ) &&
                (!ATOMIC_CAS_MB(&NEW->nexts[i], unset_mark((uint32_t)new_next), succ)))
                break;
            /* Give up if pointer is marked */
            /* We retry the search if the CAS fails */
            if (ATOMIC_CAS_MB(&pred->nexts[i], succ, NEW))
                break;
            fraser_search(set, v, preds, succs);
        }
    }
}
Example #11
inline int mark_node_ptrs(sl_node_t *n)
{
    sl_node_t *n_next;
    
    uint32_t status;
    //uint32_t attempts = 0;
    uint32_t i = n->toplevel - 1;
//retry:
    status = _xbegin();
    if(status == _XBEGIN_STARTED)
    {
        while ( i > 0 )
        {
            if(!is_marked((uint32_t)n->nexts[i]))
                n->nexts[i] = (sl_node_t*)set_mark((uint32_t)n->nexts[i]);
            i--;
        }
        if (is_marked((uint32_t)n->nexts[0]))
        {
            _xend();
            return 0;
        }else
        {
            n->nexts[0] = (sl_node_t*)set_mark((uint32_t)n->nexts[0]);
            _xend();
            return 1;
        }
    }/*else{
        if (++attempts < MAX_ATTEMPT_NUM) {
            goto retry;
        }
    }*/
        
    for (int i=n->toplevel-1; i>0; i--) {
        do {
            n_next = n->nexts[i];
            if (is_marked((uint32_t)n_next))
                break;
            if (ATOMIC_CAS_MB(&n->nexts[i], n_next, (sl_node_t*)set_mark((uint32_t)n_next)))
                break;
        } while (true);
    }
    do {
        n_next = n->nexts[0];
        if (is_marked((uint32_t)n_next))
            return 0;
        if (ATOMIC_CAS_MB(&n->nexts[0], n_next, (sl_node_t*)set_mark((uint32_t)n_next)))
            return 1;
    } while (true);
}
Example #12
int main(int argc, char *argv[]) {
	int loops = 100, inner_loops = 100;
	int smin = 0, smax = 100000, sstep = 5000;
	int *values[] = { &loops, &smin, &smin, &smax, &smax, &sstep, &sstep };
	const char *identifier[] = { "-l", "-smin", "-rmin", "-smax", "-rmax",
			"-sstep", "-rstep" };
	handle_args(argc, argv, 7, values, identifier);

	printf("Loops: %d\n", loops);
	printf("Sizes: %d - %d, steps of %d\n", smin, smax, sstep);

	printf("Size;Expected Value;Standard deviation\n");
	for (int size = smin; size <= smax; size += sstep) {
		printf("%d", size);

		Stats stats;
		for (int l = 0; l < loops; l++) {
			int failures = 0;
			for (int il = 0; il < inner_loops; il++) {
				// init
				unsigned char a[size];
				for (int i = 0; i < size; i++)
					a[i] = 0;

				// run
				int flip = 0; // alternates between 0 (read) and 1 (write)
				int dummy;

				if (_xbegin() == _XBEGIN_STARTED) {
					for (int i = 0; i < size; i++) {
						if (flip)
							dummy = a[i]; // -O0!
						else
							a[i]++;

						flip ^= 1;
					}
					_xend();
				} else {
					failures++;
				}
			}
			float failure_rate = (float) failures * 100 / inner_loops;
			stats.addValue(failure_rate);
		}
		printf(";%.2f;%.2f\n", stats.getExpectedValue(),
				stats.getStandardDeviation());
	}
}
Example #13
int BST::addRTM(Node *node) {
    int state = TRANSACTION;
    int attempt = 1;
    int nabort = 0;
    int res;

    while (1) {
        UINT status = _XBEGIN_STARTED;
        if (state == TRANSACTION) {
            status = _xbegin();
        }
        else {
            tatas_lock.acquireOptimistic();
        }

        if (status == _XBEGIN_STARTED) {
            if (state == TRANSACTION && tatas_lock.getValue()) {
                _xabort(0xA0);
                nabort++;
            }

            res = add(node);

            if (state == TRANSACTION) {
                _xend();
            }
            else {
                tatas_lock.release();
            }
            break;
        }
        else {
            if (tatas_lock.getValue()) {
                do {
                    _mm_pause();
                } while (tatas_lock.getValue());
            }
            else {
                volatile UINT64 wait = attempt;
                while (wait--);
            }
            if (++attempt >= MAXATTEMPT) {
                state = LOCK;
            }
        }
    }
    return res;
}
Example #14
int run_rnd_read(volatile type *array[], int size) {
	type dummy;
	int failures = 0;
	retry:
//	srand(rnd);
	rnd = RAND(size);
	if (_xbegin() == _XBEGIN_STARTED) {
		for (int i = 0; i < size; i++) {
			dummy = (*array)[RAND(size)]; // read - GCC option O0 needed!
		}
		_xend();
		return 1;
	} else {
		if (failures++ < max_retries)
			goto retry;
		return 0;
	}
}
Example #15
void *child(void *dummy)
{
	int status;
retry:
	if ((status = _xbegin()) == _XBEGIN_STARTED) {
		count = 0;
		_xend();
	} else if ((status & _XABORT_RETRY) != 0) {
		assert(0 && "please run with -X -A -S to suppress retries");
		goto retry;
	} else {
		assert((status & _XABORT_CONFLICT) != 0 && "unexpected abort mode");
		// it would be a causality violation for this to be visible to
		// the parent's xadd, which has to happen first for us to abort
		__sync_fetch_and_add(&count, 1);
	}
	return NULL;
}
Example #16
void txn()
{
	int status;
	if ((status = _xbegin()) == _XBEGIN_STARTED) {
		count++;
		// TODO - 
		// omitting xend should not cause a "deadlock" in which the
		// second thread "blocks" forever at xbegin,
		// but rather should force the 1st txn to abort.
		// double bonus TODO: how should we behave if the "last" thread
		// exits w/o xending, without any contender to abort it?
		// _xend();
	} else {
		mutex_lock(&lock);
		count++;
		mutex_unlock(&lock);
	}
}
Example #17
int run_rnd_write(volatile type *array[], int size) {
	int failures = 0;
	retry:
//	srand(rnd); // generate new randomness
	// (otherwise, the old "random" values would occur again after a retry
	// since the internal rand()-value would be aborted and therefore reset)
	rnd = RAND(size);
	if (_xbegin() == _XBEGIN_STARTED) {
		for (int i = 0; i < size; i++) {
			(*array)[RAND(size)]++; // write
		}
		_xend();
		return 1;
	} else {
		if (failures++ < max_retries)
			goto retry;
		return 0;
	}
}
Example #18
int
__lll_trylock_elision (int *futex, short *adapt_count)
{
    /* Implement POSIX semantics by forbidding nesting
       trylock.  Sorry.  After the abort the code is re-executed
       non transactional and if the lock was already locked
       return an error.  */
    _xabort (_ABORT_NESTED_TRYLOCK);

    /* Only try a transaction if it's worth it.  */
    if (*adapt_count <= 0)
    {
        unsigned status;

        if ((status = _xbegin()) == _XBEGIN_STARTED)
        {
            if (*futex == 0)
                return 0;

            /* Lock was busy.  Fall back to normal locking.
               Could also _xend here but xabort with 0xff code
               is more visible in the profiler.  */
            _xabort (_ABORT_LOCK_BUSY);
        }

        if (!(status & _XABORT_RETRY))
        {
            /* Internal abort.  No chance for retry.  For future
               locks don't try speculation for some time.  */
            if (*adapt_count != aconf.skip_trylock_internal_abort)
                *adapt_count = aconf.skip_trylock_internal_abort;
        }
        /* Could do some retries here.  */
    }
    else
    {
        /* Lost updates are possible, but harmless.  */
        (*adapt_count)--;
    }

    return lll_trylock (*futex);
}
Example #19
JNIEXPORT jobject JNICALL Java_javartm_Transaction_doTransactionally(JNIEnv *env, jclass cls, jobject atomicBlock, jobject fallbackBlock) {
	// TODO: Add some caching
	jclass atomicBlockClass = (*env)->GetObjectClass(env, atomicBlock);
	jmethodID callMethodId = (*env)->GetMethodID(env, atomicBlockClass, "call", "()Ljava/lang/Object;");
	if (!callMethodId) return NULL;

	printf("Preparing execution...\n");
	int res = _xbegin();
	if (_xtest()) {
		jobject retValue = (*env)->CallObjectMethod(env, atomicBlock, callMethodId);
		_xend();
		printf("Successful commit\n");
		return retValue;
	}

	printf("Abort or failed to start tx res = %d\n", res);
	jclass fallbackBlockClass = (*env)->GetObjectClass(env, fallbackBlock);
	callMethodId = (*env)->GetMethodID(env, fallbackBlockClass, "call", "()Ljava/lang/Object;");
	if (!callMethodId) return NULL;
	return (*env)->CallObjectMethod(env, fallbackBlock, callMethodId);
}
Example #20
// Begin is used in multiple methods, but we force it to be inlined to avoid extra work after
// the transaction is started
__attribute__((always_inline)) inline int begin() {
	int status;
	int failtimes = 0;
	while ((status = _xbegin()) == (_XABORT_RETRY | _XABORT_CONFLICT)) {
		// When there are multiple processors fighting for the same memory zones,
		// we get an abort with RETRY + CONFLICT flags set. For now, we use a simple
		// backoff strategy, using the x86 pause instruction (which was introduced
		// as a better means of doing backoff during spinlocks).
		//
		// Some open issues/notes on this approach:
		// - Should we just give up after a while instead of eternally looping?
	// - Should backoff be more aggressive than linearly increasing failtimes?
		// - What's the best value for the limit?
		failtimes++;
		for (int i = 0; i < failtimes; i++) {
			_mm_pause(); _mm_pause(); _mm_pause(); _mm_pause(); _mm_pause();
		}
		failtimes = failtimes < PAUSETIMES_LIMIT ? failtimes : PAUSETIMES_LIMIT;
	}
	return status;
}
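Note that begin() only swallows the RETRY+CONFLICT case; capacity aborts, explicit aborts and a successful start are all returned to the caller unchanged, so the call site still looks like an ordinary _xbegin() check. A sketch of such a call site (shared_counter and the fallback lock are placeholders):

if (begin() == _XBEGIN_STARTED) {
	shared_counter++;   // transactional fast path (placeholder update)
	_xend();
} else {
	fallback_lock();    // placeholder fallback, e.g. a spinlock
	shared_counter++;
	fallback_unlock();
}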
Example #21
void spin_lock_rtm(int *lock)
{
	int i;
	unsigned status;
	unsigned retry = RETRY_OTHER;

	for (i = 0; i < retry; i++) {
		if ((status = _xbegin()) == _XBEGIN_STARTED) {
			if (lock_is_free(lock))
				return;
			_xabort(0xff);
		}
		trace_abort(status);
		if ((status & _XABORT_EXPLICIT) && _XABORT_CODE(status) == 0xff) {
			while (!lock_is_free(lock))
				pause();
		} else if (!(status & _XABORT_RETRY) && !(status & _XABORT_CAPACITY))
			break;

		if (status & _XABORT_CONFLICT) {
			retry = RETRY_CON;
			while (!lock_is_free(lock))
				pause();
			/* Could do various kinds of backoff here. */
		} else if (status & _XABORT_CAPACITY) {
			retry = RETRY_CAP;
		} else {
			retry = RETRY_OTHER;
		}
	}
	/* Could do adaptation here */

	while (__sync_sub_and_fetch(lock, 1) < 0) {
		do
			pause();
		while (!lock_is_free(lock));
		/* Could do respeculation here */
	}
}
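A matching spin_unlock_rtm() is not shown. The usual pattern commits the transaction on the elided path (the lock word was never written, so lock_is_free() still holds) and only stores the free value back when the lock was really taken; a sketch, assuming the free value is 1:

void spin_unlock_rtm(int *lock)
{
	if (lock_is_free(lock)) {
		_xend();                /* lock was elided: just commit */
	} else {
		__sync_synchronize();   /* release fence before publishing */
		*lock = 1;              /* assumed 'free' value, matching lock_is_free() */
	}
}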
Example #22
JNIEXPORT jint JNICALL Java_javartm_Transaction_begin(JNIEnv *env, jclass cls) {
	return _xbegin();
}
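The begin binding just forwards _xbegin()'s status word to Java. A commit binding would presumably mirror it around _xend(); this is a sketch, and the method name is an assumption rather than something taken from the javartm sources:

JNIEXPORT void JNICALL Java_javartm_Transaction_commit(JNIEnv *env, jclass cls) {
	_xend();
}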
Example #23
unsigned int test_xbegin(void) {
  // CHECK: i32 @llvm.x86.xbegin()
  return _xbegin();
}
Example #24
unsigned int __attribute__((__target__("rtm"))) xbegin_wrap(void) {
  return _xbegin();
}
Example #25
void mem_write(tid_t tid, uint32_t *addr, uint32_t val) {
    version_t version;
    objid_t objid = calc_objid(addr);
    struct objinfo *info = &g_objinfo[objid];

    if ((g_sim_bbcnt % RTM_BATCH_N) == 0) {
        assert(!_xtest());
        int ret = _xbegin();
        (void)ret;
#ifdef RTM_STAT
        if (ret != _XBEGIN_STARTED) {
            fprintf(stderr, "T%d W%ld aborted %x, %d\n", g_tid, memop, ret,
                    _XABORT_CODE(ret));
            g_rtm_abort_cnt++;
        }
#endif
    }

    int in_rtm = _xtest();
    if (in_rtm) {
        version = info->version;
        // XXX To ensure exclusion of write tx and fallback. Same as in read
        // transaction.
        if (info->write_lock) {
            _xabort(3);
        }
        barrier();
        *addr = val;
        barrier();
        info->version += 2;
        // XXX The barrier is necessary, because there are reordering inside a
        // transaction.  The reason is the same as in seqlock implementation.
        __sync_synchronize();
    } else {
        spin_lock(&info->write_lock);

        version = info->version;
        barrier();

        // Odd version means that there's writer trying to update value.
        info->version++;
        barrier();
        *addr = val;
        // This barrier disallows the read from happening before the write.
        // The explicit barrier may also make the earlier compiler barrier() unnecessary.
        __sync_synchronize();
        info->version++;

        spin_unlock(&info->write_lock);
    }

    if (in_rtm && (g_sim_bbcnt % RTM_BATCH_N == RTM_BATCH_N - 1))  {
        // XXX Update: since we have checked lock in tx region, we
        // will abort for parallel execution of write tx and write fallback.
        // So no need to check for lock here.
        /*
         *if (info->write_lock) {
         *    _xabort(4);
         *}
         */
        _xend();
        // Avoid taking log inside transaction.
        batch_write_log(objid, version);
        batch_process_log();
    } else {
        batch_write_log(objid, version);
    }

    g_sim_bbcnt++;
}
Example #26
uint32_t mem_read(tid_t tid, uint32_t *addr) {
    version_t version;
    uint32_t val;
    objid_t objid = calc_objid(addr);
    struct objinfo *info = &g_objinfo[objid];

    if ((g_sim_bbcnt % RTM_BATCH_N) == 0) { // Simulate basic block begin.
        assert(!_xtest());
        int ret = _xbegin();
        (void)ret;
#ifdef RTM_STAT
        if (ret != _XBEGIN_STARTED) {
            fprintf(stderr, "T%d R%ld aborted %x, %d\n", g_tid, memop, ret,
                    _XABORT_CODE(ret));
            g_rtm_abort_cnt++;
        }
#endif
    }

    int in_rtm = _xtest();
    if (in_rtm) {
        version = info->version;
        // XXX It's possible the transaction commits while another write is in
        // fallback handler and has increased version by 1, thus we would get an
        // odd version here. Also refer to read tx in rtmseq.
        if (version & 1) {
            _xabort(1);
        }
        val = *addr;
    } else {
        do {
            version = info->version;
            while (unlikely(version & 1)) {
                cpu_relax();
                version = info->version;
            }
            barrier();
            val = *addr;
            barrier();
        } while (version != info->version);
    }

    if (in_rtm && (g_sim_bbcnt % RTM_BATCH_N == RTM_BATCH_N - 1)) { // Simulate basic block end.
        // XXX Update: since we have checked odd version in tx region, we
        // will abort for parallel execution of read tx and write fallback.
        // So no need to check for lock here.
        // XXX A transaction is accessing different shared objects. Here we only
        // check for a single object's write lock, it's not enough. That's why
        // we need the odd version check in the transaction region.
        /*
         *if (info->write_lock) {
         *    _xabort(2);
         *}
         */
        _xend();
        // Avoid taking log inside transaction.
        batch_read_log(objid, version);
        batch_process_log();
    } else {
        batch_read_log(objid, version);
    }

    g_sim_bbcnt++;
    return val;
}