Example No. 1
uint64_t CycleCounter::read()
{
	if (emulationLevel >= EMEX_EXTRA_SMALL_5XX)
	{ 
		MemoryManager* mm = deviceHandle->getMemoryManager();
		EemMemoryAccess* ema = (EemMemoryAccess*)mm->getMemoryArea("EEM");

		union CycleCount { struct { uint32_t low, high; }; uint64_t value; };
		CycleCount cycleCount;

		ema->readEemRegister(CCNT0L, &cycleCount.low) && ema->sync();
		ema->readEemRegister(CCNT0H, &cycleCount.high) && ema->sync();
		
		counterValue = 0;

		uint64_t factor = 1;	// 64-bit: the positional weight reaches 15^9, which overflows 32 bits
		uint32_t lsfr2hex[16] = {0x0, 0x1, 0x2, 0x7, 0x5, 0x3, 0x8, 0xb, 0xe, 0x6, 0x4, 0xa, 0xd, 0x9, 0xc, 0};
		for (int i = 0; i < 10; ++i)
		{
			counterValue += factor * lsfr2hex[(cycleCount.value & 0xf)];
			cycleCount.value >>= 4;
			factor *= 15;
		}	
	}

	return counterValue;
}
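For reference, a minimal standalone sketch of the same nibble-wise decode, factored into a free function. The lookup table is copied from the snippet above; the function name and the reading of each 4-bit field as a 15-state LFSR are my assumptions, not part of the original code.

#include <cstdint>

// Sketch: decode a raw counter whose 4-bit fields hold LFSR states.
// Each field is mapped back to its position in the LFSR sequence, and the
// fields are combined with base-15 positional weights.
static uint64_t decode_lfsr_counter(uint64_t raw)
{
	static const uint32_t lfsr2hex[16] =
		{0x0, 0x1, 0x2, 0x7, 0x5, 0x3, 0x8, 0xb,
		 0xe, 0x6, 0x4, 0xa, 0xd, 0x9, 0xc, 0x0};

	uint64_t value = 0;
	uint64_t factor = 1;
	for (int i = 0; i < 10; ++i)
	{
		value += factor * lfsr2hex[raw & 0xf];
		raw >>= 4;
		factor *= 15;
	}
	return value;
}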
Example No. 2
		void process() {
			while(nextOperation) {
				auto operation = nextOperation;
				switch(operation->type) {
				case OperationType::Add: onAdd(operation); break;
				case OperationType::Remove: onRemove(operation); break;
				case OperationType::RemoveAll: onRemoveAll(operation); break;
				default:
					throw std::runtime_error("Unexpected EntityOperation type");
				}

				nextOperation = operation->nextOperation;
				operation->~T();
				memoryManager->free(sizeof(T), alignof(T), operation);
			}
			nextOperation = nullptr;
			lastOperation = nullptr;
		}
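For context, the `operation->~T()` / `memoryManager->free(...)` pair above undoes a placement-new allocation of the kind shown later in Example No. 13. Below is a self-contained sketch of that allocate/construct/destroy/free cycle; SimpleManager is a stand-in for the pool-style memoryManager assumed by the snippets, not its real implementation.

#include <cstddef>
#include <new>

// Stand-in allocator with the same allocate/free shape as the snippets' memoryManager.
struct SimpleManager {
	void* allocate(std::size_t size, std::size_t /*alignment*/) { return ::operator new(size); }
	void free(std::size_t /*size*/, std::size_t /*alignment*/, void* p) { ::operator delete(p); }
};

struct Operation { int type = 0; };

int main()
{
	SimpleManager memoryManager;

	// Allocate raw storage, then placement-construct the object in it.
	void* memory = memoryManager.allocate(sizeof(Operation), alignof(Operation));
	Operation* op = new (memory) Operation();

	// Tear down in reverse order: explicit destructor call, then return the storage.
	op->~Operation();
	memoryManager.free(sizeof(Operation), alignof(Operation), op);
	return 0;
}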
Example No. 3
namespace globalF4MPI{
	MemoryManager MonomialAllocator;
//	PolynomMap globalPolynomMap;

	GlobalOptions globalOptions;
	// Initialize the global variables after they have been defined by the parser
	void InitializeGlobalOptions(){
		CMonomial::setOrder((CMonomial::Order)globalOptions.monomOrder, globalOptions.monomOrderParam);
		CModular::setMOD(globalOptions.mod);
		CMonomial::setNumberOfVariables(globalOptions.numberOfVariables);
		globalF4MPI::MonomialAllocator.setSize(CMonomial::degreessize);
		PODvecSize<CInternalMonomial>::setvalsize(CMonomial::degreessize);
		MonomialAllocator.reset();
	}
	void Finalize(){
		MonomialAllocator.reset();
	}
}
Example No. 4
    Value eval_map(
        MemoryManager                  mm,
        const Functional::value_vec_t& secondary_args,
        boost::any&                    map_state,
        Value                          subvalue
    ) const
    {
        /* If the subvalue we've been given is NULL, likewise return a NULL
         * subvalue. We can't change anything. */
        if (! subvalue) {
            return Value();
        }

        if (subvalue.type() != Value::STRING) {
            return Value();
        }

        ConstByteString text = subvalue.as_string();
        boost::shared_ptr<vector<char> > result(new vector<char>());
        // Ensure that result.data() is non-null, even if we never insert
        // anything.
        result->reserve(1);

        // value_to_data() ensures that a copy is associated with the memory
        // pool and will be deleted when the memory pool goes away.
        value_to_data(result, mm.ib());

        boost::regex_replace(
            back_inserter(*result),
            text.const_data(), text.const_data() + text.length(),
            m_expression,
            m_replacement
        );

        return Value::create_string(
            mm,
            subvalue.name(), subvalue.name_length(),
            ByteString::create_alias(
                mm,
                result->data(), result->size()
            )
        );
    }
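A standalone illustration of the replace-into-buffer idiom used above: Boost.Regex lets regex_replace() write through an output iterator, so the rewritten text can be appended straight into a caller-owned byte vector. The pattern, replacement, and input below are invented for the example.

#include <boost/regex.hpp>
#include <iterator>
#include <string>
#include <vector>

int main()
{
    const std::string text = "id=123;id=456";
    const boost::regex expression("[0-9]+");
    const std::string replacement("N");

    // regex_replace() appends the rewritten text through back_inserter,
    // much as eval_map() appends into its pool-backed vector<char>.
    std::vector<char> result;
    boost::regex_replace(
        std::back_inserter(result),
        text.begin(), text.end(),
        expression,
        replacement
    );
    // result now holds "id=N;id=N"
    return 0;
}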
Example No. 5
ActionInstance ActionInstance::create(
    MemoryManager memory_manager,
    Context       context,
    ConstAction   action,
    const char*   parameters
)
{
    ib_action_inst_t* actioninst;

    throw_if_error(
        ib_action_inst_create(
            &actioninst,
            memory_manager.ib(),
            context.ib(),
            action.ib(),
            parameters
        )
    );

    return ActionInstance(actioninst);
}
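This example, and Examples No. 7 and 24 below, follow the same wrapping pattern: call the C entry point, turn a non-OK status code into an exception via throw_if_error(), and only then construct the C++ handle. Here is a hypothetical sketch of such a checker; the real IronBee++ helper differs in its status type, message formatting, and exception hierarchy.

#include <stdexcept>
#include <string>

// Hypothetical stand-ins for the C API's status convention.
typedef int ib_status_t;
enum { IB_OK = 0 };

// Sketch: convert a failing status code into a C++ exception so the wrapper
// never returns an object built from an invalid pointer.
inline void throw_if_error(ib_status_t rc, const char* what = "IronBee call failed")
{
    if (rc != IB_OK) {
        throw std::runtime_error(std::string(what) + " (status " + std::to_string(rc) + ")");
    }
}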
Example No. 6
TEST(TestMemoryManager, Allocations)
{
    ScopedMemoryPoolLite smpl;
    MemoryManager mm = MemoryPoolLite(smpl);
    void* p;
    char* c;

    ASSERT_TRUE(mm);

    p = NULL;
    p = mm.alloc(10);
    EXPECT_TRUE(p);

    p = NULL;
    p = mm.allocate<int>();
    EXPECT_TRUE(p);

    c = NULL;
    c = reinterpret_cast<char*>(mm.calloc(10));
    EXPECT_EQ(10L, count(c, c+10, '\0'));

    c = NULL;
    c = reinterpret_cast<char*>(mm.calloc(5, 7));
    EXPECT_EQ(35L, count(c, c+35, '\0'));

    static const string c_example = "Hello World";

    c = NULL;
    c = mm.strdup("Hello World");
    EXPECT_EQ(c_example, c);

    c = NULL;
    c = reinterpret_cast<char*>(
        mm.memdup(c_example.data(), c_example.size())
    );
    EXPECT_EQ(c_example, string(c, c_example.size()));

    c = NULL;
    c = mm.memdup_to_str(c_example.data(), c_example.size());
    EXPECT_EQ(c_example, c);
}
Example No. 7
OperatorInstance OperatorInstance::create(
    MemoryManager memory_manager,
    Context context,
    ConstOperator op,
    ib_flags_t required_capabilities,
    const char* parameters
)
{
    ib_operator_inst_t* opinst;

    throw_if_error(
        ib_operator_inst_create(
            &opinst,
            memory_manager.ib(),
            context.ib(),
            op.ib(),
            required_capabilities,
            parameters
        )
    );

    return OperatorInstance(opinst);
}
Example No. 8
  void CodeGen(MemoryManager& mm, handlers::CodeHandler& ch)
  {
    size_t ml = mm.RequestLocation(id);

    std::stringstream* ss = new std::stringstream();

    *ss << "buff[" << ml << "] = ";

    auto in_id = in_ids.begin();
    PrintAccess(*in_id, mm, *ss);
    in_id++;

    size_t or_count = 0;

    for(; in_id != in_ids.end(); ++in_id)
    {
      if(or_count > or_limit)
      {
        *ss << ";";
        ch.AddEntry(ss->str());
        delete ss;
        ss = new std::stringstream();
        *ss << "buff[" << ml << "] = buff[" << ml << "]";
        or_count = or_count - 1;
      }

      *ss << " || ";
      PrintAccess(*in_id, mm , *ss);
      or_count++;
    }

    *ss << ";";

    ch.AddEntry(ss->str());
    delete ss;
  }
Example No. 9
/*
 * Class:     org_upp_AndroidMath_Vector
 * Method:    destruct
 * Signature: ()V
 */
JNIEXPORT void JNICALL Java_org_upp_AndroidMath_Vector_nativeFinalize
  (JNIEnv *env, jobject obj)
{
	mm.Erase(env, obj);
}
Example No. 10
/*
 * Class:     org_upp_AndroidMath_Vector
 * Method:    copyConstruct
 * Signature: (Lorg/upp/AndroidMath/Vector;)V
 */
JNIEXPORT void JNICALL Java_org_upp_AndroidMath_Vector_copyConstruct
  (JNIEnv *env, jobject jobjThis, jobject jobjThat)
{
	mm.MakeCopy(env, jobjThis, jobjThat);
}
Example No. 11
/*
 * Class:     org_upp_AndroidMath_Vector
 * Method:    construct
 * Signature: (I)V
 */
JNIEXPORT void JNICALL Java_org_upp_AndroidMath_Vector_construct
  (JNIEnv *env, jobject obj, jint size)
{
	mm.Insert(env, obj, new Vector(size));
}
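These org_upp_AndroidMath_Vector bindings all go through a global `mm` object that ties each Java-side Vector to its native counterpart; its implementation is not part of these snippets. A minimal hypothetical sketch of such a manager follows: the `nativePtr` field name and the field-based handle scheme are my assumptions, the Vector stub stands in for the real native class, and MakeCopy is omitted.

#include <jni.h>

// Stub standing in for the real native Vector class used by these bindings.
class Vector {
public:
	explicit Vector(int /*size*/) {}
};

// Hypothetical manager in the spirit of the global `mm` used above. It assumes
// the Java class declares a `long nativePtr` field that caches the pointer to
// the native object.
class NativeObjectManager {
public:
	void Insert(JNIEnv* env, jobject obj, Vector* ptr) {
		env->SetLongField(obj, HandleField(env, obj), reinterpret_cast<jlong>(ptr));
	}
	Vector* Get(JNIEnv* env, jobject obj) {
		return reinterpret_cast<Vector*>(env->GetLongField(obj, HandleField(env, obj)));
	}
	void Erase(JNIEnv* env, jobject obj) {
		delete Get(env, obj);
		env->SetLongField(obj, HandleField(env, obj), static_cast<jlong>(0));
	}
private:
	static jfieldID HandleField(JNIEnv* env, jobject obj) {
		jclass cls = env->GetObjectClass(obj);
		return env->GetFieldID(cls, "nativePtr", "J");
	}
};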
Example No. 12
SerializedBuffer SerializedBuffer::allocate_grouped_buffer(
	MemoryManager &memory_manager,
	size_type maximum_record_count,
	size_type maximum_group_count,
	size_type total_key_size,
	size_type total_value_size,
	identifier_type target_node)
{
	size_type buffer_size = 0;
	// Common header
	const ptrdiff_t common_header_ptrdiff = buffer_size;
	buffer_size += sizeof(SerializedBufferHeader);
	// Keys
	const ptrdiff_t keys_header_ptrdiff = buffer_size;
	buffer_size += sizeof(SerializedKeysHeader);
	buffer_size  = align_ceil(buffer_size, alignof(max_align_t));
	const ptrdiff_t keys_data_ptrdiff = buffer_size;
	buffer_size += align_ceil(total_key_size, alignof(size_type)); // data
	const ptrdiff_t keys_offsets_ptrdiff = buffer_size;
	buffer_size += (maximum_group_count + 1) * sizeof(size_type);  // offsets
	// Values
	const ptrdiff_t values_header_ptrdiff = buffer_size;
	buffer_size += sizeof(SerializedValuesHeader);
	buffer_size  = align_ceil(buffer_size, alignof(max_align_t));
	const ptrdiff_t values_data_ptrdiff = buffer_size;
	buffer_size += align_ceil(total_value_size, alignof(size_type)); // data
	const ptrdiff_t values_offsets_ptrdiff = buffer_size;
	buffer_size += (maximum_record_count + 1) * sizeof(size_type);   // offsets
	buffer_size += (maximum_group_count + 1) * sizeof(size_type);   // group_offsets

	LockedMemoryReference locked_reference;
	if(target_node == TARGET_NODE_UNSPECIFIED){
		locked_reference = memory_manager.allocate(buffer_size).lock();
	}else{
		locked_reference =
			memory_manager.allocate(buffer_size, target_node).lock();
	}
	const auto ptr =
		reinterpret_cast<uintptr_t>(locked_reference.pointer());

	const auto common_header =
		reinterpret_cast<SerializedBufferHeader *>(
			ptr + common_header_ptrdiff);
	common_header->key_buffer_size =
		static_cast<size_type>(
			values_header_ptrdiff - keys_header_ptrdiff);
	common_header->value_buffer_size =
		static_cast<size_type>(buffer_size - values_header_ptrdiff);

	const auto keys_header =
		reinterpret_cast<SerializedKeysHeader *>(
			ptr + keys_header_ptrdiff);
	keys_header->data_buffer_size =
		static_cast<size_type>(keys_offsets_ptrdiff - keys_data_ptrdiff);
	keys_header->record_count = maximum_group_count;

	const auto values_header =
		reinterpret_cast<SerializedValuesHeader *>(
			ptr + values_header_ptrdiff);
	values_header->data_buffer_size =
		static_cast<size_type>(
			values_offsets_ptrdiff - values_data_ptrdiff);
	values_header->maximum_record_count = maximum_record_count;
	values_header->actual_record_count = 0;

	return SerializedBuffer(locked_reference);
}
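The layout arithmetic above relies on an align_ceil() helper that is not shown in this snippet. A minimal sketch of such a round-up function, assuming the usual semantics of rounding a size up to the next multiple of the alignment:

#include <cstddef>

// Round value up to the next multiple of alignment (alignment must be non-zero).
static std::size_t align_ceil(std::size_t value, std::size_t alignment)
{
	return ((value + alignment - 1) / alignment) * alignment;
}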
Example No. 13
		T *createOperation() {
			auto memory = memoryManager->allocate(sizeof(T), alignof(T));
			return new(memory) T();
		}
Example No. 14
inline void ContentLeafNameTypeVector::cleanUp()
{
	fMemoryManager->deallocate(fLeafNames); //delete [] fLeafNames;
	fMemoryManager->deallocate(fLeafTypes); //delete [] fLeafTypes;
}
Example No. 15
		T* create(Args && ... args) {
			auto memory = memoryManager->allocate(sizeof(T), alignof(T));
			return new(memory) T(std::forward<Args>(args) ...);
		}
Example No. 16
MemoryManager::MemoryManager(const MemoryManager& orig):
    MemoryManager(orig.getSize()){}
Example No. 17
			inline void update(FT mFT)
			{
				timelines.refresh();
				for(const auto& t : timelines) { t->update(mFT); if(t->isFinished()) timelines.del(*t); }
			}
Example No. 18
/*
 * Class:     org_upp_AndroidMath_Vector
 * Method:    set
 * Signature: (IF)V
 */
JNIEXPORT void JNICALL Java_org_upp_AndroidMath_Vector_set
  (JNIEnv *env, jobject obj, jint id, jfloat value)
{
	Vector* vec = mm.Get(env, obj);
	vec->Set(id, value);
}
Example No. 19
	void Finalize(){
		MonomialAllocator.reset();
	}
Example No. 20
/*
 * Class:     org_upp_AndroidMath_Vector
 * Method:    destruct
 * Signature: ()V
 */
JNIEXPORT void JNICALL Java_org_upp_AndroidMath_Vector_destroy
  (JNIEnv *env, jobject obj)
{
	mm.Erase(env, obj);
}
Example No. 21
/*
 * Class:     org_upp_AndroidMath_Vector
 * Method:    copyConstruct
 * Signature: (Lorg/upp/AndroidMath/Vector;)V
 */
JNIEXPORT void JNICALL Java_org_upp_AndroidMath_Vector_copyConstruct
  (JNIEnv *env, jobject objSrc, jobject objDst)
{
	mm.MakeCopy(env, objSrc, objDst);
}
Example No. 22
void FastMatmulRecursive(LockAndCounter& locker, MemoryManager<Scalar>& mem_mngr, Matrix<Scalar>& A, Matrix<Scalar>& B, Matrix<Scalar>& C, int total_steps, int steps_left, int start_index, double x, int num_threads, Scalar beta) {
    // Update multipliers
    C.UpdateMultiplier(A.multiplier());
    C.UpdateMultiplier(B.multiplier());
    A.set_multiplier(Scalar(1.0));
    B.set_multiplier(Scalar(1.0));
    // Base case for recursion
    if (steps_left == 0) {
        MatMul(A, B, C);
        return;
    }

    Matrix<Scalar> A11 = A.Subblock(2, 2, 1, 1);
    Matrix<Scalar> A12 = A.Subblock(2, 2, 1, 2);
    Matrix<Scalar> A21 = A.Subblock(2, 2, 2, 1);
    Matrix<Scalar> A22 = A.Subblock(2, 2, 2, 2);
    Matrix<Scalar> B11 = B.Subblock(2, 2, 1, 1);
    Matrix<Scalar> B12 = B.Subblock(2, 2, 1, 2);
    Matrix<Scalar> B21 = B.Subblock(2, 2, 2, 1);
    Matrix<Scalar> B22 = B.Subblock(2, 2, 2, 2);
    Matrix<Scalar> C11 = C.Subblock(2, 2, 1, 1);
    Matrix<Scalar> C12 = C.Subblock(2, 2, 1, 2);
    Matrix<Scalar> C21 = C.Subblock(2, 2, 2, 1);
    Matrix<Scalar> C22 = C.Subblock(2, 2, 2, 2);


    // Matrices to store the results of multiplications.
#ifdef _PARALLEL_
    Matrix<Scalar> M1(mem_mngr.GetMem(start_index, 1, total_steps - steps_left, M), C11.m(), C11.m(), C11.n(), C.multiplier());
    Matrix<Scalar> M2(mem_mngr.GetMem(start_index, 2, total_steps - steps_left, M), C11.m(), C11.m(), C11.n(), C.multiplier());
    Matrix<Scalar> M3(mem_mngr.GetMem(start_index, 3, total_steps - steps_left, M), C11.m(), C11.m(), C11.n(), C.multiplier());
    Matrix<Scalar> M4(mem_mngr.GetMem(start_index, 4, total_steps - steps_left, M), C11.m(), C11.m(), C11.n(), C.multiplier());
    Matrix<Scalar> M5(mem_mngr.GetMem(start_index, 5, total_steps - steps_left, M), C11.m(), C11.m(), C11.n(), C.multiplier());
    Matrix<Scalar> M6(mem_mngr.GetMem(start_index, 6, total_steps - steps_left, M), C11.m(), C11.m(), C11.n(), C.multiplier());
    Matrix<Scalar> M7(mem_mngr.GetMem(start_index, 7, total_steps - steps_left, M), C11.m(), C11.m(), C11.n(), C.multiplier());
    Matrix<Scalar> M8(mem_mngr.GetMem(start_index, 8, total_steps - steps_left, M), C11.m(), C11.m(), C11.n(), C.multiplier());
#else
    Matrix<Scalar> M1(C11.m(), C11.n(), C.multiplier());
    Matrix<Scalar> M2(C11.m(), C11.n(), C.multiplier());
    Matrix<Scalar> M3(C11.m(), C11.n(), C.multiplier());
    Matrix<Scalar> M4(C11.m(), C11.n(), C.multiplier());
    Matrix<Scalar> M5(C11.m(), C11.n(), C.multiplier());
    Matrix<Scalar> M6(C11.m(), C11.n(), C.multiplier());
    Matrix<Scalar> M7(C11.m(), C11.n(), C.multiplier());
    Matrix<Scalar> M8(C11.m(), C11.n(), C.multiplier());
#endif
#if defined(_PARALLEL_) && (_PARALLEL_ == _BFS_PAR_ || _PARALLEL_ == _HYBRID_PAR_)
    bool sequential1 = should_launch_task(8, total_steps, steps_left, start_index, 1, num_threads);
    bool sequential2 = should_launch_task(8, total_steps, steps_left, start_index, 2, num_threads);
    bool sequential3 = should_launch_task(8, total_steps, steps_left, start_index, 3, num_threads);
    bool sequential4 = should_launch_task(8, total_steps, steps_left, start_index, 4, num_threads);
    bool sequential5 = should_launch_task(8, total_steps, steps_left, start_index, 5, num_threads);
    bool sequential6 = should_launch_task(8, total_steps, steps_left, start_index, 6, num_threads);
    bool sequential7 = should_launch_task(8, total_steps, steps_left, start_index, 7, num_threads);
    bool sequential8 = should_launch_task(8, total_steps, steps_left, start_index, 8, num_threads);
#else
    bool sequential1 = false;
    bool sequential2 = false;
    bool sequential3 = false;
    bool sequential4 = false;
    bool sequential5 = false;
    bool sequential6 = false;
    bool sequential7 = false;
    bool sequential8 = false;
#endif



    // M1 = (1 * A11) * (1 * B11)
#if defined(_PARALLEL_) && (_PARALLEL_ == _BFS_PAR_ || _PARALLEL_ == _HYBRID_PAR_)
    # pragma omp task if(sequential1) shared(mem_mngr, locker) untied
    {
#endif
        M1.UpdateMultiplier(Scalar(1));
        M1.UpdateMultiplier(Scalar(1));
        FastMatmulRecursive(locker, mem_mngr, A11, B11, M1, total_steps, steps_left - 1, (start_index + 1 - 1) * 8, x, num_threads, Scalar(0.0));
#ifndef _PARALLEL_
#endif
#if defined(_PARALLEL_) && (_PARALLEL_ == _BFS_PAR_ || _PARALLEL_ == _HYBRID_PAR_)
        locker.Decrement();
    }
    if (should_task_wait(8, total_steps, steps_left, start_index, 1, num_threads)) {
        # pragma omp taskwait
# if defined(_PARALLEL_) && (_PARALLEL_ == _HYBRID_PAR_)
        SwitchToDFS(locker, num_threads);
# endif
    }
#endif

    // M2 = (1 * A12) * (1 * B21)
#if defined(_PARALLEL_) && (_PARALLEL_ == _BFS_PAR_ || _PARALLEL_ == _HYBRID_PAR_)
    # pragma omp task if(sequential2) shared(mem_mngr, locker) untied
    {
#endif
        M2.UpdateMultiplier(Scalar(1));
        M2.UpdateMultiplier(Scalar(1));
        FastMatmulRecursive(locker, mem_mngr, A12, B21, M2, total_steps, steps_left - 1, (start_index + 2 - 1) * 8, x, num_threads, Scalar(0.0));
#ifndef _PARALLEL_
#endif
#if defined(_PARALLEL_) && (_PARALLEL_ == _BFS_PAR_ || _PARALLEL_ == _HYBRID_PAR_)
        locker.Decrement();
    }
    if (should_task_wait(8, total_steps, steps_left, start_index, 2, num_threads)) {
        # pragma omp taskwait
# if defined(_PARALLEL_) && (_PARALLEL_ == _HYBRID_PAR_)
        SwitchToDFS(locker, num_threads);
# endif
    }
#endif

    // M3 = (1 * A11) * (1 * B12)
#if defined(_PARALLEL_) && (_PARALLEL_ == _BFS_PAR_ || _PARALLEL_ == _HYBRID_PAR_)
    # pragma omp task if(sequential3) shared(mem_mngr, locker) untied
    {
#endif
        M3.UpdateMultiplier(Scalar(1));
        M3.UpdateMultiplier(Scalar(1));
        FastMatmulRecursive(locker, mem_mngr, A11, B12, M3, total_steps, steps_left - 1, (start_index + 3 - 1) * 8, x, num_threads, Scalar(0.0));
#ifndef _PARALLEL_
#endif
#if defined(_PARALLEL_) && (_PARALLEL_ == _BFS_PAR_ || _PARALLEL_ == _HYBRID_PAR_)
        locker.Decrement();
    }
    if (should_task_wait(8, total_steps, steps_left, start_index, 3, num_threads)) {
        # pragma omp taskwait
# if defined(_PARALLEL_) && (_PARALLEL_ == _HYBRID_PAR_)
        SwitchToDFS(locker, num_threads);
# endif
    }
#endif

    // M4 = (1 * A12) * (1 * B22)
#if defined(_PARALLEL_) && (_PARALLEL_ == _BFS_PAR_ || _PARALLEL_ == _HYBRID_PAR_)
    # pragma omp task if(sequential4) shared(mem_mngr, locker) untied
    {
#endif
        M4.UpdateMultiplier(Scalar(1));
        M4.UpdateMultiplier(Scalar(1));
        FastMatmulRecursive(locker, mem_mngr, A12, B22, M4, total_steps, steps_left - 1, (start_index + 4 - 1) * 8, x, num_threads, Scalar(0.0));
#ifndef _PARALLEL_
#endif
#if defined(_PARALLEL_) && (_PARALLEL_ == _BFS_PAR_ || _PARALLEL_ == _HYBRID_PAR_)
        locker.Decrement();
    }
    if (should_task_wait(8, total_steps, steps_left, start_index, 4, num_threads)) {
        # pragma omp taskwait
# if defined(_PARALLEL_) && (_PARALLEL_ == _HYBRID_PAR_)
        SwitchToDFS(locker, num_threads);
# endif
    }
#endif

    // M5 = (1 * A21) * (1 * B11)
#if defined(_PARALLEL_) && (_PARALLEL_ == _BFS_PAR_ || _PARALLEL_ == _HYBRID_PAR_)
    # pragma omp task if(sequential5) shared(mem_mngr, locker) untied
    {
#endif
        M5.UpdateMultiplier(Scalar(1));
        M5.UpdateMultiplier(Scalar(1));
        FastMatmulRecursive(locker, mem_mngr, A21, B11, M5, total_steps, steps_left - 1, (start_index + 5 - 1) * 8, x, num_threads, Scalar(0.0));
#ifndef _PARALLEL_
#endif
#if defined(_PARALLEL_) && (_PARALLEL_ == _BFS_PAR_ || _PARALLEL_ == _HYBRID_PAR_)
        locker.Decrement();
    }
    if (should_task_wait(8, total_steps, steps_left, start_index, 5, num_threads)) {
        # pragma omp taskwait
# if defined(_PARALLEL_) && (_PARALLEL_ == _HYBRID_PAR_)
        SwitchToDFS(locker, num_threads);
# endif
    }
#endif

    // M6 = (1 * A22) * (1 * B21)
#if defined(_PARALLEL_) && (_PARALLEL_ == _BFS_PAR_ || _PARALLEL_ == _HYBRID_PAR_)
    # pragma omp task if(sequential6) shared(mem_mngr, locker) untied
    {
#endif
        M6.UpdateMultiplier(Scalar(1));
        M6.UpdateMultiplier(Scalar(1));
        FastMatmulRecursive(locker, mem_mngr, A22, B21, M6, total_steps, steps_left - 1, (start_index + 6 - 1) * 8, x, num_threads, Scalar(0.0));
#ifndef _PARALLEL_
#endif
#if defined(_PARALLEL_) && (_PARALLEL_ == _BFS_PAR_ || _PARALLEL_ == _HYBRID_PAR_)
        locker.Decrement();
    }
    if (should_task_wait(8, total_steps, steps_left, start_index, 6, num_threads)) {
        # pragma omp taskwait
# if defined(_PARALLEL_) && (_PARALLEL_ == _HYBRID_PAR_)
        SwitchToDFS(locker, num_threads);
# endif
    }
#endif

    // M7 = (1 * A21) * (1 * B12)
#if defined(_PARALLEL_) && (_PARALLEL_ == _BFS_PAR_ || _PARALLEL_ == _HYBRID_PAR_)
    # pragma omp task if(sequential7) shared(mem_mngr, locker) untied
    {
#endif
        M7.UpdateMultiplier(Scalar(1));
        M7.UpdateMultiplier(Scalar(1));
        FastMatmulRecursive(locker, mem_mngr, A21, B12, M7, total_steps, steps_left - 1, (start_index + 7 - 1) * 8, x, num_threads, Scalar(0.0));
#ifndef _PARALLEL_
#endif
#if defined(_PARALLEL_) && (_PARALLEL_ == _BFS_PAR_ || _PARALLEL_ == _HYBRID_PAR_)
        locker.Decrement();
    }
    if (should_task_wait(8, total_steps, steps_left, start_index, 7, num_threads)) {
        # pragma omp taskwait
# if defined(_PARALLEL_) && (_PARALLEL_ == _HYBRID_PAR_)
        SwitchToDFS(locker, num_threads);
# endif
    }
#endif

    // M8 = (1 * A22) * (1 * B22)
#if defined(_PARALLEL_) && (_PARALLEL_ == _BFS_PAR_ || _PARALLEL_ == _HYBRID_PAR_)
    # pragma omp task if(sequential8) shared(mem_mngr, locker) untied
    {
#endif
        M8.UpdateMultiplier(Scalar(1));
        M8.UpdateMultiplier(Scalar(1));
        FastMatmulRecursive(locker, mem_mngr, A22, B22, M8, total_steps, steps_left - 1, (start_index + 8 - 1) * 8, x, num_threads, Scalar(0.0));
#ifndef _PARALLEL_
#endif
#if defined(_PARALLEL_) && (_PARALLEL_ == _BFS_PAR_ || _PARALLEL_ == _HYBRID_PAR_)
        locker.Decrement();
    }
    if (should_task_wait(8, total_steps, steps_left, start_index, 8, num_threads)) {
        # pragma omp taskwait
    }
#endif

    M_Add1(M1, M2, C11, x, false, beta);
    M_Add2(M3, M4, C12, x, false, beta);
    M_Add3(M5, M6, C21, x, false, beta);
    M_Add4(M7, M8, C22, x, false, beta);

    // Handle edge cases with dynamic peeling
#if defined(_PARALLEL_) && (_PARALLEL_ == _BFS_PAR_ || _PARALLEL_ == _HYBRID_PAR_)
    if (total_steps == steps_left) {
        mkl_set_num_threads_local(num_threads);
        mkl_set_dynamic(0);
    }
#endif
    DynamicPeeling(A, B, C, 2, 2, 2, beta);
}
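For orientation, the eight recursive products and the four M_Add combining calls above appear to implement the classical 2×2 block decomposition of C = A*B (eight multiplications, so not a Strassen-style reduction). Ignoring the x and beta scaling arguments, the combination steps amount to:

    C11 = M1 + M2 = A11*B11 + A12*B21
    C12 = M3 + M4 = A11*B12 + A12*B22
    C21 = M5 + M6 = A21*B11 + A22*B21
    C22 = M7 + M8 = A21*B12 + A22*B22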
Example No. 23
/*
 * Class:     org_upp_AndroidMath_Vector
 * Method:    multipleByScalar
 * Signature: (F)V
 */
JNIEXPORT void JNICALL Java_org_upp_AndroidMath_Vector_multipleByScalar
  (JNIEnv *env, jobject obj, jfloat scalar)
{
	Vector* vec = mm.Get(env, obj);
	vec->MultipleByScalar(scalar);
}
Example No. 24
 /**
  * Create new list.
  *
  * Creates a new empty list using @a memory_manager for memory.
  *
  * @param[in] memory_manager Memory manager to use.
  * @return Empty List.
  **/
 static List create(MemoryManager memory_manager)
 {
     ib_list_t* ib_list;
     throw_if_error(ib_list_create(&ib_list, memory_manager.ib()));
     return List(ib_list);
 }
Example No. 25
			inline Timeline& create() { return timelines.create(); }
Example No. 26
int main(int argc, char* argv[])
{
    int iMemorySize = 64, dMemorySize = 32, iMemoryPageSize = 8, dMemoryPageSize = 16;
    int totalICacheSize = 16, iCacheBlockSize = 4, iCacheAssociativity = 4;
    int totalDCacheSize = 16, dCacheBlockSize = 4, dCacheAssociativity = 1;
    /*for(int i=0; i<argc; i++){
        printf("%s\n", argv[i]);
    }*/
    if(argc == 11){
        iMemorySize = atoi(argv[1]);
        dMemorySize = atoi(argv[2]);
        iMemoryPageSize = atoi(argv[3]);
        dMemoryPageSize = atoi(argv[4]);
        totalICacheSize = atoi(argv[5]);
        iCacheBlockSize = atoi(argv[6]);
        iCacheAssociativity = atoi(argv[7]);
        totalDCacheSize = atoi(argv[8]);
        dCacheBlockSize = atoi(argv[9]);
        dCacheAssociativity = atoi(argv[10]);
    }
    MemoryManager* memoryManager = new MemoryManager(iMemorySize, dMemorySize, iMemoryPageSize, dMemoryPageSize,
                                                     totalICacheSize, iCacheBlockSize, iCacheAssociativity, totalDCacheSize,
                                                    dCacheBlockSize, dCacheAssociativity);

    Memory* iMemory;
    Memory* dMemory;
    ControlUnit* controlUnit;
    MyRegister *reg;
    ProgramCounter *pc;
   // unsigned int readSp;
    size_t result;

    unsigned char readArray[4];
    unsigned int readProgramCounter;
    FILE *dImage;
    FILE *iImage;
    FILE *snapShot;
    FILE *errorFile;
    FILE* reportFile = fopen("report.rpt", "w");

    FILE* debug;

    dImage = fopen("dimage.bin", "rb");
    iImage = fopen("iimage.bin", "rb");
    snapShot = fopen("snapshot.rpt", "w");
    errorFile = fopen("error_dump.rpt", "w");
    debug = fopen("debug.rpt", "w");

    //read iimage
    result = fread(readArray, 4, 1, iImage);
    readProgramCounter = readArray[0] << 24 | readArray[1] << 16 | readArray[2] << 8 | readArray[3];
    pc = new ProgramCounter(readProgramCounter);
    iMemory = new Memory(iImage, pc->PC);

    //read dimage
    reg = new MyRegister(dImage, memoryManager);
   // reg->print();
    dMemory = new Memory(dImage, 0);

    memoryManager->initializeDisk(iMemory->memory, dMemory->memory);
    //Decoder d1(iMemory->memory + pc->PC);
    //d1.print();
    //printf("words = %d\n", iMemory->words);

   /* for(int i=0; i<iMemory->words; i++){
        Decoder d2(iMemory->memory + pc->PC + i*4);
        d2.print();
        d2.fprint(insOut);
    }*/

    int cycle = 0, shutDown = 0;
    controlUnit = new ControlUnit(reg, pc, dMemory, errorFile);

    printSnapShot(snapShot, cycle, reg, pc);
    printDebugFile(debug, cycle, reg, pc);
    while(1){
        Decoder d3(iMemory->getMemoryPointer(pc->PC));
        Decoder testMemory(memoryManager->getIData(pc->PC, cycle));
       // printf("0x%x\n", testMemory.instruction);
        //testMemory.print();
        pc->PC += 4;
        fprintf(debug, "instruction = %x\n", d3.instruction);

        d3.printDebug(debug);

        cycle++;
        //printf("%d\n", cycle);
       // memoryManager->displayReport();
        shutDown = controlUnit->execute(&d3, cycle);//run instruction
        if(d3.instructionName == "halt" || shutDown)
            break;

        printSnapShot(snapShot, cycle, reg, pc);
        printDebugFile(debug, cycle, reg, pc);
       // reg->print();



      // system("PAUSE");
    }
   // memoryManager->displayReport();
    memoryManager->printReport(reportFile);
    delete dMemory;
    delete iMemory;
    delete controlUnit;
    delete pc;
    delete reg;
    delete memoryManager;
    fclose(iImage);
    fclose(dImage);
    fclose(snapShot);
    fclose(errorFile);
    fclose(debug);
    fclose(reportFile);
    return 0;
}
Example No. 27
			inline void clear() noexcept { timelines.clear(); }
Example No. 28
status_t OMXCameraAdapter::setTouchFocus()
{
    status_t ret = NO_ERROR;
    OMX_ERRORTYPE eError = OMX_ErrorNone;

    OMX_ALGOAREASTYPE **focusAreas;
    OMX_TI_CONFIG_SHAREDBUFFER sharedBuffer;
    MemoryManager memMgr;
    int areasSize = 0;

    LOG_FUNCTION_NAME;

    if ( OMX_StateInvalid == mComponentState )
        {
        CAMHAL_LOGEA("OMX component is in invalid state");
        ret = -1;
        }

    if ( NO_ERROR == ret )
        {

        areasSize = ((sizeof(OMX_ALGOAREASTYPE)+4095)/4096)*4096;
        focusAreas = (OMX_ALGOAREASTYPE**) memMgr.allocateBuffer(0, 0, NULL, areasSize, 1);

        OMXCameraPortParameters * mPreviewData = NULL;
        mPreviewData = &mCameraAdapterParameters.mCameraPortParams[mCameraAdapterParameters.mPrevPortIndex];

        if (!focusAreas)
            {
            CAMHAL_LOGEB("Error allocating buffer for focus areas %d", eError);
            return -ENOMEM;
            }

        OMX_INIT_STRUCT_PTR (focusAreas[0], OMX_ALGOAREASTYPE);

        focusAreas[0]->nPortIndex = OMX_ALL;
        focusAreas[0]->nNumAreas = mFocusAreas.size();
        focusAreas[0]->nAlgoAreaPurpose = OMX_AlgoAreaFocus;

        // If the area is the special case of (0, 0, 0, 0, 0), then
        // the algorithm needs nNumAreas to be set to 0,
        // in order to automatically choose the best fitting areas.
        if ( mFocusAreas.itemAt(0)->isZeroArea() )
            {
            focusAreas[0]->nNumAreas = 0;
            }

        for ( unsigned int n = 0; n < mFocusAreas.size(); n++)
            {
            // transform the coordinates to 3A-type coordinates
            mFocusAreas.itemAt(n)->transfrom((size_t)mPreviewData->mWidth,
                                            (size_t)mPreviewData->mHeight,
                                            (size_t&)focusAreas[0]->tAlgoAreas[n].nTop,
                                            (size_t&)focusAreas[0]->tAlgoAreas[n].nLeft,
                                            (size_t&)focusAreas[0]->tAlgoAreas[n].nWidth,
                                            (size_t&)focusAreas[0]->tAlgoAreas[n].nHeight);

            focusAreas[0]->tAlgoAreas[n].nLeft =
                    ( focusAreas[0]->tAlgoAreas[n].nLeft * TOUCH_FOCUS_RANGE ) / mPreviewData->mWidth;
            focusAreas[0]->tAlgoAreas[n].nTop =
                    ( focusAreas[0]->tAlgoAreas[n].nTop* TOUCH_FOCUS_RANGE ) / mPreviewData->mHeight;
            focusAreas[0]->tAlgoAreas[n].nWidth =
                    ( focusAreas[0]->tAlgoAreas[n].nWidth * TOUCH_FOCUS_RANGE ) / mPreviewData->mWidth;
            focusAreas[0]->tAlgoAreas[n].nHeight =
                    ( focusAreas[0]->tAlgoAreas[n].nHeight * TOUCH_FOCUS_RANGE ) / mPreviewData->mHeight;
            focusAreas[0]->tAlgoAreas[n].nPriority = mFocusAreas.itemAt(n)->getWeight();

             CAMHAL_LOGDB("Focus area %d : top = %d left = %d width = %d height = %d prio = %d",
                    n, (int)focusAreas[0]->tAlgoAreas[n].nTop, (int)focusAreas[0]->tAlgoAreas[n].nLeft,
                    (int)focusAreas[0]->tAlgoAreas[n].nWidth, (int)focusAreas[0]->tAlgoAreas[n].nHeight,
                    (int)focusAreas[0]->tAlgoAreas[n].nPriority);
             }

        OMX_INIT_STRUCT_PTR (&sharedBuffer, OMX_TI_CONFIG_SHAREDBUFFER);

        sharedBuffer.nPortIndex = OMX_ALL;
        sharedBuffer.nSharedBuffSize = areasSize;
        sharedBuffer.pSharedBuff = (OMX_U8 *) focusAreas[0];

        if ( NULL == sharedBuffer.pSharedBuff )
            {
            CAMHAL_LOGEA("No resources to allocate OMX shared buffer");
            ret = -ENOMEM;
            goto EXIT;
            }

            eError =  OMX_SetConfig(mCameraAdapterParameters.mHandleComp,
                                      (OMX_INDEXTYPE) OMX_TI_IndexConfigAlgoAreas, &sharedBuffer);

        if ( OMX_ErrorNone != eError )
            {
            CAMHAL_LOGEB("Error while setting Focus Areas configuration 0x%x", eError);
            ret = -EINVAL;
            }

    EXIT:
        if (NULL != focusAreas)
            {
            memMgr.freeBuffer((void*) focusAreas);
            focusAreas = NULL;
            }
        }

    LOG_FUNCTION_NAME_EXIT;

    return ret;
}
Example No. 29
/***
 * 2.5.1.2 List datatypes   
 *   
 * The canonical-lexical-representation for the ·list· datatype is defined as 
 * the lexical form in which each item in the ·list· has the canonical 
 * lexical representation of its ·itemType·.
 ***/
const XMLCh* ListDatatypeValidator::getCanonicalRepresentation(const XMLCh*         const rawData
                                                             ,       MemoryManager* const memMgr
                                                             ,       bool                 toValidate) const
{
    MemoryManager* toUse = memMgr? memMgr : getMemoryManager();
    ListDatatypeValidator* temp = (ListDatatypeValidator*) this;
    temp->setContent(rawData);
    BaseRefVectorOf<XMLCh>* tokenVector = XMLString::tokenizeString(rawData, toUse);
    Janitor<BaseRefVectorOf<XMLCh> > janName(tokenVector);    

    if (toValidate)
    {
        try
        {
            temp->checkContent(tokenVector, rawData, 0, false, toUse);
        }
        catch (...)
        {
            return 0;
        }
    }
   
    unsigned int  retBufSize = 2 * XMLString::stringLen(rawData);
    XMLCh* retBuf = (XMLCh*) toUse->allocate(retBufSize * sizeof(XMLCh));
    retBuf[0] = 0;
    XMLCh* retBufPtr = retBuf;
    DatatypeValidator* itemDv = this->getItemTypeDTV();

    try 
    {
        for (unsigned int i = 0; i < tokenVector->size(); i++)
        {
            XMLCh* itemCanRep = (XMLCh*) itemDv->getCanonicalRepresentation(tokenVector->elementAt(i), toUse, false);
            unsigned int itemLen = XMLString::stringLen(itemCanRep); 

            if(retBufPtr+itemLen+2 >= retBuf+retBufSize)
            {
                // need to resize
                XMLCh * oldBuf = retBuf;
                retBuf = (XMLCh*) toUse->allocate(retBufSize * sizeof(XMLCh) * 4);
                memcpy(retBuf, oldBuf, retBufSize * sizeof(XMLCh ));
                retBufPtr = (retBufPtr - oldBuf) + retBuf;
                toUse->deallocate(oldBuf);
                retBufSize <<= 2;
            }

            XMLString::catString(retBufPtr, itemCanRep);
            retBufPtr = retBufPtr + itemLen + 1;
            *(retBufPtr++) = chSpace;
            *(retBufPtr) = chNull;
            toUse->deallocate(itemCanRep);
        }

        return retBuf;

    }
    catch (...)
    {
        return 0;
    }
}
Example No. 30
bool DebugManagerV3::run (uint16_t controlMask, DebugEventTarget * cb, bool releaseJtag)
{
	MemoryManager* mm = this->parent->getMemoryManager();
	MemoryArea* cpu = mm->getMemoryArea("CPU"); 
	if (!cpu)
    {
		return false;
    }

	lpm5WakeupDetected = false;

	if(cb!=0)
	{
		cbx=cb;	
	}

	uint32_t pc, sr;
	cpu->read(0, &pc, 1);
	cpu->read(2, &sr, 1);

	if(mm->flushAll()==false)
	{
		return false;
	}
	
	cycleCounter_.reset();
	ConfigManager *cm = parent->getFetHandle()->getConfigManager();

	const uint16_t mdb = parent->getEmulationManager()->getSoftwareBreakpoints()->getSwbpManager()->getInstructionAt(pc);
	if (mdb != 0)
	{
		mdbPatchValue = mdb;
	}

	HalExecElement* el = new HalExecElement(this->parent->checkHalId(ID_RestoreContext_ReleaseJtag));
	this->parent->getWatchdogControl()->addParamsTo(el);
	el->appendInputData32(pc);
	el->appendInputData16(sr);
	el->appendInputData16(controlMask!=0? 0x0007: 0x0006);	// eem control bits 
	el->appendInputData16(mdbPatchValue);		// mdb
	el->appendInputData16(releaseJtag ? 1 : 0);
	el->appendInputData16(cm->ulpDebugEnabled() ? 1 : 0);

	mdbPatchValue = 0;

	HalExecCommand cmd;
	cmd.elements.push_back(el);

	if (!this->parent->send(cmd))
	{
		return false;
	}
	
	// handle lpmx5 polling
	if (releaseJtag)
	{
		pausePolling();
	}
	else
	{
		this->resumePolling();
	}	

	if (controlMask!=0 && !releaseJtag)
	{
		if (!activatePolling(controlMask))
		{
			return false;
		}
	}

	resetCycleCounterBeforeNextStep = true;

	return true;
}