ThreadStream& operator << (ThreadStream& stream, const ThreadId& thread_id)
{
  return stream << uint32_t(thread_id);
}
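A minimal usage sketch of the overload above (hypothetical: it assumes ThreadStream is default-constructible and supports ostream-style chaining for character strings as well, and current_thread_id() is an assumed helper, not part of the example):

ThreadStream log;
ThreadId id = current_thread_id();  // hypothetical helper, not part of the example
log << "worker thread id: " << id;  // the ThreadId is forwarded as uint32_t(thread_id)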
Example No. 2
	bool BsonDocument::InstantiateObject(DynamicObject::Table &t, DynamicObject &obj)
	{
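		// Walk the decoded table and copy each entry into the matching member of
		// obj (looked up by name), recursing into nested objects and arrays and
		// converting a few compatible scalar types along the way.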
		for(DynamicObject::Iterator it = t.begin();it != t.end();++it)
		{
			boost::shared_ptr<TypeInfoBase> member = obj.GetMember(it->first);
			if (member.get()==NULL)
				continue;
			if (member->IsArray())
			{
				boost::shared_ptr<ArrayBase> dest 	= boost::dynamic_pointer_cast<ArrayBase>(member);
				boost::shared_ptr<ArrayBase> source = boost::dynamic_pointer_cast<ArrayBase>(it->second);
				if (source.get()==NULL || dest.get()==NULL)
					continue;
				size_t count = source->Count();
				for(size_t i=0; i<count;++i)
				{
					boost::shared_ptr<TypeInfoBase> ps = (*source)[i];
					if (ps.get()==NULL) 	//invalid member 
						continue;
					if (ps->IsObject())
					{
						DynamicObject::Table &h = ps->GetObject<DynamicObject>()->GetTable();
						boost::shared_ptr<DynamicObject> o = ClassFactory::CreateObject(dest->ClassName);
						if (o.get() == NULL)
							continue;
						InstantiateObject(h, *o);
						boost::shared_ptr<TypeInfoBase> pBase(new TypeInfo<DynamicObject>(o));
						dest->Add(pBase);
					}
					else
					{
						dest->Add(ps);
					}
				}
				it->second.reset();
				continue;
			} 
			else if (member->IsObject())
			{
				boost::shared_ptr<TypeInfoBase> info = it->second;
				if (!info->IsObject())
					continue;
				
				DynamicObject::Table &h = info->GetObject<DynamicObject>()->GetTable();
				boost::shared_ptr<DynamicObject> o = member->GetObject<DynamicObject>();
				if (o.get() != NULL)
					InstantiateObject(h,*o);
				continue;
			}
			//else 
			
			boost::shared_ptr<TypeInfoBase> p = it->second;
			
			if (p->GetType()!=member->GetType())
			{
				if (p->GetType()==typeid(double) && member->GetType()==typeid(float))
				{
					boost::shared_ptr<double> d = p->GetObject<double>();
					boost::shared_ptr<float> f = member->GetObject<float>();
					*f=float(*d);
				}
				else if (p->GetType()==typeid(int64_t) && member->GetType()==typeid(uint64_t))
				{
					boost::shared_ptr<int64_t> d = p->GetObject<int64_t>();
					boost::shared_ptr<uint64_t> f = member->GetObject<uint64_t>();
					*f=uint64_t(*d);
				}
				else if (p->GetType()==typeid(int) && member->GetType()==typeid(uint32_t))
				{
					boost::shared_ptr<int> d = p->GetObject<int>();
					boost::shared_ptr<uint32_t> f = member->GetObject<uint32_t>();
					*f=uint32_t(*d);
				}
			}
			else
			{
				if (p->IsBinary())
				{
					boost::shared_ptr<TypeInfo<Binary> > binarySource = boost::dynamic_pointer_cast<TypeInfo<Binary> >(p);
					boost::shared_ptr<TypeInfo<Binary> > binaryDest = boost::dynamic_pointer_cast<TypeInfo<Binary> >(member);
					if (binarySource.get()!=NULL && binaryDest.get()!=NULL)
						*binaryDest->GetObject() = *binarySource->GetObject();
				}
				else if (p->GetType() == typeid(string))
				{
					boost::shared_ptr<string> strSource = p->GetObject<string>();
					boost::shared_ptr<string> strDest = member->GetObject<string>();
					if (strSource.get()!=NULL && strDest.get()!=NULL)
						*strDest = *strSource;
				}
				else
				{
					if (member->GetObject().get() !=NULL && p->GetObject().get() != NULL)
						memcpy(member->GetObject().get(), p->GetObject().get(), p->GetSize());
				}
			}
		}
		return true;
	}
Example No. 3
bool effect_clouds::load(const char *location_name, const location_params &params)
{
    if (!location_name)
        return false;

    const bool result = read_bdd((std::string("Effect/") + location_name + "/cloud_" + location_name + ".BDD").c_str(), m_clouds);
    if (!result)
        return false;

    std::vector<vert> verts;
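    // Each cloud sprite expands into 6 vertices (two triangles); every vertex
    // carries the sprite position, size, corner direction and packed texcoords.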

    for (int i = 1; i <= 4; ++i)
    {
        if (i!=2) continue; //ToDo

        for (char j = 'A'; j <= 'E'; ++j)
        {
            m_obj_levels.resize(m_obj_levels.size() + 1);

            char buf[512];
            sprintf(buf, "Effect/%s/ObjCloud/Level%d_%c.BOC", location_name, i, j);

            nya_memory::tmp_buffer_scoped res(load_resource(buf));
            assert(res.get_size() > 0);
            nya_memory::memory_reader reader(res.get_data(), res.get_size());

            level_header header = reader.read<level_header>();
            auto &l = m_obj_levels.back();
            l.height = header.height;
            l.offset = (uint32_t)verts.size();
            l.count = header.count * 6;
            verts.resize(l.offset + l.count);
            for (int k = 0; k < header.count; ++k)
            {
                level_entry e = reader.read<level_entry>();
                m_obj_levels.back().entries.push_back(e);

                vert *v = &verts[l.offset + k * 6];

                v[0].dir = nya_math::vec2( -1.0f, -1.0f );
                v[1].dir = nya_math::vec2( -1.0f,  1.0f );
                v[2].dir = nya_math::vec2(  1.0f,  1.0f );
                v[3].dir = nya_math::vec2( -1.0f, -1.0f );
                v[4].dir = nya_math::vec2(  1.0f,  1.0f );
                v[5].dir = nya_math::vec2(  1.0f, -1.0f );

                for (int t = 0; t < 6; ++t)
                {
                    v[t].pos = e.pos;
                    v[t].size.x = e.size;// * 2.0f;
                    v[t].size.y = e.size;// * 2.0f;

                    auto tc=v[t].dir * 0.5f;
                    tc.x += 0.5f, tc.y += 0.5f;
                    tc.y = 1.0 - tc.y;

                    v[t].tc.x = tc.x * e.tc[2] + e.tc[0]; //main
                    v[t].tc.y = tc.y * e.tc[2] + e.tc[1];

                    const float weird_detail_tc_multiply = 0.01f;

                    v[t].tc.z = (tc.x * e.tc[5] + 0.5f) * weird_detail_tc_multiply + e.tc[3]; //detail
                    v[t].tc.w = (tc.y * e.tc[5] + 0.5f) * weird_detail_tc_multiply + e.tc[4];
                }
            }
        }
    }

    m_hi_flat_offset = (uint32_t)verts.size();
    m_hi_flat_count = int(m_clouds.hiflat_clouds.size()) * 6;
    verts.resize(m_hi_flat_offset+ m_hi_flat_count);
    for (int k = 0; k < int(m_clouds.hiflat_clouds.size()); ++k)
    {
        auto &p = m_clouds.hiflat_clouds[k];

        vert *v = &verts[m_hi_flat_offset + k * 6];

        v[0].dir = nya_math::vec2( -1.0f, -1.0f );
        v[1].dir = nya_math::vec2( -1.0f,  1.0f );
        v[2].dir = nya_math::vec2(  1.0f,  1.0f );
        v[3].dir = nya_math::vec2( -1.0f, -1.0f );
        v[4].dir = nya_math::vec2(  1.0f,  1.0f );
        v[5].dir = nya_math::vec2(  1.0f, -1.0f );

        for (int t = 0; t < 6; ++t)
        {
            v[t].pos.x = p.x;
            v[t].pos.z = p.y;

            auto tc=v[t].dir * 0.5f;
            tc.x += 0.5f, tc.y += 0.5f;
            //tc.y = 1.0 - tc.y;

            //ToDo

            nya_math::vec4 uv_param(0.0,0.75,0.25,0.25);

            v[t].tc.x = tc.x * uv_param.z + uv_param.x;
            v[t].tc.y = tc.y * uv_param.w + uv_param.y;

            v[t].tc.x += uv_param.z * (k % 4); //ToDo
        }
    }

    m_mesh.set_vertex_data(&verts[0], uint32_t(sizeof(verts[0])), uint32_t(verts.size()));
    m_mesh.set_vertices(0, 3);
    m_mesh.set_tc(0, 12, 4); //tc1, tc2
    m_mesh.set_tc(1, 12+16, 4); //dir, size

    m_shader_obj.load("shaders/clouds.nsh");
    m_shader_hi_flat.load("shaders/clouds_hi_flat.nsh");
    m_obj_tex = shared::get_texture(shared::load_texture((std::string("Effect/") + location_name + "/ObjCloud.nut").c_str()));
    m_flat_tex = shared::get_texture(shared::load_texture((std::string("Effect/") + location_name + "/FlatCloud.nut").c_str()));

    for (int i = 0; i < m_shader_obj.internal().get_uniforms_count(); ++i)
    {
        auto &name = m_shader_obj.internal().get_uniform(i).name;
        if (name == "pos")
            m_shader_pos = i;

        //else if (name == "fade_farnear")
        //    m_shader_obj.internal().set_uniform_value(i, params.cloud.far_fade_far, params.cloud.far_fade_near, 0.0f, 0.0f);
        else if (name == "obj upper lower")
            m_shader_obj.internal().set_uniform_value(i, params.cloud.ambient_obj_upper, params.cloud.ambient_obj_lower, 0.0f, 0.0f);
        else if (name == "amb low")
        {
            auto amb = params.cloud.ambient_lower_color / 255.0f * params.cloud.ambient_power * params.cloud.intensity;
            m_shader_obj.internal().set_uniform_value(i, amb.x, amb.y, amb.z, 0.0f);
        }
        else if (name == "amb up")
        {
            auto amb = params.cloud.ambient_upper_color / 255.0f * params.cloud.ambient_power * params.cloud.intensity;
            m_shader_obj.internal().set_uniform_value(i, amb.x, amb.y, amb.z, 0.0f);
        }
        else if (name == "diff")
        {
            auto diff = params.cloud.diffuse_color / 255.0f * params.cloud.diffuse_power * params.cloud.intensity;
            m_shader_obj.internal().set_uniform_value(i, diff.x, diff.y, diff.z, 0.0f);
        }
        else if (name == "diffuse min")
            m_shader_obj.internal().set_uniform_value(i, params.cloud.diffuse_min, 0.0f, 0.0f, 0.0f);
        else if(name == "sprite light dir")
            m_shader_obj.internal().set_uniform_value(i, -params.sky.sun_dir.x, -params.sky.sun_dir.y, -params.sky.sun_dir.z, 0.0f);
    }


    for (int i = 0; i < m_shader_hi_flat.internal().get_uniforms_count(); ++i)
    {
        auto &name = m_shader_hi_flat.internal().get_uniform(i).name;
        if (name == "color")
            m_shader_hi_flat.internal().set_uniform_value(i, 1.0f, 1.0f, 1.0f, params.cloud.highflat_alpha / 255.0f);
    }

    m_dist_sort.resize(m_clouds.obj_clouds.size());

    return true;
}
Example No. 4
void AdapterTimeSeriesDataSetTest::test_deserialise_timing()
{
    try {
        // Create configuration node.
        _fixedSizePackets = "false";
        _config = _configXml(_fixedSizePackets, _dataBitSize,
                _udpPacketsPerIteration, _samplesPerPacket,
                _outputChannelsPerSubband, _subbandsPerPacket, _nRawPolarisations);

        typedef TYPES::i16complex i16c;

        // Construct the adapter.
        AdapterTimeSeriesDataSet adapter(_config);

        // Construct a data blob to adapt into.
        TimeSeriesDataSetC32 timeSeries;

        unsigned nTimes = (_udpPacketsPerIteration * _samplesPerPacket);
        unsigned nTimeBlocks = nTimes / _outputChannelsPerSubband;
        unsigned nData = _subbandsPerPacket * _nRawPolarisations * _samplesPerPacket;
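        // Each complex sample has two components of _dataBitSize bits; dividing
        // by 8 converts the payload size from bits to bytes.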
        size_t packetSize = sizeof(UDPPacket::Header) + (nData * _dataBitSize * 2) / 8;
        size_t chunkSize = packetSize * _udpPacketsPerIteration;

        // Configure the adapter setting the data blob, chunk size and service data.
        adapter.config(&timeSeries, chunkSize, QHash<QString, DataBlob*>());

        // Create and fill UDP packets.
        std::vector<UDPPacket> packets(_udpPacketsPerIteration);
        unsigned index = 0;

        for (unsigned i = 0; i < _udpPacketsPerIteration; ++i)
        {
            // Fill in the header
            packets[i].header.version             = uint8_t(0 + i);
            packets[i].header.sourceInfo          = uint8_t(1 + i);
            packets[i].header.configuration       = uint16_t(_dataBitSize);
            packets[i].header.station             = uint16_t(3 + i);
            packets[i].header.nrBeamlets          = uint8_t(4 + i);
            packets[i].header.nrBlocks            = uint8_t(5 + i);
            packets[i].header.timestamp           = uint32_t(6 + i);
            packets[i].header.blockSequenceNumber = uint32_t(7 + i);

            // Fill in the data
            for (unsigned ii = 0, t = 0; t < _samplesPerPacket; ++t) {
                for (unsigned c = 0; c < _subbandsPerPacket; ++c) {
                    for (unsigned p = 0; p < _nRawPolarisations; ++p) {
                        i16c* data = reinterpret_cast<i16c*>(packets[i].data);
                        index = _nRawPolarisations * (t * _subbandsPerPacket + c) + p;
                        data[index] = i16c(ii++, i);
                    }
                }
            }
        }


        // Stick the chunk of packets into a QIODevice (buffer).
        {
            QBuffer buffer;
            buffer.setData(reinterpret_cast<char*>(&packets[0]), chunkSize);
            buffer.open(QBuffer::ReadOnly);
            adapter.deserialise(&buffer);
        }

        QBuffer buffer;
        buffer.setData(reinterpret_cast<char*>(&packets[0]), chunkSize);
        buffer.open(QBuffer::ReadOnly);

        QTime timer;
        timer.start();
        adapter.deserialise(&buffer);
        int elapsed = timer.elapsed();

//        std::cout << timeSeries.timeSeries(0) <<

        cout << endl;
        cout << "@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@" << endl;
        cout << "[AdapterTimeSeriesDataSet]: deserialise() " << endl;
        cout << "- nChan = " << _outputChannelsPerSubband << endl << endl;
        if (_verbose) {
            cout << "- nBlocks = " << nTimeBlocks << endl;
            cout << "- nSubbands = " << _subbandsPerPacket << endl;
            cout << "- nPols = " << _nRawPolarisations << endl;
            cout << "- nTimes = " << nTimes << endl;
        }
        cout << "* Elapsed = " << elapsed << " ms." << endl;
        cout << "@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@" << endl;

    }
    catch (const QString& err) {
        CPPUNIT_FAIL(err.toStdString().data());
    }
}
Example No. 5
template <class T>
void benchmarkGet(size_t n, T x) {
  size_t size = sizeof(T) * 6.9; // use 6.9 bits/byte
  for (size_t i = 0; i < n; ++i) {
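    // Read a field of varying width from a pseudo-random bit offset so the
    // benchmark exercises many alignments and field sizes.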
    size_t bit = (i * 2973) % (kBufferSize * 8);
    size_t drop = i % size;
    x += folly::Bits<T>::get(
        reinterpret_cast<T *>(buffer.data()), bit, size - drop);
  }
  folly::doNotOptimizeAway(x);
}

BENCHMARK_NAMED_PARAM(benchmarkGet, u16, uint16_t(0))
BENCHMARK_RELATIVE_NAMED_PARAM(benchmarkGet, i16, int16_t(0))
BENCHMARK_NAMED_PARAM(benchmarkGet, u32, uint32_t(0))
BENCHMARK_RELATIVE_NAMED_PARAM(benchmarkGet, i32, int32_t(0))
BENCHMARK_NAMED_PARAM(benchmarkGet, u64, uint64_t(0))
BENCHMARK_RELATIVE_NAMED_PARAM(benchmarkGet, i64, int64_t(0))

#if 0
============================================================================
folly/experimental/test/BitsBenchmark.cpp       relative  time/iter  iters/s
============================================================================
benchmarkSet(u16)                                            8.58ns  116.59M
benchmarkSet(i16)                                 88.42%     9.70ns  103.08M
benchmarkSet(u32)                                            8.37ns  119.45M
benchmarkSet(i32)                                 88.23%     9.49ns  105.39M
benchmarkSet(u64)                                            9.23ns  108.34M
benchmarkSet(i64)                                 82.77%    11.15ns   89.68M
----------------------------------------------------------------------------
Example No. 6
 public_key_address(const fc::ecc::public_key_data& k)
   {
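   // Keep the raw key and a 32-bit check value derived from CityHash64 of its bytes.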
   key = k;
   check = uint32_t(fc::city_hash64( (char*)&key, sizeof(key) ));
   }
Example No. 7
JS::Value
WebGLContext::GetParameter(JSContext* cx, GLenum pname, ErrorResult& rv)
{
    const char funcName[] = "getParameter";

    if (IsContextLost())
        return JS::NullValue();

    MakeContextCurrent();

    if (MinCapabilityMode()) {
        switch(pname) {
            ////////////////////////////
            // Single-value params

            // int
            case LOCAL_GL_MAX_VERTEX_ATTRIBS:
                return JS::Int32Value(MINVALUE_GL_MAX_VERTEX_ATTRIBS);

            case LOCAL_GL_MAX_FRAGMENT_UNIFORM_VECTORS:
                return JS::Int32Value(MINVALUE_GL_MAX_FRAGMENT_UNIFORM_VECTORS);

            case LOCAL_GL_MAX_VERTEX_UNIFORM_VECTORS:
                return JS::Int32Value(MINVALUE_GL_MAX_VERTEX_UNIFORM_VECTORS);

            case LOCAL_GL_MAX_VARYING_VECTORS:
                return JS::Int32Value(MINVALUE_GL_MAX_VARYING_VECTORS);

            case LOCAL_GL_MAX_TEXTURE_SIZE:
                return JS::Int32Value(MINVALUE_GL_MAX_TEXTURE_SIZE);

            case LOCAL_GL_MAX_CUBE_MAP_TEXTURE_SIZE:
                return JS::Int32Value(MINVALUE_GL_MAX_CUBE_MAP_TEXTURE_SIZE);

            case LOCAL_GL_MAX_TEXTURE_IMAGE_UNITS:
                return JS::Int32Value(MINVALUE_GL_MAX_TEXTURE_IMAGE_UNITS);

            case LOCAL_GL_MAX_VERTEX_TEXTURE_IMAGE_UNITS:
                return JS::Int32Value(MINVALUE_GL_MAX_VERTEX_TEXTURE_IMAGE_UNITS);

            case LOCAL_GL_MAX_RENDERBUFFER_SIZE:
                return JS::Int32Value(MINVALUE_GL_MAX_RENDERBUFFER_SIZE);

            default:
                // Return the real value; we're not overriding this one
                break;
        }
    }

    if (IsWebGL2() || IsExtensionEnabled(WebGLExtensionID::WEBGL_draw_buffers)) {
        if (pname == LOCAL_GL_MAX_COLOR_ATTACHMENTS) {
            return JS::Int32Value(mImplMaxColorAttachments);

        } else if (pname == LOCAL_GL_MAX_DRAW_BUFFERS) {
            return JS::Int32Value(mImplMaxDrawBuffers);

        } else if (pname >= LOCAL_GL_DRAW_BUFFER0 &&
                   pname < GLenum(LOCAL_GL_DRAW_BUFFER0 + mImplMaxDrawBuffers))
        {
            GLint iv = 0;
            gl->fGetIntegerv(pname, &iv);

            if (mBoundDrawFramebuffer)
                return JS::Int32Value(iv);

            const GLint index = (pname - LOCAL_GL_DRAW_BUFFER0);
            if (iv == LOCAL_GL_COLOR_ATTACHMENT0 + index)
                return JS::Int32Value(LOCAL_GL_BACK);

            return JS::Int32Value(LOCAL_GL_NONE);
        }
    }

    if (IsWebGL2() || IsExtensionEnabled(WebGLExtensionID::OES_vertex_array_object)) {
        if (pname == LOCAL_GL_VERTEX_ARRAY_BINDING) {
            WebGLVertexArray* vao =
                (mBoundVertexArray != mDefaultVertexArray) ? mBoundVertexArray.get() : nullptr;
            return WebGLObjectAsJSValue(cx, vao, rv);
        }
    }

    if (IsWebGL2() || IsExtensionEnabled(WebGLExtensionID::EXT_disjoint_timer_query)) {
        if (pname == LOCAL_GL_TIMESTAMP_EXT) {
            GLuint64 iv = 0;
            if (HasTimestampBits()) {
                gl->fGetInteger64v(pname, (GLint64*)&iv);
            } else {
                GenerateWarning("QUERY_COUNTER_BITS_EXT for TIMESTAMP_EXT is 0.");
            }
            // TODO: JS doesn't support 64-bit integers. Be lossy and
            // cast to double (53 bits)
            return JS::NumberValue(static_cast<double>(iv));
        } else if (pname == LOCAL_GL_GPU_DISJOINT_EXT) {
            // When disjoint isn't supported, leave as false.
            realGLboolean disjoint = LOCAL_GL_FALSE;
            if (gl->IsExtensionSupported(gl::GLContext::EXT_disjoint_timer_query)) {
                gl->fGetBooleanv(pname, &disjoint);
            }
            return JS::BooleanValue(bool(disjoint));
        }
    }

    // Privileged string params exposed by WEBGL_debug_renderer_info.
    // The privilege check is done in WebGLContext::IsExtensionSupported.
    // So here we just have to check that the extension is enabled.
    if (IsExtensionEnabled(WebGLExtensionID::WEBGL_debug_renderer_info)) {
        switch (pname) {
        case UNMASKED_VENDOR_WEBGL:
        case UNMASKED_RENDERER_WEBGL:
            {
                const char* overridePref = nullptr;
                GLenum driverEnum = LOCAL_GL_NONE;

                switch (pname) {
                case UNMASKED_RENDERER_WEBGL:
                    overridePref = "webgl.renderer-string-override";
                    driverEnum = LOCAL_GL_RENDERER;
                    break;
                case UNMASKED_VENDOR_WEBGL:
                    overridePref = "webgl.vendor-string-override";
                    driverEnum = LOCAL_GL_VENDOR;
                    break;
                default:
                    MOZ_CRASH("GFX: bad `pname`");
                }

                bool hasRetVal = false;

                nsAutoString ret;
                if (overridePref) {
                    nsresult res = Preferences::GetString(overridePref, &ret);
                    if (NS_SUCCEEDED(res) && ret.Length() > 0)
                        hasRetVal = true;
                }

                if (!hasRetVal) {
                    const char* chars = reinterpret_cast<const char*>(gl->fGetString(driverEnum));
                    ret = NS_ConvertASCIItoUTF16(chars);
                    hasRetVal = true;
                }

                return StringValue(cx, ret, rv);
            }
        }
    }

    if (IsWebGL2() || IsExtensionEnabled(WebGLExtensionID::OES_standard_derivatives)) {
        if (pname == LOCAL_GL_FRAGMENT_SHADER_DERIVATIVE_HINT) {
            GLint i = 0;
            gl->fGetIntegerv(pname, &i);
            return JS::Int32Value(i);
        }
    }

    if (IsExtensionEnabled(WebGLExtensionID::EXT_texture_filter_anisotropic)) {
        if (pname == LOCAL_GL_MAX_TEXTURE_MAX_ANISOTROPY_EXT) {
            GLfloat f = 0.f;
            gl->fGetFloatv(pname, &f);
            return JS::NumberValue(f);
        }
    }

    switch (pname) {
        //
        // String params
        //
        case LOCAL_GL_VENDOR:
        case LOCAL_GL_RENDERER:
            return StringValue(cx, "Mozilla", rv);
        case LOCAL_GL_VERSION:
            return StringValue(cx, "WebGL 1.0", rv);
        case LOCAL_GL_SHADING_LANGUAGE_VERSION:
            return StringValue(cx, "WebGL GLSL ES 1.0", rv);

        ////////////////////////////////
        // Single-value params

        // unsigned int
        case LOCAL_GL_CULL_FACE_MODE:
        case LOCAL_GL_FRONT_FACE:
        case LOCAL_GL_ACTIVE_TEXTURE:
        case LOCAL_GL_STENCIL_FUNC:
        case LOCAL_GL_STENCIL_FAIL:
        case LOCAL_GL_STENCIL_PASS_DEPTH_FAIL:
        case LOCAL_GL_STENCIL_PASS_DEPTH_PASS:
        case LOCAL_GL_STENCIL_BACK_FUNC:
        case LOCAL_GL_STENCIL_BACK_FAIL:
        case LOCAL_GL_STENCIL_BACK_PASS_DEPTH_FAIL:
        case LOCAL_GL_STENCIL_BACK_PASS_DEPTH_PASS:
        case LOCAL_GL_DEPTH_FUNC:
        case LOCAL_GL_BLEND_SRC_RGB:
        case LOCAL_GL_BLEND_SRC_ALPHA:
        case LOCAL_GL_BLEND_DST_RGB:
        case LOCAL_GL_BLEND_DST_ALPHA:
        case LOCAL_GL_BLEND_EQUATION_RGB:
        case LOCAL_GL_BLEND_EQUATION_ALPHA:
        case LOCAL_GL_GENERATE_MIPMAP_HINT: {
            GLint i = 0;
            gl->fGetIntegerv(pname, &i);
            return JS::NumberValue(uint32_t(i));
        }
        case LOCAL_GL_IMPLEMENTATION_COLOR_READ_TYPE: {
            const webgl::FormatUsageInfo* usage;
            uint32_t width, height;
            if (!ValidateCurFBForRead(funcName, &usage, &width, &height))
                return JS::NullValue();

            GLint i = 0;
            if (gl->IsSupported(gl::GLFeature::ES2_compatibility)) {
                gl->fGetIntegerv(pname, &i);
            } else {
                i = LOCAL_GL_UNSIGNED_BYTE;
            }

            return JS::NumberValue(uint32_t(i));
        }
        case LOCAL_GL_IMPLEMENTATION_COLOR_READ_FORMAT: {
            const webgl::FormatUsageInfo* usage;
            uint32_t width, height;
            if (!ValidateCurFBForRead(funcName, &usage, &width, &height))
                return JS::NullValue();

            GLint i = 0;
            if (gl->IsSupported(gl::GLFeature::ES2_compatibility)) {
                gl->fGetIntegerv(pname, &i);
            } else {
                i = LOCAL_GL_RGBA;
            }

            // OpenGL ES 3.0.4 p112 Table 3.2 shows that the read format SRGB_ALPHA
            // is not supported. If the internal format of the fbo is SRGB8_ALPHA8,
            // then IMPLEMENTATION_COLOR_READ_FORMAT is SRGB_ALPHA, which ReadPixels
            // does not support. So just return RGBA here.
            if (i == LOCAL_GL_SRGB_ALPHA)
                i = LOCAL_GL_RGBA;

            return JS::NumberValue(uint32_t(i));
        }
        // int
        case LOCAL_GL_STENCIL_REF:
        case LOCAL_GL_STENCIL_BACK_REF: {
            GLint stencilBits = 0;
            if (!GetStencilBits(&stencilBits))
                return JS::NullValue();

            // Assuming stencils have 8 bits
            const GLint stencilMask = (1 << stencilBits) - 1;

            GLint refValue = 0;
            gl->fGetIntegerv(pname, &refValue);

            return JS::Int32Value(refValue & stencilMask);
        }

        case LOCAL_GL_STENCIL_CLEAR_VALUE:
        case LOCAL_GL_UNPACK_ALIGNMENT:
        case LOCAL_GL_PACK_ALIGNMENT:
        case LOCAL_GL_SUBPIXEL_BITS:
        case LOCAL_GL_SAMPLE_BUFFERS:
        case LOCAL_GL_SAMPLES:
        case LOCAL_GL_MAX_VERTEX_ATTRIBS:
        case LOCAL_GL_MAX_COMBINED_TEXTURE_IMAGE_UNITS:
        case LOCAL_GL_MAX_VERTEX_TEXTURE_IMAGE_UNITS:
        case LOCAL_GL_MAX_TEXTURE_IMAGE_UNITS: {
            GLint i = 0;
            gl->fGetIntegerv(pname, &i);
            return JS::Int32Value(i);
        }

        case LOCAL_GL_RED_BITS:
        case LOCAL_GL_GREEN_BITS:
        case LOCAL_GL_BLUE_BITS:
        case LOCAL_GL_ALPHA_BITS:
        case LOCAL_GL_DEPTH_BITS:
        case LOCAL_GL_STENCIL_BITS: {
            // Deprecated and removed in GL Core profiles, so special handling required.
            GLint val;
            if (!GetChannelBits(funcName, pname, &val))
                return JS::NullValue();

            return JS::Int32Value(val);
        }

        case LOCAL_GL_MAX_TEXTURE_SIZE:
            return JS::Int32Value(mImplMaxTextureSize);

        case LOCAL_GL_MAX_CUBE_MAP_TEXTURE_SIZE:
            return JS::Int32Value(mImplMaxCubeMapTextureSize);

        case LOCAL_GL_MAX_RENDERBUFFER_SIZE:
            return JS::Int32Value(mImplMaxRenderbufferSize);

        case LOCAL_GL_MAX_VERTEX_UNIFORM_VECTORS:
            return JS::Int32Value(mGLMaxVertexUniformVectors);

        case LOCAL_GL_MAX_FRAGMENT_UNIFORM_VECTORS:
            return JS::Int32Value(mGLMaxFragmentUniformVectors);

        case LOCAL_GL_MAX_VARYING_VECTORS:
            return JS::Int32Value(mGLMaxVaryingVectors);

        case LOCAL_GL_COMPRESSED_TEXTURE_FORMATS: {
            uint32_t length = mCompressedTextureFormats.Length();
            JSObject* obj = dom::Uint32Array::Create(cx, this, length,
                                                     mCompressedTextureFormats.Elements());
            if (!obj) {
                rv = NS_ERROR_OUT_OF_MEMORY;
            }
            return JS::ObjectOrNullValue(obj);
        }

        // Unsigned int. Here we may have to return very large values like 2^32-1
        // that can't be represented as JavaScript integer values. We just return
        // them as doubles and JavaScript doesn't care.
        case LOCAL_GL_STENCIL_BACK_VALUE_MASK:
            return JS::DoubleValue(mStencilValueMaskBack); // pass as FP value to allow large values such as 2^32-1.

        case LOCAL_GL_STENCIL_BACK_WRITEMASK:
            return JS::DoubleValue(mStencilWriteMaskBack);

        case LOCAL_GL_STENCIL_VALUE_MASK:
            return JS::DoubleValue(mStencilValueMaskFront);

        case LOCAL_GL_STENCIL_WRITEMASK:
            return JS::DoubleValue(mStencilWriteMaskFront);

        // float
        case LOCAL_GL_LINE_WIDTH:
            return JS::DoubleValue(mLineWidth);

        case LOCAL_GL_DEPTH_CLEAR_VALUE:
        case LOCAL_GL_POLYGON_OFFSET_FACTOR:
        case LOCAL_GL_POLYGON_OFFSET_UNITS:
        case LOCAL_GL_SAMPLE_COVERAGE_VALUE: {
            GLfloat f = 0.f;
            gl->fGetFloatv(pname, &f);
            return JS::DoubleValue(f);
        }

        // bool
        case LOCAL_GL_BLEND:
        case LOCAL_GL_DEPTH_TEST:
        case LOCAL_GL_STENCIL_TEST:
        case LOCAL_GL_CULL_FACE:
        case LOCAL_GL_DITHER:
        case LOCAL_GL_POLYGON_OFFSET_FILL:
        case LOCAL_GL_SCISSOR_TEST:
        case LOCAL_GL_SAMPLE_COVERAGE_INVERT:
        case LOCAL_GL_DEPTH_WRITEMASK: {
            realGLboolean b = 0;
            gl->fGetBooleanv(pname, &b);
            return JS::BooleanValue(bool(b));
        }

        // bool, WebGL-specific
        case UNPACK_FLIP_Y_WEBGL:
            return JS::BooleanValue(mPixelStore_FlipY);
        case UNPACK_PREMULTIPLY_ALPHA_WEBGL:
            return JS::BooleanValue(mPixelStore_PremultiplyAlpha);

        // uint, WebGL-specific
        case UNPACK_COLORSPACE_CONVERSION_WEBGL:
            return JS::NumberValue(uint32_t(mPixelStore_ColorspaceConversion));

        ////////////////////////////////
        // Complex values

        // 2 floats
        case LOCAL_GL_DEPTH_RANGE:
        case LOCAL_GL_ALIASED_POINT_SIZE_RANGE:
        case LOCAL_GL_ALIASED_LINE_WIDTH_RANGE: {
            GLenum driverPName = pname;
            if (gl->IsCoreProfile() &&
                driverPName == LOCAL_GL_ALIASED_POINT_SIZE_RANGE)
            {
                driverPName = LOCAL_GL_POINT_SIZE_RANGE;
            }

            GLfloat fv[2] = { 0 };
            gl->fGetFloatv(driverPName, fv);
            JSObject* obj = dom::Float32Array::Create(cx, this, 2, fv);
            if (!obj) {
                rv = NS_ERROR_OUT_OF_MEMORY;
            }
            return JS::ObjectOrNullValue(obj);
        }

        // 4 floats
        case LOCAL_GL_COLOR_CLEAR_VALUE:
        case LOCAL_GL_BLEND_COLOR: {
            GLfloat fv[4] = { 0 };
            gl->fGetFloatv(pname, fv);
            JSObject* obj = dom::Float32Array::Create(cx, this, 4, fv);
            if (!obj) {
                rv = NS_ERROR_OUT_OF_MEMORY;
            }
            return JS::ObjectOrNullValue(obj);
        }

        // 2 ints
        case LOCAL_GL_MAX_VIEWPORT_DIMS: {
            GLint iv[2] = { 0 };
            gl->fGetIntegerv(pname, iv);
            JSObject* obj = dom::Int32Array::Create(cx, this, 2, iv);
            if (!obj) {
                rv = NS_ERROR_OUT_OF_MEMORY;
            }
            return JS::ObjectOrNullValue(obj);
        }

        // 4 ints
        case LOCAL_GL_SCISSOR_BOX:
        case LOCAL_GL_VIEWPORT: {
            GLint iv[4] = { 0 };
            gl->fGetIntegerv(pname, iv);
            JSObject* obj = dom::Int32Array::Create(cx, this, 4, iv);
            if (!obj) {
                rv = NS_ERROR_OUT_OF_MEMORY;
            }
            return JS::ObjectOrNullValue(obj);
        }

        // 4 bools
        case LOCAL_GL_COLOR_WRITEMASK: {
            realGLboolean gl_bv[4] = { 0 };
            gl->fGetBooleanv(pname, gl_bv);
            bool vals[4] = { bool(gl_bv[0]), bool(gl_bv[1]),
                             bool(gl_bv[2]), bool(gl_bv[3]) };
            JS::Rooted<JS::Value> arr(cx);
            if (!dom::ToJSValue(cx, vals, &arr)) {
                rv = NS_ERROR_OUT_OF_MEMORY;
            }
            return arr;
        }

        case LOCAL_GL_ARRAY_BUFFER_BINDING: {
            return WebGLObjectAsJSValue(cx, mBoundArrayBuffer.get(), rv);
        }

        case LOCAL_GL_ELEMENT_ARRAY_BUFFER_BINDING: {
            return WebGLObjectAsJSValue(cx, mBoundVertexArray->mElementArrayBuffer.get(), rv);
        }

        case LOCAL_GL_RENDERBUFFER_BINDING: {
            return WebGLObjectAsJSValue(cx, mBoundRenderbuffer.get(), rv);
        }

        // DRAW_FRAMEBUFFER_BINDING is the same as FRAMEBUFFER_BINDING.
        case LOCAL_GL_FRAMEBUFFER_BINDING: {
            return WebGLObjectAsJSValue(cx, mBoundDrawFramebuffer.get(), rv);
        }

        case LOCAL_GL_CURRENT_PROGRAM: {
            return WebGLObjectAsJSValue(cx, mCurrentProgram.get(), rv);
        }

        case LOCAL_GL_TEXTURE_BINDING_2D: {
            return WebGLObjectAsJSValue(cx, mBound2DTextures[mActiveTexture].get(), rv);
        }

        case LOCAL_GL_TEXTURE_BINDING_CUBE_MAP: {
            return WebGLObjectAsJSValue(cx, mBoundCubeMapTextures[mActiveTexture].get(), rv);
        }

        default:
            break;
    }

    ErrorInvalidEnumInfo("getParameter: parameter", pname);
    return JS::NullValue();
}
Example No. 8
const FHardwareTexture *FGLTexture::Bind(int texunit, int clampmode, int translation, FTexture *hirescheck)
{
	int usebright = false;
	bool alphatrans = false;

	if (translation <= 0) translation = -translation;
	else
	{
		alphatrans = (gl.legacyMode && uint32_t(translation) == TRANSLATION(TRANSLATION_Standard, 8));
		translation = GLTranslationPalette::GetInternalTranslation(translation);
	}

	bool needmipmap = (clampmode <= CLAMP_XY);

	FHardwareTexture *hwtex = CreateHwTexture();

	if (hwtex)
	{
		// Texture has become invalid
		if ((!tex->bHasCanvas && (!tex->bWarped || gl.legacyMode)) && tex->CheckModified())
		{
			Clean(true);
			hwtex = CreateHwTexture();
		}

		// Bind it to the system.
		if (!hwtex->Bind(texunit, translation, needmipmap))
		{
			
			int w=0, h=0;

			// Create this texture
			unsigned char * buffer = NULL;
			
			if (!tex->bHasCanvas)
			{
				buffer = CreateTexBuffer(translation, w, h, hirescheck, true, alphatrans);
				if (tex->bWarped && gl.legacyMode && w*h <= 256*256)	// do not software-warp larger textures, especially on the old systems that still need this fallback.
				{
					// need to do software warping
					FWarpTexture *wt = static_cast<FWarpTexture*>(tex);
					unsigned char *warpbuffer = new unsigned char[w*h*4];
					WarpBuffer((uint32_t*)warpbuffer, (const uint32_t*)buffer, w, h, wt->WidthOffsetMultiplier, wt->HeightOffsetMultiplier, screen->FrameTime, wt->Speed, tex->bWarped);
					delete[] buffer;
					buffer = warpbuffer;
					wt->GenTime = screen->FrameTime;
				}
				tex->ProcessData(buffer, w, h, false);
			}
			if (!hwtex->CreateTexture(buffer, w, h, texunit, needmipmap, translation, "FGLTexture.Bind",true))
			{
				// could not create texture
				delete[] buffer;
				return NULL;
			}
			delete[] buffer;
		}
		if (tex->bHasCanvas) static_cast<FCanvasTexture*>(tex)->NeedUpdate();
		if (translation != lastTranslation) lastSampler = 254;
		if (lastSampler != clampmode)
			lastSampler = GLRenderer->mSamplerManager->Bind(texunit, clampmode, lastSampler);
		lastTranslation = translation;
		return hwtex; 
	}
	return NULL;
}
Example No. 9
void ZipIntKeyIndex::build(ColumnType keyType, SortableStrVec& strVec) {
	assert(strVec.m_index.size() == 0);
	m_keyType = keyType;
	void*  data = strVec.m_strpool.data();
	size_t size = strVec.m_strpool.size();
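	// Fixed-width keys are zipped straight from the string pool; var-int encoded
	// keys are first unpacked into a temporary vector.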
	switch (keyType) {
	default:
		THROW_STD(invalid_argument, "Bad keyType=%s", Schema::columnTypeStr(keyType));
	case ColumnType::Sint08: zipKeys< int8_t >(data, size); break;
	case ColumnType::Uint08: zipKeys<uint8_t >(data, size); break;
	case ColumnType::Sint16: zipKeys< int16_t>(data, size); break;
	case ColumnType::Uint16: zipKeys<uint16_t>(data, size); break;
	case ColumnType::Sint32: zipKeys< int32_t>(data, size); break;
	case ColumnType::Uint32: zipKeys<uint32_t>(data, size); break;
	case ColumnType::Sint64: zipKeys< int64_t>(data, size); break;
	case ColumnType::Uint64: zipKeys<uint64_t>(data, size); break;
	case ColumnType::VarSint: {
		valvec<llong> tmp;
		const byte* pos = strVec.m_strpool.data();
		const byte* end = strVec.m_strpool.end();
		while (pos < end) {
			const byte* next = nullptr;
			llong key = load_var_int64(pos, &next);
			tmp.push_back(key);
			pos = next;
		}
		zipKeys<int64_t>(tmp.data(), tmp.used_mem_size());
		break; }
	case ColumnType::VarUint: {
		valvec<ullong> tmp;
		const byte* pos = strVec.m_strpool.data();
		const byte* end = strVec.m_strpool.end();
		while (pos < end) {
			const byte* next = nullptr;
			ullong key = load_var_uint64(pos, &next);
			tmp.push_back(key);
			pos = next;
		}
		zipKeys<uint64_t>(tmp.data(), tmp.used_mem_size());
		break; }
	}
	valvec<uint32_t> index(m_keys.size(), valvec_no_init());
	for (size_t i = 0; i < index.size(); ++i) index[i] = uint32_t(i);
	std::sort(index.begin(), index.end(), [&](size_t x, size_t y) {
		size_t xkey = m_keys.get(x);
		size_t ykey = m_keys.get(y);
		if (xkey < ykey) return true;
		if (xkey > ykey) return false;
		return x < y;
	});
	auto minIdx = m_index.build_from(index);
	(void)minIdx;
#if !defined(NDEBUG)
	assert(0 == minIdx);
	for(size_t i = 1; i < m_index.size(); ++i) {
		size_t xi = m_index.get(i-1);
		size_t yi = m_index.get(i-0);
		size_t xk = m_keys.get(xi);
		size_t yk = m_keys.get(yi);
		assert(xk <= yk);
	}
#endif
}
Example No. 10
    //update the Kinect
    void Device::updateKinect()
    {
        libfreenect2::FrameMap frames;
        

        //Temporary arrays
        float * newDepth = new float[FRAME_SIZE_DEPTH];
        float * newIr    = new float[FRAME_SIZE_DEPTH];
        float * newUndisorted =  new float[FRAME_SIZE_DEPTH];
        
        libfreenect2::Frame undistorted(512, 424, 4), registered(512, 424, 4);
                                                                 
        //MAIN THREAD
        while(initialized_device){
            listener->waitForNewFrame(frames);
            
            if(enableRegistered){
                
                libfreenect2::Frame *  rgb   = frames[libfreenect2::Frame::Color];
                memcpy(colorData, reinterpret_cast<const uint32_t *>(rgb->data), 1920 * 1080 * 4);
                
                libfreenect2::Frame *  depth = frames[libfreenect2::Frame::Depth];
                memcpy(newDepth, reinterpret_cast<const float * >(depth->data), FRAME_BYTE_SIZE_DEPTH);
                
                 //Mappers RGB + Depth
                registration->apply(rgb, depth, &undistorted, &registered);
                memcpy(newUndisorted, reinterpret_cast<const float * >(undistorted.data), FRAME_BYTE_SIZE_DEPTH);
                memcpy(registeredData, reinterpret_cast<const uint32_t * >(registered.data), FRAME_BYTE_SIZE_DEPTH);
            }else if(enableVideo && !enableDepth){
                
                libfreenect2::Frame *  rgb   = frames[libfreenect2::Frame::Color];
                memcpy(colorData, reinterpret_cast<const uint32_t *>(rgb->data), 1920 * 1080 * 4);
            }else if( !enableVideo && enableDepth ){
                
                libfreenect2::Frame *  depth = frames[libfreenect2::Frame::Depth];
                memcpy(newDepth, reinterpret_cast<const float * >(depth->data), FRAME_BYTE_SIZE_DEPTH);
            }else if(enableVideo && enableDepth && !enableRegistered){
                
                libfreenect2::Frame *  rgb   = frames[libfreenect2::Frame::Color];
                memcpy(colorData, reinterpret_cast<const uint32_t *>(rgb->data), 1920 * 1080 * 4);
                
                libfreenect2::Frame *  depth = frames[libfreenect2::Frame::Depth];
                memcpy(newDepth, reinterpret_cast<const float * >(depth->data), FRAME_BYTE_SIZE_DEPTH);
            }
        

            if(enableIR){
                libfreenect2::Frame *  ir    = frames[libfreenect2::Frame::Ir];
                memcpy(newIr, reinterpret_cast<const float * >(ir->data), FRAME_BYTE_SIZE_DEPTH);
            }
       
                int indexFD = 0;
                int pIndexEnd = (FRAME_SIZE_DEPTH);
                
                int indexX = 0;
                int indexY = 0;
                int cameraXYZ = 0;
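                // Convert the raw depth frame into display buffers and compute the
                // per-pixel camera-space XYZ from the IR camera intrinsics.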
                while(indexFD < pIndexEnd){
                    float depth = newDepth[indexFD];
                    
                    //Depth
                    //0.0566666f -> (value/4500) * 255
                    rawDepthData[indexFD] = uint32_t(depth);
                   
                    //IR
                    irData[indexFD]  = colorByte2Int((uint32_t(newIr[indexFD]*0.0566666f)>>2));
      
                    //undistorted
                    undisortedData[indexFD]  = colorByte2Int(uint32_t(newUndisorted[indexFD]*0.0566666f));
                    
                    
                    depthData[indexFD]  = colorByte2Int(uint32_t(depth*0.0566666f));
                    
                    //evaluates the depth XYZ position;
                   
                    depthCameraData[cameraXYZ++] = (indexX - dev->getIrCameraParams().cx) * depth / dev->getIrCameraParams().fx;//x
                    depthCameraData[cameraXYZ++] = (indexY - dev->getIrCameraParams().cy) * depth / dev->getIrCameraParams().fy; //y
                    depthCameraData[cameraXYZ++] = depth; //z
                    
                    indexX++;
                    if(indexX >= 512){ indexX=0; indexY++;}

                    indexFD++;
              //  }
            }
            
            
            //frame listener
            listener->release(frames);
        }
        
        //clean up
        if(newDepth != NULL) delete[] newDepth;
        if(newIr != NULL) delete[] newIr;
        if(newUndisorted != NULL) delete[] newUndisorted;
        
    }
Example No. 11
int SoapyStreamEndpoint::acquireRecv(size_t &handle, const void **buffs, int &flags, long long &timeNs)
{
    int ret = 0;

    //no available handles, the user is hoarding them...
    if (_numHandlesAcquired == _buffData.size())
    {
        SoapySDR::logf(SOAPY_SDR_ERROR, "StreamEndpoint::acquireRecv() -- all buffers acquired");
        return SOAPY_SDR_STREAM_ERROR;
    }

    //grab the current handle
    handle = _nextHandleAcquire;
    auto &data = _buffData[handle];

    //receive into the buffer
    assert(not _streamSock.null());
    if (_datagramMode) ret = _streamSock.recv(data.buff.data(), data.buff.size());
    else ret = _streamSock.recv(data.buff.data(), HEADER_SIZE, MSG_WAITALL);
    if (ret < 0)
    {
        SoapySDR::logf(SOAPY_SDR_ERROR, "StreamEndpoint::acquireRecv(), FAILED %s", _streamSock.lastErrorMsg());
        return SOAPY_SDR_STREAM_ERROR;
    }
    size_t bytesRecvd = size_t(ret);
    _receiveInitial = true;

    //check the header
    auto header = (const StreamDatagramHeader*)data.buff.data();
    size_t bytes = ntohl(header->bytes);

    if (_datagramMode and bytes > bytesRecvd)
    {
        SoapySDR::logf(SOAPY_SDR_ERROR, "StreamEndpoint::acquireRecv(%d bytes), FAILED %d\n"
            "This MTU setting may be unachievable. Check network configuration.", int(bytes), ret);
        return SOAPY_SDR_STREAM_ERROR;
    }
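    //stream mode: keep receiving until the full payload indicated by the header has arrived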

    else while (bytesRecvd < bytes)
    {
        ret = _streamSock.recv(data.buff.data()+bytesRecvd, std::min<size_t>(SOAPY_REMOTE_SOCKET_BUFFMAX, bytes-bytesRecvd));
        if (ret < 0)
        {
            SoapySDR::logf(SOAPY_SDR_ERROR, "StreamEndpoint::acquireRecv(), FAILED %s", _streamSock.lastErrorMsg());
            return SOAPY_SDR_STREAM_ERROR;
        }
        bytesRecvd += size_t(ret);
    }

    const int numElemsOrErr = int(ntohl(header->elems));

    //dropped or out of order packets
    //TODO return an error code, more than a notification
    if (uint32_t(_lastRecvSequence) != uint32_t(ntohl(header->sequence)))
    {
        SoapySDR::log(SOAPY_SDR_SSI, "S");
    }

    //update flow control
    _lastRecvSequence = ntohl(header->sequence)+1;

    //has there been at least trigger window number of sequences since the last ACK?
    if (uint32_t(_lastRecvSequence-_lastSendSequence) >= _triggerAckWindow)
    {
        this->sendACK();
    }

    //increment for next handle
    if (numElemsOrErr >= 0)
    {
        data.acquired = true;
        _nextHandleAcquire = (_nextHandleAcquire + 1)%_numBuffs;
        _numHandlesAcquired++;
    }

    //set output parameters
    this->getAddrs(handle, (void **)buffs);
    flags = ntohl(header->flags);
    timeNs = ntohll(header->time);
    return numElemsOrErr;
}
Example No. 12
void CalculateProcessorTopology(CPUNumaNodes& out_nodes)
{
    out_nodes.clear();
#if defined(_WIN32)

    SYSTEM_LOGICAL_PROCESSOR_INFORMATION_EX buffer[KNOB_MAX_NUM_THREADS];
    DWORD bufSize = sizeof(buffer);

    BOOL ret = GetLogicalProcessorInformationEx(RelationProcessorCore, buffer, &bufSize);
    SWR_ASSERT(ret != FALSE, "Failed to get Processor Topology Information");

    uint32_t count = bufSize / buffer->Size;
    PSYSTEM_LOGICAL_PROCESSOR_INFORMATION_EX pBuffer = buffer;

    for (uint32_t i = 0; i < count; ++i)
    {
        SWR_ASSERT(pBuffer->Relationship == RelationProcessorCore);
        for (uint32_t g = 0; g < pBuffer->Processor.GroupCount; ++g)
        {
            auto& gmask = pBuffer->Processor.GroupMask[g];
            uint32_t threadId = 0;
            uint32_t procGroup = gmask.Group;

            Core* pCore = nullptr;

            uint32_t numThreads = (uint32_t)_mm_popcount_sizeT(gmask.Mask);

            while (BitScanForwardSizeT((unsigned long*)&threadId, gmask.Mask))
            {
                // clear mask
                gmask.Mask &= ~(KAFFINITY(1) << threadId);

                // Find Numa Node
                PROCESSOR_NUMBER procNum = {};
                procNum.Group = WORD(procGroup);
                procNum.Number = UCHAR(threadId);

                uint32_t numaId = 0;
                ret = GetNumaProcessorNodeEx(&procNum, (PUSHORT)&numaId);
                SWR_ASSERT(ret);

                // Store data
                if (out_nodes.size() <= numaId) out_nodes.resize(numaId + 1);
                auto& numaNode = out_nodes[numaId];

                uint32_t coreId = 0;

                if (nullptr == pCore)
                {
                    numaNode.cores.push_back(Core());
                    pCore = &numaNode.cores.back();
                    pCore->procGroup = procGroup;
#if !defined(_WIN64)
                    coreId = (uint32_t)numaNode.cores.size();
                    if ((coreId * numThreads) >= 32)
                    {
                        // Windows doesn't return threadIds >= 32 for a processor group correctly
                        // when running a 32-bit application.
                        // Just save -1 as the threadId
                        threadId = uint32_t(-1);
                    }
#endif
                }
                pCore->threadIds.push_back(threadId);
            }
        }
        pBuffer = PtrAdd(pBuffer, pBuffer->Size);
    }


#elif defined(__linux__) || defined (__gnu_linux__)

    // Parse /proc/cpuinfo to get full topology
    std::ifstream input("/proc/cpuinfo");
    std::string line;
    char* c;
    uint32_t threadId = uint32_t(-1);
    uint32_t coreId = uint32_t(-1);
    uint32_t numaId = uint32_t(-1);

    while (std::getline(input, line))
    {
        if (line.find("processor") != std::string::npos)
        {
            if (threadId != uint32_t(-1))
            {
                // Save information.
                if (out_nodes.size() <= numaId) out_nodes.resize(numaId + 1);
                auto& numaNode = out_nodes[numaId];
                if (numaNode.cores.size() <= coreId) numaNode.cores.resize(coreId + 1);
                auto& core = numaNode.cores[coreId];

                core.procGroup = coreId;
                core.threadIds.push_back(threadId);
            }

            auto data_start = line.find(": ") + 2;
            threadId = std::strtoul(&line.c_str()[data_start], &c, 10);
            continue;
        }
        if (line.find("core id") != std::string::npos)
        {
            auto data_start = line.find(": ") + 2;
            coreId = std::strtoul(&line.c_str()[data_start], &c, 10);
            continue;
        }
        if (line.find("physical id") != std::string::npos)
        {
            auto data_start = line.find(": ") + 2;
            numaId = std::strtoul(&line.c_str()[data_start], &c, 10);
            continue;
        }
    }

    if (threadId != uint32_t(-1))
    {
        // Save information.
        if (out_nodes.size() <= numaId) out_nodes.resize(numaId + 1);
        auto& numaNode = out_nodes[numaId];
        if (numaNode.cores.size() <= coreId) numaNode.cores.resize(coreId + 1);
        auto& core = numaNode.cores[coreId];

        core.procGroup = coreId;
        core.threadIds.push_back(threadId);
    }

    for (uint32_t node = 0; node < out_nodes.size(); node++) {
        auto& numaNode = out_nodes[node];
        auto it = numaNode.cores.begin();
        for ( ; it != numaNode.cores.end(); ) {
            if (it->threadIds.size() == 0)
                numaNode.cores.erase(it);
            else
                ++it;
        }
    }

#else

#error Unsupported platform

#endif
}
Example No. 13
void Layer::lockPageFlip(bool& recomputeVisibleRegions)
{
    if (mQueuedFrames > 0) {
        // Capture the old state of the layer for comparisons later
        const bool oldOpacity = isOpaque();
        sp<GraphicBuffer> oldActiveBuffer = mActiveBuffer;

        // signal another event if we have more frames pending
        if (android_atomic_dec(&mQueuedFrames) > 1) {
            mFlinger->signalEvent();
        }

        if (mSurfaceTexture->updateTexImage() < NO_ERROR) {
            // something happened!
            recomputeVisibleRegions = true;
            return;
        }

        updateLayerQcomFlags(LAYER_UPDATE_STATUS, true, mLayerQcomFlags);

        // update the active buffer
        mActiveBuffer = mSurfaceTexture->getCurrentBuffer();

        //Buffer validity changed. Reset HWC geometry flags.
        if(oldActiveBuffer == NULL && mActiveBuffer != NULL) {
            mFlinger->invalidateHwcGeometry();
        }

        const Rect crop(mSurfaceTexture->getCurrentCrop());
        const uint32_t transform(mSurfaceTexture->getCurrentTransform());
        const uint32_t scalingMode(mSurfaceTexture->getCurrentScalingMode());
        if ((crop != mCurrentCrop) ||
            (transform != mCurrentTransform) ||
            (scalingMode != mCurrentScalingMode))
        {
            mCurrentCrop = crop;
            mCurrentTransform = transform;
            mCurrentScalingMode = scalingMode;
            mFlinger->invalidateHwcGeometry();
        }

        GLfloat textureMatrix[16];
        mSurfaceTexture->getTransformMatrix(textureMatrix);
        if (memcmp(textureMatrix, mTextureMatrix, sizeof(textureMatrix))) {
            memcpy(mTextureMatrix, textureMatrix, sizeof(textureMatrix));
            mFlinger->invalidateHwcGeometry();
        }

        uint32_t bufWidth  = mActiveBuffer->getWidth();
        uint32_t bufHeight = mActiveBuffer->getHeight();
        if (oldActiveBuffer != NULL) {
            if (bufWidth != uint32_t(oldActiveBuffer->width) ||
                bufHeight != uint32_t(oldActiveBuffer->height)) {
                mFlinger->invalidateHwcGeometry();
            }
        }

        mCurrentOpacity = getOpacityForFormat(mActiveBuffer->format);
        if (oldOpacity != isOpaque()) {
            recomputeVisibleRegions = true;
        }

        glTexParameterx(GL_TEXTURE_EXTERNAL_OES, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE);
        glTexParameterx(GL_TEXTURE_EXTERNAL_OES, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE);

        // update the layer size if needed
        const Layer::State& front(drawingState());

        // FIXME: mPostedDirtyRegion = dirty & bounds
        mPostedDirtyRegion.set(front.w, front.h);

        if ((front.w != front.requested_w) ||
            (front.h != front.requested_h))
        {
            // check that we received a buffer of the right size
            // (Take the buffer's orientation into account)
            if (mCurrentTransform & Transform::ROT_90) {
                swap(bufWidth, bufHeight);
            }

            if (isFixedSize() ||
                    (bufWidth == front.requested_w &&
                    bufHeight == front.requested_h))
            {
                // Here we pretend the transaction happened by updating the
                // current and drawing states. Drawing state is only accessed
                // in this thread, no need to have it locked
                Layer::State& editDraw(mDrawingState);
                editDraw.w = editDraw.requested_w;
                editDraw.h = editDraw.requested_h;

                // We also need to update the current state so that we don't
                // end-up doing too much work during the next transaction.
                // NOTE: We actually don't need to hold the transaction lock here
                // because State::w and State::h are only accessed from
                // this thread
                Layer::State& editTemp(currentState());
                editTemp.w = editDraw.w;
                editTemp.h = editDraw.h;

                // recompute visible region
                recomputeVisibleRegions = true;
            }

            LOGD_IF(DEBUG_RESIZE,
                    "lockPageFlip : "
                    "       (layer=%p), buffer (%ux%u, tr=%02x), "
                    "requested (%dx%d)",
                    this,
                    bufWidth, bufHeight, mCurrentTransform,
                    front.requested_w, front.requested_h);
        }
    } else {
        updateLayerQcomFlags(LAYER_UPDATE_STATUS, false, mLayerQcomFlags);
    }
}
Example No. 14
void WebMBufferedParser::Append(const unsigned char* aBuffer, uint32_t aLength,
                                nsTArray<WebMTimeDataOffset>& aMapping,
                                ReentrantMonitor& aReentrantMonitor)
{
  static const uint32_t SEGMENT_ID = 0x18538067;
  static const uint32_t SEGINFO_ID = 0x1549a966;
  static const uint32_t CLUSTER_ID = 0x1f43b675;
  static const uint32_t TIMECODESCALE_ID = 0x2ad7b1;
  static const unsigned char TIMECODE_ID = 0xe7;
  static const unsigned char BLOCK_ID = 0xa1;
  static const unsigned char SIMPLEBLOCK_ID = 0xa3;
  static const uint32_t BLOCK_TIMECODE_LENGTH = 2;

  const unsigned char* p = aBuffer;

  // Parse each byte in aBuffer one-by-one, producing timecodes and updating
  // aMapping as we go.  Parser pauses at end of stream (which may be at any
  // point within the parse) and resumes parsing the next time Append is
  // called with new data.
  while (p < aBuffer + aLength) {
    switch (mState) {
    case READ_ELEMENT_ID:
      mVIntRaw = true;
      mState = READ_VINT;
      mNextState = READ_ELEMENT_SIZE;
      break;
    case READ_ELEMENT_SIZE:
      mVIntRaw = false;
      mElement.mID = mVInt;
      mState = READ_VINT;
      mNextState = PARSE_ELEMENT;
      break;
    case PARSE_ELEMENT:
      mElement.mSize = mVInt;
      switch (mElement.mID.mValue) {
      case SEGMENT_ID:
        mState = READ_ELEMENT_ID;
        break;
      case SEGINFO_ID:
        mGotTimecodeScale = true;
        mState = READ_ELEMENT_ID;
        break;
      case TIMECODE_ID:
        mVInt = VInt();
        mVIntLeft = mElement.mSize.mValue;
        mState = READ_VINT_REST;
        mNextState = READ_CLUSTER_TIMECODE;
        break;
      case TIMECODESCALE_ID:
        mVInt = VInt();
        mVIntLeft = mElement.mSize.mValue;
        mState = READ_VINT_REST;
        mNextState = READ_TIMECODESCALE;
        break;
      case CLUSTER_ID:
        mClusterOffset = mCurrentOffset + (p - aBuffer) -
                        (mElement.mID.mLength + mElement.mSize.mLength);
        mState = READ_ELEMENT_ID;
        break;
      case SIMPLEBLOCK_ID:
        /* FALLTHROUGH */
      case BLOCK_ID:
        mBlockSize = mElement.mSize.mValue;
        mBlockTimecode = 0;
        mBlockTimecodeLength = BLOCK_TIMECODE_LENGTH;
        mBlockOffset = mCurrentOffset + (p - aBuffer) -
                       (mElement.mID.mLength + mElement.mSize.mLength);
        mState = READ_VINT;
        mNextState = READ_BLOCK_TIMECODE;
        break;
      default:
        mSkipBytes = mElement.mSize.mValue;
        mState = SKIP_DATA;
        mNextState = READ_ELEMENT_ID;
        break;
      }
      break;
    case READ_VINT: {
      unsigned char c = *p++;
      uint32_t mask;
      mVInt.mLength = VIntLength(c, &mask);
      mVIntLeft = mVInt.mLength - 1;
      mVInt.mValue = mVIntRaw ? c : c & ~mask;
      mState = READ_VINT_REST;
      break;
    }
    case READ_VINT_REST:
      if (mVIntLeft) {
        mVInt.mValue <<= 8;
        mVInt.mValue |= *p++;
        mVIntLeft -= 1;
      } else {
        mState = mNextState;
      }
      break;
    case READ_TIMECODESCALE:
      MOZ_ASSERT(mGotTimecodeScale);
      mTimecodeScale = mVInt.mValue;
      mState = READ_ELEMENT_ID;
      break;
    case READ_CLUSTER_TIMECODE:
      mClusterTimecode = mVInt.mValue;
      mState = READ_ELEMENT_ID;
      break;
    case READ_BLOCK_TIMECODE:
      if (mBlockTimecodeLength) {
        mBlockTimecode <<= 8;
        mBlockTimecode |= *p++;
        mBlockTimecodeLength -= 1;
      } else {
        // It's possible we've parsed this data before, so avoid inserting
        // duplicate WebMTimeDataOffset entries.
        {
          ReentrantMonitorAutoEnter mon(aReentrantMonitor);
          int64_t endOffset = mBlockOffset + mBlockSize +
                              mElement.mID.mLength + mElement.mSize.mLength;
          uint32_t idx = aMapping.IndexOfFirstElementGt(endOffset);
          if (idx == 0 || aMapping[idx - 1] != endOffset) {
            // Don't insert invalid negative timecodes.
            if (mBlockTimecode >= 0 || mClusterTimecode >= uint16_t(abs(mBlockTimecode))) {
              MOZ_ASSERT(mGotTimecodeScale);
              uint64_t absTimecode = mClusterTimecode + mBlockTimecode;
              absTimecode *= mTimecodeScale;
              WebMTimeDataOffset entry(endOffset, absTimecode, mClusterOffset);
              aMapping.InsertElementAt(idx, entry);
            }
          }
        }

        // Skip rest of block header and the block's payload.
        mBlockSize -= mVInt.mLength;
        mBlockSize -= BLOCK_TIMECODE_LENGTH;
        mSkipBytes = uint32_t(mBlockSize);
        mState = SKIP_DATA;
        mNextState = READ_ELEMENT_ID;
      }
      break;
    case SKIP_DATA:
      if (mSkipBytes) {
        uint32_t left = aLength - (p - aBuffer);
        left = std::min(left, mSkipBytes);
        p += left;
        mSkipBytes -= left;
      } else {
        mState = mNextState;
      }
      break;
    }
  }

  NS_ASSERTION(p == aBuffer + aLength, "Must have parsed to end of data.");
  mCurrentOffset += aLength;
}
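
The comment at the head of this fragment describes Append's incremental contract: parser state survives between calls, so a stream can be fed in arbitrary chunks and indexing resumes wherever the previous call paused. A minimal driver sketch under that assumption; ReadSomeBytes is a hypothetical I/O helper, and the argument list follows the full Append shown in Exemplo n.º 20 (its bool result is ignored here for brevity).

// Usage sketch, not part of the original source.
void BuildIndexIncrementally(WebMBufferedParser& parser,
                             nsTArray<WebMTimeDataOffset>& mapping,
                             ReentrantMonitor& monitor)
{
  unsigned char chunk[4096];
  uint32_t read;
  while ((read = ReadSomeBytes(chunk, sizeof(chunk))) != 0) {
    // Each call resumes exactly where the previous one paused, even if that
    // point was in the middle of an element ID, size, or block payload.
    parser.Append(chunk, read, mapping, monitor);
  }
}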
Exemplo n.º 15
0
 void decode_freqs(std::vector<uint32_t>& out) const
 {
     out.resize(size);
     BlockCodec::decode(freqs_begin, out.data(),
                        uint32_t(-1), size);
 }
Exemplo n.º 16
0
  HexState operator() (const char*& Ptr, llvm::raw_ostream& Stream,
                       bool ForceHex) {
    // Block allocate the next chunk
    if (!(m_Buf.size() % kBufSize))
      m_Buf.reserve(m_Buf.size() + kBufSize);

    HexState State = kText;
    const char* const Start = Ptr;
    char32_t Char;
    if (m_Utf8) {
      Char = utf8::next(Ptr);
      if (Ptr > m_End) {
        // Invalid/bad encoding: dump the remaining as hex
        Ptr = Start;
        while (Ptr < m_End)
          Stream << "\\x" << llvm::format_hex_no_prefix(uint8_t(*Ptr++), 2);
        m_HexRun = true;
        return kHex;
      }
    } else
      Char = (*Ptr++ & 0xff);

    // Assume more often than not -regular- strings are printed
    if (LLVM_UNLIKELY(!isPrintable(Char, m_Loc))) {
      m_HexRun = false;
      if (LLVM_UNLIKELY(ForceHex || !std::isspace(wchar_t(Char), m_Loc))) {
        if (Char > 0xffff)
          Stream << "\\U" << llvm::format_hex_no_prefix(uint32_t(Char), 8);
        else if (Char > 0xff)
          Stream << "\\u" << llvm::format_hex_no_prefix(uint16_t(Char), 4);
        else if (Char) {
          Stream << "\\x" << llvm::format_hex_no_prefix(uint8_t(Char), 2);
          m_HexRun = true;
          return kHex;
        } else
          Stream << "\\0";
        return kText;
      }

      switch (Char) {
        case '\b': Stream << "\\b"; return kEsc;
        // \r isn't so great on Unix, what about Windows?
        case '\r': Stream << "\\r"; return kEsc;
        default: break;
      }
      State = kEsc;
    }

    if (m_HexRun) {
      // If the last print was a hex code, and this is now a char that could
      // be interpreted as a continuation of that hex-sequence, close out
      // the string and use concatenation. {'\xea', 'B'} -> "\xea" "B"
      m_HexRun = false;
      if (std::isxdigit(wchar_t(Char), m_Loc))
        Stream << "\" \"";
    }
    if (m_Utf8)
      Stream << llvm::StringRef(Start, Ptr-Start);
    else
      Stream << char(Char);
    return State;
  }
Exemplo n.º 17
0
SkyLinesTracking::FixPacket
SkyLinesTracking::ToFix(uint64_t key, const NMEAInfo &basic)
{
  assert(key != 0);
  assert(basic.time_available);

  FixPacket packet;
  packet.header.magic = ToBE32(MAGIC);
  packet.header.crc = 0;
  packet.header.type = ToBE16(Type::FIX);
  packet.header.key = ToBE64(key);
  packet.flags = 0;

  packet.time = ToBE32(uint32_t(basic.time * 1000));
  packet.reserved = 0;

  if (basic.location_available) {
    packet.flags |= ToBE32(FixPacket::FLAG_LOCATION);
    ::GeoPoint location = basic.location;
    location.Normalize();
    packet.location.latitude = ToBE32(int(location.latitude.Degrees() * 1000000));
    packet.location.longitude = ToBE32(int(location.longitude.Degrees() * 1000000));
  } else
    packet.location.latitude = packet.location.longitude = 0;

  if (basic.track_available) {
    packet.flags |= ToBE32(FixPacket::FLAG_TRACK);
    packet.track = ToBE16(uint16_t(basic.track.AsBearing().Degrees()));
  } else
    packet.track = 0;

  if (basic.ground_speed_available) {
    packet.flags |= ToBE32(FixPacket::FLAG_GROUND_SPEED);
    packet.ground_speed = ToBE16(uint16_t(basic.ground_speed * 16));
  } else
    packet.ground_speed = 0;

  if (basic.airspeed_available) {
    packet.flags |= ToBE32(FixPacket::FLAG_AIRSPEED);
    packet.airspeed = ToBE16(uint16_t(basic.indicated_airspeed * 16));
  } else
    packet.airspeed = 0;

  if (basic.baro_altitude_available) {
    packet.flags |= ToBE32(FixPacket::FLAG_ALTITUDE);
    packet.altitude = ToBE16(int(basic.baro_altitude));
  } else if (basic.gps_altitude_available) {
    packet.flags |= ToBE32(FixPacket::FLAG_ALTITUDE);
    packet.altitude = ToBE16(int(basic.gps_altitude));
  } else
    packet.altitude = 0;

  if (basic.total_energy_vario_available) {
    packet.flags |= ToBE32(FixPacket::FLAG_VARIO);
    packet.vario = ToBE16(int(basic.total_energy_vario * 256));
  } else if (basic.netto_vario_available) {
    packet.flags |= ToBE32(FixPacket::FLAG_VARIO);
    packet.vario = ToBE16(int(basic.netto_vario * 256));
  } else if (basic.noncomp_vario_available) {
    packet.flags |= ToBE32(FixPacket::FLAG_VARIO);
    packet.vario = ToBE16(int(basic.noncomp_vario * 256));
  } else
    packet.vario = 0;

  if (basic.engine_noise_level_available) {
    packet.flags |= ToBE32(FixPacket::FLAG_ENL);
    packet.engine_noise_level = ToBE16(basic.engine_noise_level);
  } else
    packet.engine_noise_level = 0;

  packet.header.crc = ToBE16(UpdateCRC16CCITT(&packet, sizeof(packet), 0));
  return packet;
}
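
ToFix packs each measurement into a big-endian fixed-point field: latitude and longitude in micro-degrees, ground speed and airspeed scaled by 16, vario by 256. A decoding sketch for the other end of the wire, assuming FromBE16/FromBE32 are the receive-side counterparts of the ToBE* helpers used above (illustrative only, not taken from the SkyLines server):

static double DecodeDegrees(uint32_t big_endian)
{
  // micro-degrees back to degrees
  return int32_t(FromBE32(big_endian)) / 1000000.;
}

static double DecodeSpeed(uint16_t big_endian)
{
  // 1/16 steps back to the sender's speed unit
  return FromBE16(big_endian) / 16.;
}

static double DecodeVario(uint16_t big_endian)
{
  // 1/256 steps back to the sender's vario unit
  return int16_t(FromBE16(big_endian)) / 256.;
}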
Exemplo n.º 18
0
    bool readArgs(int argc, char** argv, TestParameters& tp, JournalParameters& sp)
    {
        static struct option long_options[] = {
            {"help", no_argument, 0, 'h'},

            // Test params
            {"num_msgs", required_argument, 0, 'm'},
            {"msg_size", required_argument, 0, 'S'},
            {"num_queues", required_argument, 0, 'q'},
            {"num_threads_per_queue", required_argument, 0, 't'},

            // Journal params
            {"jrnl_dir", required_argument, 0, 'd'},
            {"jrnl_base_filename", required_argument, 0, 'b'},
            {"num_jfiles", required_argument, 0, 'f'},
            {"jfsize_sblks", required_argument, 0, 's'},
            {"auto_expand", no_argument, 0, 'a'},
            {"ae_max_jfiles", required_argument, 0, 'e'},
            {"wcache_num_pages", required_argument, 0, 'p'},
            {"wcache_pgsize_sblks", required_argument, 0, 'c'},

            {0, 0, 0, 0}
        };

        bool err = false;
        int c = 0;
        while (true) {
            int option_index = 0;
            c = getopt_long(argc, argv, "ab:c:d:e:f:hm:p:q:s:S:t:", long_options, &option_index);
            if (c == -1) break;
            switch (c) {
                // Test params
                case 'm':
                    tp._numMsgs = uint32_t(std::atol(optarg));
                    break;
                case 'S':
                    tp._msgSize = uint32_t(std::atol(optarg));
                    break;
                case 'q':
                    tp._numQueues = uint16_t(std::atoi(optarg));
                    break;
                case 't':
                    tp._numThreadPairsPerQueue = uint16_t(std::atoi(optarg));
                    break;

                // Store params
                case 'd':
                    sp._jrnlDir.assign(optarg);
                    break;
                case 'b':
                    sp._jrnlBaseFileName.assign(optarg);
                    break;
                case 'f':
                    sp._numJrnlFiles = uint16_t(std::atoi(optarg));
                    break;
                case 's':
                    sp._jrnlFileSize_sblks = uint32_t(std::atol(optarg));
                    break;
                case 'a':
                    sp._autoExpand = true;
                    break;
                case 'e':
                    sp._autoExpandMaxJrnlFiles = uint16_t(std::atoi(optarg));
                    break;
                case 'p':
                    sp._writeBuffNumPgs = uint16_t(std::atoi(optarg));
                    break;
                case 'c':
                    sp._writeBuffPgSize_sblks = uint32_t(std::atol(optarg));
                    break;

                // Other
                case 'h':
                default:
                    err = true;
                    printArgs();
            }
        }
        return err;
    }
Exemplo n.º 19
0
int32_t
CompareUTF8toUTF16(const nsASingleFragmentCString& aUTF8String,
                   const nsASingleFragmentString& aUTF16String)
{
  static const uint32_t NOT_ASCII = uint32_t(~0x7F);

  const char* u8;
  const char* u8end;
  aUTF8String.BeginReading(u8);
  aUTF8String.EndReading(u8end);

  const char16_t* u16;
  const char16_t* u16end;
  aUTF16String.BeginReading(u16);
  aUTF16String.EndReading(u16end);

  while (u8 != u8end && u16 != u16end) {
    // Cast away the signedness of *u8 to prevent sign extension when
    // converting to uint32_t.
    uint32_t c8_32 = (uint8_t)*u8;

    if (c8_32 & NOT_ASCII) {
      bool err;
      c8_32 = UTF8CharEnumerator::NextChar(&u8, u8end, &err);
      if (err) {
        return INT32_MIN;
      }

      uint32_t c16_32 = UTF16CharEnumerator::NextChar(&u16, u16end);
      // The above UTF16CharEnumerator::NextChar() call can fail, but if it
      // does so for anything other than having no data to look at (which
      // can't happen here), it returns the Unicode replacement character
      // 0xFFFD for the invalid data it was fed. Ignore that error and treat
      // invalid UTF16 as 0xFFFD.
      //
      // This matches what our UTF16 to UTF8 conversion code does, and thus a
      // UTF8 string that came from an invalid UTF16 string will compare
      // equal to the invalid UTF16 string it came from. The same is true for
      // any other UTF16 string that differs only in the invalid part of the
      // string.

      if (c8_32 != c16_32) {
        return c8_32 < c16_32 ? -1 : 1;
      }
    } else {
      if (c8_32 != *u16) {
        return c8_32 > *u16 ? 1 : -1;
      }

      ++u8;
      ++u16;
    }
  }

  if (u8 != u8end) {
    // We got to the end of the UTF16 string, but not to the end of
    // the UTF8 string. The UTF8 string is longer than the UTF16
    // string.

    return 1;
  }

  if (u16 != u16end) {
    // We got to the end of the UTF8 string, but not to the end of
    // the UTF16 string. The UTF16 string is longer than the UTF8
    // string.

    return -1;
  }

  // The two strings match.

  return 0;
}
Exemplo n.º 20
0
bool WebMBufferedParser::Append(const unsigned char* aBuffer, uint32_t aLength,
                                nsTArray<WebMTimeDataOffset>& aMapping,
                                ReentrantMonitor& aReentrantMonitor)
{
  static const uint32_t EBML_ID = 0x1a45dfa3;
  static const uint32_t SEGMENT_ID = 0x18538067;
  static const uint32_t SEGINFO_ID = 0x1549a966;
  static const uint32_t TRACKS_ID = 0x1654AE6B;
  static const uint32_t CLUSTER_ID = 0x1f43b675;
  static const uint32_t TIMECODESCALE_ID = 0x2ad7b1;
  static const unsigned char TIMECODE_ID = 0xe7;
  static const unsigned char BLOCKGROUP_ID = 0xa0;
  static const unsigned char BLOCK_ID = 0xa1;
  static const unsigned char SIMPLEBLOCK_ID = 0xa3;
  static const uint32_t BLOCK_TIMECODE_LENGTH = 2;

  static const unsigned char CLUSTER_SYNC_ID[] = { 0x1f, 0x43, 0xb6, 0x75 };

  const unsigned char* p = aBuffer;

  // Parse each byte in aBuffer one-by-one, producing timecodes and updating
  // aMapping as we go.  Parser pauses at end of stream (which may be at any
  // point within the parse) and resumes parsing the next time Append is
  // called with new data.
  while (p < aBuffer + aLength) {
    switch (mState) {
    case READ_ELEMENT_ID:
      mVIntRaw = true;
      mState = READ_VINT;
      mNextState = READ_ELEMENT_SIZE;
      break;
    case READ_ELEMENT_SIZE:
      mVIntRaw = false;
      mElement.mID = mVInt;
      mState = READ_VINT;
      mNextState = PARSE_ELEMENT;
      break;
    case FIND_CLUSTER_SYNC:
      if (*p++ == CLUSTER_SYNC_ID[mClusterSyncPos]) {
        mClusterSyncPos += 1;
      } else {
        mClusterSyncPos = 0;
      }
      if (mClusterSyncPos == sizeof(CLUSTER_SYNC_ID)) {
        mVInt.mValue = CLUSTER_ID;
        mVInt.mLength = sizeof(CLUSTER_SYNC_ID);
        mState = READ_ELEMENT_SIZE;
      }
      break;
    case PARSE_ELEMENT:
      mElement.mSize = mVInt;
      switch (mElement.mID.mValue) {
      case SEGMENT_ID:
        mState = READ_ELEMENT_ID;
        break;
      case SEGINFO_ID:
        mGotTimecodeScale = true;
        mState = READ_ELEMENT_ID;
        break;
      case TIMECODE_ID:
        mVInt = VInt();
        mVIntLeft = mElement.mSize.mValue;
        mState = READ_VINT_REST;
        mNextState = READ_CLUSTER_TIMECODE;
        break;
      case TIMECODESCALE_ID:
        mVInt = VInt();
        mVIntLeft = mElement.mSize.mValue;
        mState = READ_VINT_REST;
        mNextState = READ_TIMECODESCALE;
        break;
      case CLUSTER_ID:
        mClusterOffset = mCurrentOffset + (p - aBuffer) -
                        (mElement.mID.mLength + mElement.mSize.mLength);
        // Handle the "unknown size" encoding: only record an end offset
        // when the Cluster size is known.
        if (mElement.mSize.mValue + 1 != uint64_t(1) << (mElement.mSize.mLength * 7)) {
          mClusterEndOffset = mClusterOffset + mElement.mID.mLength + mElement.mSize.mLength + mElement.mSize.mValue;
        } else {
          mClusterEndOffset = -1;
        }
        mGotClusterTimecode = false;
        mState = READ_ELEMENT_ID;
        break;
      case BLOCKGROUP_ID:
        mState = READ_ELEMENT_ID;
        break;
      case SIMPLEBLOCK_ID:
        /* FALLTHROUGH */
      case BLOCK_ID:
        if (!mGotClusterTimecode) {
          WEBM_DEBUG("The Timecode element must appear before any Block or "
                     "SimpleBlock elements in a Cluster");
          return false;
        }
        mBlockSize = mElement.mSize.mValue;
        mBlockTimecode = 0;
        mBlockTimecodeLength = BLOCK_TIMECODE_LENGTH;
        mBlockOffset = mCurrentOffset + (p - aBuffer) -
                       (mElement.mID.mLength + mElement.mSize.mLength);
        mState = READ_VINT;
        mNextState = READ_BLOCK_TIMECODE;
        break;
      case TRACKS_ID:
        mSkipBytes = mElement.mSize.mValue;
        mState = CHECK_INIT_FOUND;
        break;
      case EBML_ID:
        mLastInitStartOffset = mCurrentOffset + (p - aBuffer) -
                            (mElement.mID.mLength + mElement.mSize.mLength);
        MOZ_FALLTHROUGH;
      default:
        mSkipBytes = mElement.mSize.mValue;
        mState = SKIP_DATA;
        mNextState = READ_ELEMENT_ID;
        break;
      }
      break;
    case READ_VINT: {
      unsigned char c = *p++;
      uint32_t mask;
      mVInt.mLength = VIntLength(c, &mask);
      mVIntLeft = mVInt.mLength - 1;
      mVInt.mValue = mVIntRaw ? c : c & ~mask;
      mState = READ_VINT_REST;
      break;
    }
    case READ_VINT_REST:
      if (mVIntLeft) {
        mVInt.mValue <<= 8;
        mVInt.mValue |= *p++;
        mVIntLeft -= 1;
      } else {
        mState = mNextState;
      }
      break;
    case READ_TIMECODESCALE:
      if (!mGotTimecodeScale) {
        WEBM_DEBUG("Should get the SegmentInfo first");
        return false;
      }
      mTimecodeScale = mVInt.mValue;
      mState = READ_ELEMENT_ID;
      break;
    case READ_CLUSTER_TIMECODE:
      mClusterTimecode = mVInt.mValue;
      mGotClusterTimecode = true;
      mState = READ_ELEMENT_ID;
      break;
    case READ_BLOCK_TIMECODE:
      if (mBlockTimecodeLength) {
        mBlockTimecode <<= 8;
        mBlockTimecode |= *p++;
        mBlockTimecodeLength -= 1;
      } else {
        // It's possible we've parsed this data before, so avoid inserting
        // duplicate WebMTimeDataOffset entries.
        {
          ReentrantMonitorAutoEnter mon(aReentrantMonitor);
          int64_t endOffset = mBlockOffset + mBlockSize +
                              mElement.mID.mLength + mElement.mSize.mLength;
          uint32_t idx = aMapping.IndexOfFirstElementGt(endOffset);
          if (idx == 0 || aMapping[idx - 1] != endOffset) {
            // Don't insert invalid negative timecodes.
            if (mBlockTimecode >= 0 || mClusterTimecode >= uint16_t(abs(mBlockTimecode))) {
              if (!mGotTimecodeScale) {
                WEBM_DEBUG("Should get the TimecodeScale first");
                return false;
              }
              uint64_t absTimecode = mClusterTimecode + mBlockTimecode;
              absTimecode *= mTimecodeScale;
              // Avoid creating an entry if the timecode is out of order
              // (invalid according to the WebM specification) so that
              // ordering invariants of aMapping are not violated.
              if (idx == 0 ||
                  aMapping[idx - 1].mTimecode <= absTimecode ||
                  (idx + 1 < aMapping.Length() &&
                   aMapping[idx + 1].mTimecode >= absTimecode)) {
                WebMTimeDataOffset entry(endOffset, absTimecode, mLastInitStartOffset,
                                         mClusterOffset, mClusterEndOffset);
                aMapping.InsertElementAt(idx, entry);
              } else {
                WEBM_DEBUG("Out of order timecode %" PRIu64 " in Cluster at %" PRId64 " ignored",
                           absTimecode, mClusterOffset);
              }
            }
          }
        }

        // Skip rest of block header and the block's payload.
        mBlockSize -= mVInt.mLength;
        mBlockSize -= BLOCK_TIMECODE_LENGTH;
        mSkipBytes = uint32_t(mBlockSize);
        mState = SKIP_DATA;
        mNextState = READ_ELEMENT_ID;
      }
      break;
    case SKIP_DATA:
      if (mSkipBytes) {
        uint32_t left = aLength - (p - aBuffer);
        left = std::min(left, mSkipBytes);
        p += left;
        mSkipBytes -= left;
      }
      if (!mSkipBytes) {
        mBlockEndOffset = mCurrentOffset + (p - aBuffer);
        mState = mNextState;
      }
      break;
    case CHECK_INIT_FOUND:
      if (mSkipBytes) {
        uint32_t left = aLength - (p - aBuffer);
        left = std::min(left, mSkipBytes);
        p += left;
        mSkipBytes -= left;
      }
      if (!mSkipBytes) {
        if (mInitEndOffset < 0) {
          mInitEndOffset = mCurrentOffset + (p - aBuffer);
          mBlockEndOffset = mCurrentOffset + (p - aBuffer);
        }
        mState = READ_ELEMENT_ID;
      }
      break;
    }
  }

  NS_ASSERTION(p == aBuffer + aLength, "Must have parsed to end of data.");
  mCurrentOffset += aLength;

  return true;
}
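
The CLUSTER_ID branch above relies on EBML's "unknown size" convention: a size VINT of mLength bytes carries 7 * mLength data bits, and the all-ones value is reserved to mean the element's length is unknown. A standalone restatement of that test, purely illustrative:

#include <cstdint>

// True when an EBML size VINT of `length` bytes holds the all-ones value
// (2^(7 * length) - 1), i.e. the reserved "size unknown" encoding.
static bool IsUnknownEBMLSize(uint64_t value, uint32_t length)
{
  return value + 1 == (uint64_t(1) << (length * 7));
}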
Exemplo n.º 21
0
/////////////////////////////////////////////////////
// Handle a protocol level packet. This could be either a top-level
// EQUDPIPPacket or a subpacket that is just an EQProtocolPacket. Either way
// we use net opcodes here.
void EQPacketStream::processPacket(EQProtocolPacket& packet, bool isSubpacket)
{
#if defined(PACKET_PROCESS_DIAG) && (PACKET_PROCESS_DIAG > 2)
  seqDebug("-->EQPacketStream::processPacket, subpacket=%s on stream %s (%d)",
    (isSubpacket ? "true" : "false"), EQStreamStr[m_streamid], m_streamid);
#endif

  if (IS_APP_OPCODE(packet.getNetOpCode()))
  {
    // This is an app-opcode directly on the wire with no wrapping protocol
    // information. Weird, but whatever gets the stream read, right?
	dispatchPacket(packet.payload(), packet.payloadLength(), 
      packet.getNetOpCode(), m_opcodeDB.find(packet.getNetOpCode()));
    return;
  }

  // Process the net opcode
  switch (packet.getNetOpCode())
  {
    case OP_Combined:
    {
#if defined(PACKET_PROCESS_DIAG) && (PACKET_PROCESS_DIAG > 2)
      seqDebug("EQPacket: found combined packet (net op: %04x, size %d) on stream %s (%d). Unrolling.", 
        packet.getNetOpCode(), packet.payloadLength(), 
        EQStreamStr[m_streamid], m_streamid);
#endif

      // Rolled up multiple packets inside this packet. Need to unroll them
      // and process them individually. The subpacket data starts after the
      // net opcode.
      uint8_t* subpacket = packet.payload();

      while (subpacket < packet.payload() + packet.payloadLength())
      {
        // Length specified first on the wire.
        uint8_t subpacketLength = subpacket[0];

        // Move past the length
        subpacket++;

        // OpCode (in net order)
        uint16_t subOpCode = *(uint16_t*)subpacket;
        
#if defined(PACKET_PROCESS_DIAG) && (PACKET_PROCESS_DIAG > 2)
        seqDebug("EQPacket: unrolling length %d bytes from combined packet on stream %s (%d). Opcode %04x", 
          subpacketLength, EQStreamStr[m_streamid], m_streamid, subOpCode);
#endif
        
        // Opcode is next. Net opcode or app opcode?
        if (IS_NET_OPCODE(subOpCode))
        {
#if defined(PACKET_PROCESS_DIAG) && (PACKET_PROCESS_DIAG > 2)
          seqDebug("EQPacket: processing unrolled net opcode, length %d bytes from combined packet on stream %s (%d). Opcode %04x", 
            subpacketLength, EQStreamStr[m_streamid], m_streamid, subOpCode);
#endif

          // Net opcode. false = copy. true = subpacket
          EQProtocolPacket spacket(subpacket, subpacketLength, false, true);

          processPacket(spacket, true);
        }
        else
        {
#if defined(PACKET_PROCESS_DIAG) && (PACKET_PROCESS_DIAG > 2)
        seqDebug("EQPacket: processing unrolled app opcode, length %d bytes from combined packet on stream %s (%d). Opcode %04x", 
          subpacketLength-2, EQStreamStr[m_streamid], m_streamid, subOpCode);
#endif

          // App opcode. Dispatch it, skipping opcode.
          dispatchPacket(&subpacket[2], subpacketLength-2, 
            subOpCode, m_opcodeDB.find(subOpCode));
        }
        subpacket += subpacketLength;
      }
    }
    break;
    case OP_AppCombined:
    {
#if defined(PACKET_PROCESS_DIAG) && (PACKET_PROCESS_DIAG > 2)
      seqDebug("EQPacket: found appcombined packet (net op: %04x, size %d) on stream %s (%d). Unrolling.", 
        packet.getNetOpCode(), packet.payloadLength(), 
        EQStreamStr[m_streamid], m_streamid);
#endif

      // Multiple app op codes in the same packet. Need to unroll and dispatch
      // them.
      uint8_t* subpacket = packet.payload();

      while (subpacket < packet.payload() + packet.payloadLength())
      {
        // Length specified first on the wire.
        uint8_t subpacketLength = subpacket[0];

        // Move past the length
        subpacket++;

        if (subpacketLength != 0xff)
        {
          // Dispatch app op code using given packet length. Net order!
          uint16_t subOpCode = *(uint16_t*)(subpacket);

#if defined(PACKET_PROCESS_DIAG) && (PACKET_PROCESS_DIAG > 2)
        seqDebug("EQPacket: unrolling length %d bytes from combined packet on stream %s (%d). Opcode %04x", 
          subpacketLength, EQStreamStr[m_streamid], m_streamid, subOpCode);
        seqDebug("EQPacket: processing unrolled app opcode, length %d bytes from combined packet on stream %s (%d). Opcode %04x", 
          subpacketLength-2, EQStreamStr[m_streamid], m_streamid, subOpCode);
#endif

          // Dispatch, skipping op code.
          dispatchPacket(&subpacket[2], subpacketLength-2, 
            subOpCode, m_opcodeDB.find(subOpCode));

          // Move ahead
          subpacket += subpacketLength;
        }
        else
        {
          // If the one-byte length is 0xff, this is a long subpacket: the
          // real length follows in the next 2 bytes.
          uint16_t longOne = eqntohuint16(subpacket);
 
          // Move past the 2 byte length
          subpacket += 2;

          // OpCode next. Net order for op codes.
          uint16_t subOpCode = *(uint16_t*)subpacket;
          
#if defined(PACKET_PROCESS_DIAG) && (PACKET_PROCESS_DIAG > 2)
        seqDebug("EQPacket: unrolling length %d bytes from combined packet on stream %s (%d). Opcode %04x", 
          longOne, EQStreamStr[m_streamid], m_streamid, subOpCode);
        seqDebug("EQPacket: processing unrolled app opcode, length %d bytes from combined packet on stream %s (%d). Opcode %04x", 
          longOne-2, EQStreamStr[m_streamid], m_streamid, subOpCode);
#endif

          // Dispatch, skipping op code.
          dispatchPacket(&subpacket[2], longOne-2, 
            subOpCode, m_opcodeDB.find(subOpCode));

          // Move ahead
          subpacket += longOne;
        }
      }
    }
    break;
    case OP_Packet:
    {
      // Normal unfragmented sequenced packet.
      uint16_t seq = packet.arqSeq();
      emit seqReceive(seq, (int)m_streamid);

      if (seq >= m_arqSeqExp)
      {
        // Future packet?
        if (seq == m_arqSeqExp)
        {
          // Expected packet.
          m_arqSeqExp++;
          emit seqExpect(m_arqSeqExp, (int)m_streamid);

          // OpCode next. Net order for op codes.
          uint16_t subOpCode = *(uint16_t*)(packet.payload());
       
#if defined(PACKET_PROCESS_DIAG) && (PACKET_PROCESS_DIAG > 1)
          seqDebug("SEQ: Found next sequence number in data stream %s (%d), incrementing expected seq, %04x (op code %04x, sub opcode %04x)", 
	        EQStreamStr[m_streamid], m_streamid, seq, 
            packet.getNetOpCode(), subOpCode);
#endif

          // App opcode or net opcode?
          if (IS_NET_OPCODE(subOpCode))
          {
            // Net opcode. false = no copy. true = subpacket.
            EQProtocolPacket spacket(packet.payload(), 
              packet.payloadLength(), false, true);

            processPacket(spacket, true);
          }
          else
          {
            // App opcode. Dispatch, skipping opcode.
            dispatchPacket(&packet.payload()[2], packet.payloadLength()-2,
              subOpCode, m_opcodeDB.find(subOpCode));
          }
        }
        else if (seq < (uint32_t(m_arqSeqExp + arqSeqWrapCutoff)) ||
                 seq < (int32_t(m_arqSeqExp - arqSeqWrapCutoff)))
        {
          // Yeah, future packet. Push it on the packet cache.
#ifdef PACKET_PROCESS_DIAG
          seqDebug("SEQ: out of order sequence %04x stream %s (%d) expecting %04x, sending to cache, %04d",
	        seq, EQStreamStr[m_streamid], m_streamid, 
            m_arqSeqExp, m_cache.size());
#endif
          setCache(seq, packet);
        }
        else
        {
          // Past packet outside the cut off
          seqWarn("SEQ: received sequenced %spacket outside the bounds of reasonableness on stream %s (%d) netopcode=%04x size=%d. Expecting seq=%04x got seq=%04x, reasonableness being %d in the future.", 
            (isSubpacket ? "sub" : ""),
            EQStreamStr[m_streamid], m_streamid,
            packet.getNetOpCode(), packet.payloadLength(), 
            m_arqSeqExp, seq, arqSeqWrapCutoff);
        }
      }
      else
      {
        // Spooky packet from the past. Boo!
#if defined(PACKET_PROCESS_DIAG) && (PACKET_PROCESS_DIAG > 1)
        seqDebug("discarding %spacket netopcode=%04x seq=%d size=%d on stream %s (%d). Packet is in the past. We've moved on.",
          (isSubpacket ? "sub" : ""),
          packet.getNetOpCode(), seq, packet.payloadLength(), 
          EQStreamStr[m_streamid], m_streamid);
#endif
      }
    }
    break;
    case OP_Oversized:
    {
      // Fragmented sequenced data packet.
      uint16_t seq = packet.arqSeq();
      emit seqReceive(seq, (int)m_streamid);

      if (seq >= m_arqSeqExp)
      {
        // Future packet?
        if (seq == m_arqSeqExp)
        {
          // Expected packet.
          m_arqSeqExp++;
          emit seqExpect(m_arqSeqExp, (int)m_streamid);
       
#if defined(PACKET_PROCESS_DIAG) && (PACKET_PROCESS_DIAG > 1)
          seqDebug("SEQ: Found next sequence number in data stream %s (%d), incrementing expected seq, %04x (op code %04x)", 
	        EQStreamStr[m_streamid], m_streamid, seq, packet.getNetOpCode());
#endif

          // Push the fragment on.
          m_fragment.addFragment(packet);

          if (m_fragment.isComplete())
          {
            // OpCode from fragment. In network order.
            uint16_t fragOpCode = *(uint16_t*)(m_fragment.data());

#ifdef PACKET_PROCESS_DIAG
          seqDebug("SEQ: Completed oversized app packet on stream %s with seq %04x, total size %d opcode %04x", 
	        EQStreamStr[m_streamid], seq, m_fragment.size()-2, fragOpCode);
#endif

            // dispatch fragment. Skip opcode.
            dispatchPacket(&m_fragment.data()[2], m_fragment.size()-2,
              fragOpCode, m_opcodeDB.find(fragOpCode)); 

            m_fragment.reset();
          }
        }
        else if (seq < (uint32_t(m_arqSeqExp + arqSeqWrapCutoff)) ||
                 seq < (int32_t(m_arqSeqExp - arqSeqWrapCutoff)))
        {
          // Yeah, future packet. Push it on the packet cache.
#ifdef PACKET_PROCESS_DIAG
          seqDebug("SEQ: out of order sequence %04x stream %s (%d) expecting %04x, sending to cache, %04d",
	        seq, EQStreamStr[m_streamid], m_streamid, 
            m_arqSeqExp, m_cache.size());
#endif
          setCache(seq, packet);
        }
        else
        {
          // Past packet outside the cut off
          seqWarn("SEQ: received sequenced %spacket outside the bounds of reasonableness on stream %s (%d) netopcode=%04x size=%d. Expecting seq=%04x got seq=%04x, reasonableness being %d in the future.", 
            (isSubpacket ? "sub" : ""),
            EQStreamStr[m_streamid], m_streamid,
            packet.getNetOpCode(), packet.payloadLength(), 
            m_arqSeqExp, seq, arqSeqWrapCutoff);
        }
      }
      else
      {
        // Spooky packet from the past. Boo!
#if defined(PACKET_PROCESS_DIAG) && (PACKET_PROCESS_DIAG > 1)
        seqDebug("discarding packet netopcode=%04x seq=%04x size=%d on stream %s (%d). Packet is in the past. We've moved on, expecting %04x.",
          packet.getNetOpCode(), seq, packet.payloadLength(), 
          EQStreamStr[m_streamid], m_streamid, m_arqSeqExp);
#endif
      }
    }
    break;
    case OP_SessionRequest:
    {
      // Session request from client to server.
#if defined(PACKET_PROCESS_DIAG) || defined(PACKET_SESSION_DIAG)
      seqDebug("EQPacket: SessionRequest found, resetting expected seq, stream %s (%d) (session tracking %s)",
	    EQStreamStr[m_streamid], m_streamid,
        (m_session_tracking_enabled == 2 ? "locked on" : 
          (m_session_tracking_enabled == 1 ? "enabled" : "disabled")));
#endif
      
      // Pull off session request information
      SessionRequestStruct* request = (SessionRequestStruct*) packet.payload();

      m_sessionId = eqntohuint32((uint8_t*)&(request->sessionId));
      m_maxLength = eqntohuint32((uint8_t*)&(request->maxLength));

#if defined(PACKET_SESSION_DIAG)
      seqDebug("EQPacket: SessionRequest %s:%u->%s:%u, sessionId %u maxLength %u, awaiting key for stream %s (%d)",
        ((EQUDPIPPacketFormat&) packet).getIPv4SourceA().ascii(),
        ((EQUDPIPPacketFormat&) packet).getSourcePort(),
        ((EQUDPIPPacketFormat&) packet).getIPv4DestA().ascii(),
        ((EQUDPIPPacketFormat&) packet).getDestPort(),
        m_sessionId, m_maxLength, EQStreamStr[m_streamid], m_streamid);
#endif

#if defined(PACKET_SESSION_DIAG) && (PACKET_SESSION_DIAG > 1)
      seqDebug("EQPacket: SessionRequest contents: unknown %u, sessionId %u, maxLength %u",
        eqntohuint32((uint8_t*)&(request->unknown0000)), 
        m_sessionId, m_maxLength);
#endif

#if defined(PACKET_SESSION_DIAG) && (PACKET_SESSION_DIAG > 2)
      seqDebug("EQPacket: Raw SessionRequest: %02x%02x %02x%02x %02x%02x %02x%02x %02x%02x %02x%02x",
        packet.payload()[0], packet.payload()[1], packet.payload()[2], 
        packet.payload()[3], packet.payload()[4], packet.payload()[5], 
        packet.payload()[6], packet.payload()[7], packet.payload()[8], 
        packet.payload()[9], packet.payload()[10], packet.payload()[11]);
#endif

      m_arqSeqExp = 0;
      m_arqSeqFound = true;
    }
    break;
    case OP_SessionResponse:
    {
      // Session response from server
#if defined(PACKET_PROCESS_DIAG) || defined(PACKET_SESSION_DIAG)
      seqDebug("EQPacket: SessionResponse found %s:%u->%s:%u, resetting expected seq, stream %s (%d) (session tracking %s)",
        ((EQUDPIPPacketFormat&) packet).getIPv4SourceA().ascii(),
        ((EQUDPIPPacketFormat&) packet).getSourcePort(),
        ((EQUDPIPPacketFormat&) packet).getIPv4DestA().ascii(),
        ((EQUDPIPPacketFormat&) packet).getDestPort(),
	    EQStreamStr[m_streamid], m_streamid,
        (m_session_tracking_enabled == 2 ? "locked on" : 
          (m_session_tracking_enabled == 1 ? "enabled" : "disabled")));
#endif
      
      // Pull off session response information
      SessionResponseStruct* response = 
        (SessionResponseStruct*) packet.payload();

      m_maxLength = eqntohuint32((uint8_t*)&(response->maxLength));
      m_sessionKey = eqntohuint32((uint8_t*)&(response->key));
      m_sessionId = eqntohuint32((uint8_t*)&(response->sessionId));

#if defined(PACKET_SESSION_DIAG)
      seqDebug("EQPacket: SessionResponse sessionId %u maxLength %u, key is %u for stream %s (%d)",
        m_sessionId, m_maxLength, m_sessionKey, 
        EQStreamStr[m_streamid], m_streamid);
#endif

#if defined(PACKET_SESSION_DIAG) && (PACKET_SESSION_DIAG > 1)
      seqDebug("EQPacket: SessionResponse contents: sessionId %u, key %u, unknown %u, unknown %u, maxLength %u, unknown %u",
        m_sessionId, m_sessionKey, 
        eqntohuint16((uint8_t*)&(response->unknown0008)),
        response->unknown0010, m_maxLength,
        eqntohuint32((uint8_t*) &(response->unknown0015)));
#endif

#if defined(PACKET_SESSION_DIAG) && (PACKET_SESSION_DIAG > 2)
      seqDebug("EQPacket: Raw SessionResponse: %02x%02x %02x%02x %02x%02x %02x%02x %02x%02x %02x%02x %02x%02x %02x%02x %02x%02x %02x",
        packet.payload()[0], packet.payload()[1], packet.payload()[2], 
        packet.payload()[3], packet.payload()[4], packet.payload()[5], 
        packet.payload()[6], packet.payload()[7], packet.payload()[8], 
        packet.payload()[9], packet.payload()[10], packet.payload()[11],
        packet.payload()[12], packet.payload()[13], packet.payload()[14], 
        packet.payload()[15], packet.payload()[16], packet.payload()[17], 
        packet.payload()[18]);
#endif

      // Provide key to corresponding stream from this session/stream
      emit sessionKey(m_sessionId, m_streamid, m_sessionKey);

      m_arqSeqExp = 0;
      m_arqSeqFound = true;

      // Session tracking
      if (m_session_tracking_enabled)
      {
        // If this is the world server talking to us, reset session tracking if
        // it is on so we unlatch the client in case of getting kicked.
        if (m_streamid == world2client)
        {
          m_session_tracking_enabled = 1;
          emit sessionTrackingChanged(m_session_tracking_enabled);
        }
        // If this is the zone server talking to us, close the latch and lock
        else if (m_streamid == zone2client)
        {
          // SessionResponse should always be an outer protocol packet, so
          // the EQProtocolPacket passed in can be cast back to
          // EQUDPIPPacketFormat, which we need in order to get access to the
          // IP headers!
          m_session_tracking_enabled = 2;
  
          emit lockOnClient(((EQUDPIPPacketFormat&) packet).getSourcePort(), 
            ((EQUDPIPPacketFormat&) packet).getDestPort());
          emit sessionTrackingChanged(m_session_tracking_enabled);
        }
      }
    }
    break;
    case OP_SessionDisconnect:
    {
#if defined(PACKET_PROCESS_DIAG) || defined(PACKET_SESSION_DIAG)
      seqDebug("EQPacket: SessionDisconnect found %s:%u->%s:%u, resetting expected seq, stream %s (%d) (session tracking %s)",
        ((EQUDPIPPacketFormat&) packet).getIPv4SourceA().ascii(),
        ((EQUDPIPPacketFormat&) packet).getSourcePort(),
        ((EQUDPIPPacketFormat&) packet).getIPv4DestA().ascii(),
        ((EQUDPIPPacketFormat&) packet).getDestPort(),
	    EQStreamStr[m_streamid], m_streamid,
        (m_session_tracking_enabled == 2 ? "locked on" : 
          (m_session_tracking_enabled == 1 ? "enabled" : "disabled")));
#endif

#if defined(PACKET_SESSION_DIAG) && (PACKET_SESSION_DIAG > 2)
      seqDebug("EQPacket: Raw SessionDisconnect: %02x%02x %02x%02x %02x%02x %02x%02x",
        packet.payload()[0], packet.payload()[1], packet.payload()[2], 
        packet.payload()[3], packet.payload()[4], packet.payload()[5], 
        packet.payload()[6], packet.payload()[7]);
#endif

      m_arqSeqExp = 0;

      // Clear cache
      resetCache();

      // Signal closing. Unlatch session tracking if it is on.
      if (m_session_tracking_enabled)
      {
        m_session_tracking_enabled = 1;
        emit sessionTrackingChanged(m_session_tracking_enabled);
      }

      emit closing();
    }
    break;
    case OP_Ack:
    case OP_AckFuture:
    case OP_AckAfterDisconnect:
    {
#if defined(PACKET_PROCESS_DIAG) && (PACKET_PROCESS_DIAG > 2)
      seqDebug("EQPacket: no-op on for net opcode %04x seq %04x, stream %s (%d)",
	    packet.getNetOpCode(), eqntohuint16(packet.payload()), 
        EQStreamStr[m_streamid], m_streamid);
#endif
    }
    break;
    case OP_KeepAlive:
    case OP_SessionStatRequest:
    case OP_SessionStatResponse:
    {
#if defined(PACKET_PROCESS_DIAG) && (PACKET_PROCESS_DIAG > 2)
      seqDebug("EQPacket: no-op on for net opcode %04x, stream %s (%d)",
	    packet.getNetOpCode(), EQStreamStr[m_streamid], m_streamid);
#endif
    }
    break;
    default :
    {
      seqWarn("EQPacket: Unhandled net opcode %04x, stream %s, size %d",
        packet.getNetOpCode(), EQStreamStr[m_streamid], packet.payloadLength());
    }
  }
}
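
The OP_AppCombined branch documents its length-prefix rule only in comments: each subpacket starts with a one-byte length, and 0xff escapes to a two-byte length in network byte order (what eqntohuint16 reads). A compact restatement of that rule as a standalone helper, offered as a sketch rather than code from the original tree:

#include <cstdint>

// Reads a combined-subpacket length and advances the cursor past it.
// A single byte normally; 0xff escapes to a two-byte big-endian length.
static uint16_t readSubpacketLength(const uint8_t*& p)
{
  uint16_t length = *p++;
  if (length == 0xff) {
    length = uint16_t((uint16_t(p[0]) << 8) | p[1]);
    p += 2;
  }
  return length;
}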
Exemplo n.º 22
0
// Copyright (C) 2017 Vicente J. Botet Escriba
//
//  Distributed under the Boost Software License, Version 1.0. (See accompanying
//  file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
// Based on https://github.com/akrzemi1/explicit/blob/master/test/test_explicit.cpp

// <experimental/numerics/v1/numbers/double_wide_arithmetic.hpp>
#include <boost/detail/lightweight_test.hpp>
#include <experimental/numerics/v1/numbers/double_wide_arithmetic.hpp>
#include <string>

namespace stdex = std::experimental;
namespace nmx = std::experimental::numerics;

static_assert(std::is_same<decltype(nmx::wide_neg(int32_t(1))), int64_t>::value, "error");
static_assert(std::is_same<decltype(nmx::wide_neg(uint32_t(1))), uint64_t>::value, "error");

static_assert(std::is_same<decltype(nmx::wide_add(int32_t(1), int32_t(1))), int64_t>::value, "error");
static_assert(std::is_same<decltype(nmx::wide_add(uint32_t(1), uint32_t(1))), uint64_t>::value, "error");

static_assert(std::is_same<decltype(nmx::wide_sub(int32_t(1), int32_t(1))), int64_t>::value, "error");
static_assert(std::is_same<decltype(nmx::wide_sub(uint32_t(1), uint32_t(1))), uint64_t>::value, "error");

int main()
{

  BOOST_TEST_EQ(nmx::wide_neg(1), -1);
  BOOST_TEST_EQ(nmx::wide_neg(1u), uint64_t(-1));

  BOOST_TEST_EQ(nmx::wide_add(1, 2), int64_t(3));
  BOOST_TEST_EQ(nmx::wide_add(uint32_t(1), uint32_t(2)), uint64_t(3));
Exemplo n.º 23
0
 void update(const Timestamp& timestamp) {
     update_int32(uint32_t(timestamp));
 }
Exemplo n.º 24
0
int main()
{

  BOOST_TEST_EQ(nmx::wide_neg(1), -1);
  BOOST_TEST_EQ(nmx::wide_neg(1u), uint64_t(-1));

  BOOST_TEST_EQ(nmx::wide_add(1, 2), int64_t(3));
  BOOST_TEST_EQ(nmx::wide_add(uint32_t(1), uint32_t(2)), uint64_t(3));
  BOOST_TEST_EQ(nmx::wide_add(int8_t(127), int8_t(127)), int16_t(254));
  BOOST_TEST_EQ(nmx::wide_add(int8_t(-128), int8_t(-128)), int16_t(-256));


  BOOST_TEST_EQ(nmx::wide_sub(1, 2), int64_t(-1));
  BOOST_TEST_EQ(nmx::wide_sub(int8_t(127), int8_t(-128)), int16_t(255));
  BOOST_TEST_EQ(nmx::wide_sub(int8_t(-128), int8_t(127)), int16_t(-255));

  BOOST_TEST_EQ(nmx::wide_sub(uint8_t(132), uint8_t(100)), uint16_t(32));
  BOOST_TEST_EQ(nmx::wide_sub(uint8_t(100), uint8_t(101)), uint16_t(-1));

  BOOST_TEST_EQ(nmx::wide_add2(int8_t(100), int8_t(100), int8_t(100)), int16_t(300));

  BOOST_TEST_EQ(nmx::wide_sub2(int8_t(-100), int8_t(100), int8_t(100)), int16_t(-300));

  BOOST_TEST_EQ(nmx::wide_lsh(uint8_t(0x12), 4), uint16_t(0x120));
  BOOST_TEST_EQ(nmx::wide_lsh(int16_t(0x1234), 4), int32_t(0x12340));

  BOOST_TEST_EQ(nmx::wide_lshadd(int16_t(0x1234), 16, int16_t(0x1234)), int32_t(0x12341234));

  BOOST_TEST_EQ(nmx::wide_mul(int8_t(-2), int8_t(100)), int16_t(-200));
  BOOST_TEST_EQ(nmx::wide_mul(uint8_t(2), uint8_t(100)), uint16_t(200));

  BOOST_TEST_EQ(nmx::wide_muladd(int8_t(-2), int8_t(100), int8_t(100)), int16_t(-100));
  BOOST_TEST_EQ(nmx::wide_muladd(int8_t(2), int8_t(100), int8_t(100)), int16_t(300));

  BOOST_TEST_EQ(nmx::wide_muladd(uint8_t(2), uint8_t(100), uint8_t(100)), uint16_t(300));

  BOOST_TEST_EQ(nmx::wide_muladd2(int8_t(2), int8_t(100), int8_t(100), int8_t(100)), int16_t(400));
  BOOST_TEST_EQ(nmx::wide_mulsub2(int8_t(2), int8_t(100), int8_t(100), int8_t(100)), int16_t(0));

  BOOST_TEST_EQ(nmx::wide_divn(int16_t(0x1024), int8_t(2)), int8_t(0x12));
  BOOST_TEST_EQ(nmx::wide_divw(int16_t(200), int8_t(5)), int16_t(40));

  {
    uint8_t l;
    static_assert(std::is_same<uint8_t, std::make_unsigned<int8_t>::type>::value, "UT must be make_unsigned_t<T>");

    uint8_t h = nmx::split_neg(&l, int8_t(1));
    BOOST_TEST_EQ(l, uint8_t(0xff));
    BOOST_TEST_EQ(h, uint8_t(0xff));
  }
  {
    uint8_t l;
    uint8_t h = nmx::split_neg(&l, int8_t(1));
    BOOST_TEST_EQ(l, uint8_t(0xff));
    BOOST_TEST_EQ(h, uint8_t(0xff));
  }
  {
    uint8_t l;
    uint8_t h = nmx::split_neg(&l, int8_t(127));
    BOOST_TEST_EQ(l, uint8_t(0x81));
    BOOST_TEST_EQ(h, uint8_t(0xff));
  }
  {
    uint8_t l;
    uint8_t h = nmx::split_neg(&l, int8_t(-128));
    BOOST_TEST_EQ(l, uint8_t(0x80));
    BOOST_TEST_EQ(h, uint8_t(0x00));
  }
  {
    uint8_t l;
    uint8_t h = nmx::split_neg(&l, uint8_t(-1));
    BOOST_TEST_EQ(l, uint8_t(0x01));
    BOOST_TEST_EQ(h, uint8_t(0xff));
  }
  {
    std::cout << std::hex << short(-uint8_t(-1)) << std::endl;
    uint64_t l;
    uint64_t h = nmx::split_neg(&l, uint64_t(-1));
    BOOST_TEST_EQ(l, uint64_t(0x01));
    BOOST_TEST_EQ(h, uint64_t(-1));
  }

  {
    uint8_t l;
    uint8_t h = nmx::split_neg(&l, int8_t(-1));
    BOOST_TEST_EQ(l, uint8_t(0x01));
    BOOST_TEST_EQ(h, uint8_t(0x00));
  }
  {
    uint8_t l;
    uint8_t h = nmx::split_neg(&l, uint8_t(2));
    BOOST_TEST_EQ(l, uint8_t(0xfe));
    BOOST_TEST_EQ(h, uint8_t(0xff));
  }
  {
    uint8_t l;
    uint8_t h = nmx::split_add(&l, uint8_t(1), uint8_t(2));
    BOOST_TEST_EQ(l, uint8_t(0x03));
    BOOST_TEST_EQ(h, uint8_t(0x00));
  }
  {
    uint8_t l;
    uint8_t h = nmx::split_add(&l, uint8_t(0xff), uint8_t(0x01));
    BOOST_TEST_EQ(l, uint8_t(0x00));
    BOOST_TEST_EQ(h, uint8_t(0x01));
  }
  {
    uint8_t l;
    uint8_t h = nmx::split_add(&l, uint8_t(0x01), uint8_t(0x01));
    BOOST_TEST_EQ(l, uint8_t(0x02));
    BOOST_TEST_EQ(h, uint8_t(0x00));
  }
  {
    uint8_t l;
    uint8_t h = nmx::split_add(&l, uint8_t(0xff), uint8_t(0x02));
    BOOST_TEST_EQ(l, uint8_t(0x01));
    BOOST_TEST_EQ(h, uint8_t(0x01));
  }
  {
    uint8_t l;
    uint8_t h = nmx::split_sub(&l, uint8_t(0x00), uint8_t(0x01));
    BOOST_TEST_EQ(l, uint8_t(0xff));
    BOOST_TEST_EQ(h, uint8_t(0xff));
  }
  {
    uint32_t l;
    uint32_t h = nmx::split_add(&l, uint32_t(-1), uint32_t(0x01));
    BOOST_TEST_EQ(l, uint32_t(0x00));
    BOOST_TEST_EQ(h, uint32_t(0x01));
  }
  {
    uint32_t l;
    uint32_t h = nmx::split_add(&l, int32_t(-1), int32_t(0x01));
    BOOST_TEST_EQ(l, uint32_t(0x00));
    BOOST_TEST_EQ(h, uint32_t(0x00));
  }
  {
    uint64_t l;
    uint64_t h = nmx::split_add(&l, uint64_t(1), uint64_t(0x01));
    BOOST_TEST_EQ(l, uint64_t(0x02));
    BOOST_TEST_EQ(h, uint64_t(0x00));
  }
  {
    uint64_t l;
    uint64_t h = nmx::split_add(&l, uint64_t(-1), uint64_t(0x01));
    BOOST_TEST_EQ(l, uint64_t(0x00));
    BOOST_TEST_EQ(h, uint64_t(0x01));
  }
  {
    uint64_t l;
    uint64_t h = nmx::split_add(&l, int64_t(-1), int64_t(0x01));
    BOOST_TEST_EQ(l, uint64_t(0x00));
    BOOST_TEST_EQ(h, uint64_t(0x00));
  }

  return ::boost::report_errors();
}
Exemplo n.º 25
0
/**
 * @details
 * Method to test the deserialise() method of the adapter.
 */
void AdapterTimeSeriesDataSetTest::test_deserialise()
{
    try {
        // Create configuration node.
        _config = _configXml(_fixedSizePackets, _dataBitSize,
                _udpPacketsPerIteration, _samplesPerPacket,
                _outputChannelsPerSubband, _subbandsPerPacket, _nRawPolarisations);

        typedef TYPES::i8complex i8c;
        typedef TYPES::i16complex i16c;

        // Construct the adapter.
        AdapterTimeSeriesDataSet adapter(_config);

        // Construct a data blob to adapt into.
        TimeSeriesDataSetC32 timeSeries;

        size_t chunkSize = sizeof(UDPPacket) * _udpPacketsPerIteration;

        // Configure the adapter setting the data blob, chunk size and service data.
        adapter.config(&timeSeries, chunkSize, QHash<QString, DataBlob*>());

        // Create and fill a UDP packet.
        std::vector<UDPPacket> packets(_udpPacketsPerIteration);
        unsigned index = 0;
        for (unsigned i = 0; i < _udpPacketsPerIteration; ++i) {

            // Fill in the header
            packets[i].header.version             = uint8_t(0 + i);
            packets[i].header.sourceInfo          = uint8_t(1 + i);
            packets[i].header.configuration       = uint16_t(_dataBitSize);
            packets[i].header.station             = uint16_t(3 + i);
            packets[i].header.nrBeamlets          = uint8_t(4 + i);
            packets[i].header.nrBlocks            = uint8_t(5 + i);
            packets[i].header.timestamp           = uint32_t(6 + i);
            packets[i].header.blockSequenceNumber = uint32_t(7 + i);

            // Fill in the data
            for (unsigned ii = 0, t = 0; t < _samplesPerPacket; ++t) {
                for (unsigned c = 0; c < _subbandsPerPacket; ++c) {
                    for (unsigned p = 0; p < _nRawPolarisations; ++p) {

                        if (_dataBitSize == 8) {
                            i8c* data = reinterpret_cast<i8c*>(packets[i].data);
                            index = _nRawPolarisations * (t * _subbandsPerPacket + c) + p;
                            data[index] = i8c(ii++, i);
                        }
                        else if (_dataBitSize == 16) {
                            i16c* data = reinterpret_cast<i16c*>(packets[i].data);
                            index = _nRawPolarisations * (t * _subbandsPerPacket + c) + p;
                            data[index] = i16c(ii++, i);
                        }

                    }
                }
            }
        }


        // Stick the packet into an QIODevice.
        QBuffer buffer;
        buffer.setData(reinterpret_cast<char*>(&packets[0]), chunkSize);
        buffer.open(QBuffer::ReadOnly);


        adapter.deserialise(&buffer);
    }
    catch (const QString& err) {
        CPPUNIT_FAIL(err.toStdString().data());
    }
}
Exemplo n.º 26
0
RegionDescPtr selectTraceletLegacy(Offset initSpOffset,
                                   const Tracelet& tlet) {
  typedef RegionDesc::Block Block;

  auto const region = std::make_shared<RegionDesc>();
  SrcKey sk(tlet.m_sk);
  auto const unit = tlet.func()->unit();

  const Func* topFunc = nullptr;
  Block* curBlock = nullptr;
  auto newBlock = [&](SrcKey start, Offset spOff) {
    assert(curBlock == nullptr || curBlock->length() > 0);
    region->blocks.push_back(
      std::make_shared<Block>(
        start.func(), start.resumed(), start.offset(), 0, spOff));
    Block* newCurBlock = region->blocks.back().get();
    if (curBlock) {
      region->addArc(curBlock->id(), newCurBlock->id());
    }
    curBlock = newCurBlock;
  };
  newBlock(sk, initSpOffset);

  for (auto ni = tlet.m_instrStream.first; ni; ni = ni->next) {
    assert(sk == ni->source);
    assert(ni->unit() == unit);

    Offset curSpOffset = initSpOffset + ni->stackOffset;

    curBlock->addInstruction();
    if ((curBlock->length() == 1 && ni->funcd != nullptr) ||
        ni->funcd != topFunc) {
      topFunc = ni->funcd;
      curBlock->setKnownFunc(sk, topFunc);
    }

    if (ni->calleeTrace && !ni->calleeTrace->m_inliningFailed) {
      assert(ni->op() == Op::FCall || ni->op() == Op::FCallD);
      assert(ni->funcd == ni->calleeTrace->func());
      // This should be translated as an inlined call. Insert the blocks of the
      // callee in the region.
      auto const& callee = *ni->calleeTrace;
      curBlock->setInlinedCallee(ni->funcd);
      SrcKey cSk = callee.m_sk;
      auto const cUnit = callee.func()->unit();

      // Note: the offsets of the inlined blocks aren't currently read
      // for anything, so it's unclear whether they should be relative
      // to the main function entry or the inlined function.  We're
      // just doing this for now.
      auto const initInliningSpOffset = curSpOffset;
      newBlock(cSk,
               initInliningSpOffset + callee.m_instrStream.first->stackOffset);

      for (auto cni = callee.m_instrStream.first; cni; cni = cni->next) {
        // Sometimes inlined callees trace through jumps that have a
        // known taken/non-taken state based on the calling context:
        if (cni->nextOffset != kInvalidOffset) {
          curBlock->addInstruction();
          cSk.setOffset(cni->nextOffset);
          newBlock(cSk, initInliningSpOffset + ni->stackOffset);
          continue;
        }

        assert(cSk == cni->source);
        assert(cni->op() == OpRetC ||
               cni->op() == OpRetV ||
               cni->op() == OpCreateCont ||
               cni->op() == OpAwait ||
               cni->op() == OpNativeImpl ||
               !instrIsNonCallControlFlow(cni->op()));

        curBlock->addInstruction();
        cSk.advance(cUnit);
      }

      if (ni->next) {
        sk.advance(unit);
        newBlock(sk, curSpOffset);
      }
      continue;
    }

    if (!ni->noOp && isFPassStar(ni->op())) {
      curBlock->setParamByRef(sk, ni->preppedByRef);
    }

    if (ni->next && isUnconditionalJmp(ni->op())) {
      // A Jmp that isn't the final instruction in a Tracelet means we traced
      // through a forward jump in analyze. Update sk to point to the next NI
      // in the stream.
      auto dest = ni->offset() + ni->imm[0].u_BA;
      assert(dest > sk.offset()); // We only trace for forward Jmps for now.
      sk.setOffset(dest);

      // The Jmp terminates this block.
      newBlock(sk, curSpOffset);
    } else {
      sk.advance(unit);
    }
  }

  auto& frontBlock = *region->blocks.front();

  // Add tracelet guards as predictions on the first instruction. Predictions
  // and known types from static analysis will be applied by
  // Translator::translateRegion.
  for (auto const& dep : tlet.m_dependencies) {
    if (dep.second->rtt.isVagueValue() ||
        dep.second->location.isThis()) continue;

    typedef RegionDesc R;
    auto addPred = [&](const R::Location& loc) {
      auto type = Type(dep.second->rtt);
      frontBlock.addPredicted(tlet.m_sk, {loc, type});
    };

    switch (dep.first.space) {
      case Location::Stack: {
        uint32_t offsetFromSp = uint32_t(-dep.first.offset - 1);
        uint32_t offsetFromFp = initSpOffset - offsetFromSp;
        addPred(R::Location::Stack{offsetFromSp, offsetFromFp});
        break;
      }
      case Location::Local:
        addPred(R::Location::Local{uint32_t(dep.first.offset)});
        break;

      default: not_reached();
    }
  }

  // Add reffiness dependencies as predictions on the first instruction.
  for (auto const& dep : tlet.m_refDeps.m_arMap) {
    RegionDesc::ReffinessPred pred{dep.second.m_mask,
                                   dep.second.m_vals,
                                   dep.first};
    frontBlock.addReffinessPred(tlet.m_sk, pred);
  }

  FTRACE(2, "Converted Tracelet:\n{}\nInto RegionDesc:\n{}\n",
         tlet.toString(), show(*region));
  return region;
}
Exemplo n.º 27
0
bool TouchExtensionGlobal::postTouchEvent(QTouchEvent *event, Surface *surface)
{
    const QList<QTouchEvent::TouchPoint> points = event->touchPoints();
    const int pointCount = points.count();
    if (!pointCount)
        return false;

    QPointF surfacePos = surface->pos();
    wl_client *surfaceClient = surface->resource()->client();
    uint32_t time = m_compositor->currentTimeMsecs();
    const int rescount = m_resources.count();

    for (int res = 0; res < rescount; ++res) {
        wl_resource *target = m_resources.at(res);
        if (target->client != surfaceClient)
            continue;

        // We do not use a touch_frame type of event, to reduce the number of
        // events flowing through the wire. Instead, the number of points sent
        // is included in each touch point event.
        int sentPointCount = 0;
        for (int i = 0; i < pointCount; ++i) {
            if (points.at(i).state() != Qt::TouchPointStationary)
                ++sentPointCount;
        }

        for (int i = 0; i < pointCount; ++i) {
            const QTouchEvent::TouchPoint &tp(points.at(i));
            // Stationary points are never sent. They are cached on client side.
            if (tp.state() == Qt::TouchPointStationary)
                continue;

            uint32_t id = tp.id();
            uint32_t state = (tp.state() & 0xFFFF) | (sentPointCount << 16);
            uint32_t flags = (tp.flags() & 0xFFFF) | (int(event->device()->capabilities()) << 16);

            QPointF p = tp.pos() - surfacePos; // surface-relative
            int x = toFixed(p.x());
            int y = toFixed(p.y());
            int nx = toFixed(tp.normalizedPos().x());
            int ny = toFixed(tp.normalizedPos().y());
            int w = toFixed(tp.rect().width());
            int h = toFixed(tp.rect().height());
            int vx = toFixed(tp.velocity().x());
            int vy = toFixed(tp.velocity().y());
            uint32_t pressure = uint32_t(tp.pressure() * 255);

            wl_array *rawData = 0;
            QVector<QPointF> rawPosList = tp.rawScreenPositions();
            int rawPosCount = rawPosList.count();
            if (rawPosCount) {
                rawPosCount = qMin(maxRawPos, rawPosCount);
                rawData = &m_rawdata_array;
                rawData->size = rawPosCount * sizeof(float) * 2;
                float *p = m_rawdata_ptr;
                for (int rpi = 0; rpi < rawPosCount; ++rpi) {
                    const QPointF &rawPos(rawPosList.at(rpi));
                    // This will stay in screen coordinates for performance
                    // reasons, clients using this data will presumably know
                    // what they are doing.
                    *p++ = float(rawPos.x());
                    *p++ = float(rawPos.y());
                }
            }

            qt_touch_extension_send_touch(target,
                                          time, id, state,
                                          x, y, nx, ny, w, h,
                                          pressure, vx, vy,
                                          flags, rawData);
        }

        return true;
    }

    return false;
}
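
Each touch point above packs two values into one 32-bit field: the low 16 bits carry the Qt touch-point state (or flags) and the high 16 bits carry the number of points in this frame (or the device capabilities). A sketch of how a receiver would split the state word; the helper is hypothetical and not part of the Qt extension protocol:

#include <cstdint>

struct UnpackedTouchState {
  uint16_t pointState;     // Qt::TouchPointState bits
  uint16_t pointsInFrame;  // points sent for this frame, replacing touch_frame
};

static UnpackedTouchState unpackTouchState(uint32_t state)
{
  return { uint16_t(state & 0xFFFF), uint16_t(state >> 16) };
}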
Exemplo n.º 28
0
	uint32_t bit_stream::read_unsigned_bits(size_t bits)
	{
		return uint32_t(read_bits(bits));
	}
Exemplo n.º 29
0
namespace nt2 { namespace ext
{
  template<class Dummy>
  struct call<tag::ffs_(tag::type8_),
              tag::cpu_, Dummy> : callable
  {
    template<class Sig> struct result;
    template<class This,class A0>
    struct result<This(A0)> : meta::as_integer<A0, unsigned>{};

    NT2_FUNCTOR_CALL(1)
    {
    #ifdef BOOST_MSVC
      unsigned long index = 0;
      _BitScanForward(&index, uint32_t(uint8_t(a0)));
      return index;
    #else
      return __builtin_ffs(uint32_t(uint8_t(a0)));
    #endif
    }
  };
} }

/////////////////////////////////////////////////////////////////////////////
// Implementation when type A0 is type64_
/////////////////////////////////////////////////////////////////////////////
NT2_REGISTER_DISPATCH(tag::ffs_, tag::cpu_,
                     (A0),
                     (type64_<A0>)
                    )
Exemplo n.º 30
0
static uint32_t isBigEndian(void)
{
	int32_t i = 1;
	bool b = *(reinterpret_cast<char*>(&i))==0;
	return b ? uint32_t(1) : uint32_t(0);
}
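
isBigEndian probes the in-memory layout of an int32_t: on a little-endian host the first byte of the value 1 is 0x01, so the dereferenced char is non-zero and the function returns 0; on a big-endian host that byte is 0 and it returns 1. A usage sketch showing how the result would typically gate byte swapping; SwapBytes32 is a hypothetical helper written only for illustration:

static uint32_t SwapBytes32(uint32_t v)
{
	return (v >> 24) | ((v >> 8) & 0x0000ff00u) |
	       ((v << 8) & 0x00ff0000u) | (v << 24);
}

// Convert a host value to little-endian byte order using the runtime check above.
static uint32_t ToLittleEndian32(uint32_t v)
{
	return isBigEndian() ? SwapBytes32(v) : v;
}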