bool SkDeferredCanvas::isFullFrame(const SkRect* rect, const SkPaint* paint) const { SkCanvas* canvas = drawingCanvas(); SkISize canvasSize = getDeviceSize(); if (rect) { if (!canvas->getTotalMatrix().rectStaysRect()) { return false; // conservative } SkRect transformedRect; canvas->getTotalMatrix().mapRect(&transformedRect, *rect); if (paint) { SkPaint::Style paintStyle = paint->getStyle(); if (!(paintStyle == SkPaint::kFill_Style || paintStyle == SkPaint::kStrokeAndFill_Style)) { return false; } if (paint->getMaskFilter() || paint->getLooper() || paint->getPathEffect() || paint->getImageFilter()) { return false; // conservative } } // The following test holds with AA enabled, and is conservative // by a 0.5 pixel margin with AA disabled if (transformedRect.fLeft > SkIntToScalar(0) || transformedRect.fTop > SkIntToScalar(0) || transformedRect.fRight < SkIntToScalar(canvasSize.fWidth) || transformedRect.fBottom < SkIntToScalar(canvasSize.fHeight)) { return false; } } switch (canvas->getClipType()) { case SkCanvas::kRect_ClipType : { SkIRect bounds; canvas->getClipDeviceBounds(&bounds); if (bounds.fLeft > 0 || bounds.fTop > 0 || bounds.fRight < canvasSize.fWidth || bounds.fBottom < canvasSize.fHeight) return false; } break; case SkCanvas::kComplex_ClipType : return false; // conservative case SkCanvas::kEmpty_ClipType: default: break; }; return true; }
/* Graphics-engine callback for the grid graphics system.
 * Dispatches on the engine event 'task'; 'dd' is the device the event
 * applies to and 'data' carries event-specific payload (only used by
 * GE_ScalePS, where it is a numeric scale factor).
 * Returns R_NilValue except where a specific result is required
 * (GE_InitState: R_BlankString on success; GE_CheckPlot: a logical).
 */
SEXP gridCallback(GEevent task, pGEDevDesc dd, SEXP data) {
    SEXP result = R_NilValue;
    SEXP valid, scale;
    SEXP gridState;
    GESystemDesc *sd;
    SEXP currentgp;
    SEXP gsd;
    SEXP devsize;
    R_GE_gcontext gc;
    switch (task) {
    case GE_InitState:
	/* Create the initial grid state for a device */
	PROTECT(gridState = createGridSystemState());
	/* Store that state with the device for easy retrieval */
	sd = dd->gesd[gridRegisterIndex];
	sd->systemSpecific = (void*) gridState;
	/* Initialise the grid state for a device */
	fillGridSystemState(gridState, dd);
	/* Also store the state beneath a top-level variable so
	 * that it does not get garbage-collected */
	globaliseState(gridState);
	/* Indicate success */
	result = R_BlankString;
	UNPROTECT(1);
	break;
    case GE_FinaliseState:
	sd = dd->gesd[gridRegisterIndex];
	/* Simply detach the system state from the global variable
	 * and it will be garbage-collected */
	deglobaliseState((SEXP) sd->systemSpecific);
	/* Also set the device pointer to NULL */
	sd->systemSpecific = NULL;
	break;
    case GE_SaveState:
	break;
    case GE_RestoreState:
	gsd = (SEXP) dd->gesd[gridRegisterIndex]->systemSpecific;
	/* Refresh the recorded device size before any redraw */
	PROTECT(devsize = allocVector(REALSXP, 2));
	getDeviceSize(dd, &(REAL(devsize)[0]), &(REAL(devsize)[1]));
	SET_VECTOR_ELT(gsd, GSS_DEVSIZE, devsize);
	UNPROTECT(1);
	/* Only bother to do any grid drawing setup
	 * if there has been grid output
	 * on this device.
	 */
	if (LOGICAL(gridStateElement(dd, GSS_GRIDDEVICE))[0]) {
	    if (LOGICAL(gridStateElement(dd, GSS_ENGINEDLON))[0]) {
		/* The graphics engine is about to replay the display list
		 * So we "clear" the device and reset the grid graphics state
		 */
		/* There are two main situations in which this occurs:
		 * (i) a screen is resized
		 *     In this case, it is ok-ish to do a GENewPage
		 *     because that has the desired effect and no
		 *     undesirable effects because it only happens on
		 *     a screen device -- a new page is the same as
		 *     clearing the screen
		 * (ii) output on one device is copied to another device
		 *     In this case, a GENewPage is NOT a good thing, however,
		 *     here we will start with a new device and it will not
		 *     have any grid output so this section will not get called
		 *     SO we will not get any unwanted blank pages.
		 *
		 * All this is a bit fragile; ultimately, what would be ideal
		 * is a dev->clearPage primitive for all devices in addition
		 * to the dev->newPage primitive
		 */
		currentgp = gridStateElement(dd, GSS_GPAR);
		gcontextFromgpar(currentgp, 0, &gc, dd);
		GENewPage(&gc, dd);
		initGPar(dd);
		initVP(dd);
		initOtherState(dd);
	    } else {
		/*
		 * If we have turned off the graphics engine's display list
		 * then we have to redraw the scene ourselves
		 */
		SEXP fcall;
		PROTECT(fcall = lang1(install("draw.all")));
		eval(fcall, R_gridEvalEnv);
		UNPROTECT(1);
	    }
	}
	break;
    case GE_CopyState:
	break;
    case GE_CheckPlot:
	/* Grid output is always considered a valid plot */
	PROTECT(valid = allocVector(LGLSXP, 1));
	LOGICAL(valid)[0] = TRUE;
	UNPROTECT(1);
	result = valid;
	/* BUGFIX: explicit break; the previous implicit fall-through
	 * only worked because the next case was an empty break */
	break;
    case GE_SaveSnapshotState:
	break;
    case GE_RestoreSnapshotState:
	break;
    case GE_ScalePS:
	/*
	 * data is a numeric scale factor
	 */
	PROTECT(scale = allocVector(REALSXP, 1));
	REAL(scale)[0] = REAL(gridStateElement(dd, GSS_SCALE))[0]* REAL(data)[0];
	setGridStateElement(dd, GSS_SCALE, scale);
	UNPROTECT(1);
	break;
    }
    return result;
}
/* The idea is to produce a transformation for this viewport which
 * will take any location in INCHES and turn it into a location on the
 * Device in INCHES.
 * The reason for working in INCHES is because we want to be able to
 * do rotations as part of the transformation.
 * If "incremental" is true, then we just work from the "current"
 * values of the parent.  Otherwise, we have to recurse and recalculate
 * everything from scratch.
 *
 * Results are written back into 'vp' (PVP_WIDTHCM, PVP_HEIGHTCM,
 * PVP_ROTATION, PVP_TRANS); nothing is returned.
 */
void calcViewportTransform(SEXP vp, SEXP parent, Rboolean incremental,
			   pGEDevDesc dd)
{
    int i, j;
    double vpWidthCM, vpHeightCM, rotationAngle;
    double parentWidthCM, parentHeightCM;
    double xINCHES, yINCHES;
    double xadj, yadj;
    double parentAngle;
    LViewportLocation vpl;
    LViewportContext vpc, parentContext;
    R_GE_gcontext gc, parentgc;
    LTransform thisLocation, thisRotation, thisJustification, thisTransform;
    LTransform tempTransform, parentTransform, transform;
    SEXP currentWidthCM, currentHeightCM, currentRotation;
    SEXP currentTransform;
    /* This should never be true when we are doing an incremental
     * calculation */
    if (isNull(parent)) {
	/* We have a top-level viewport; the parent is the device */
	getDeviceSize(dd, &parentWidthCM, &parentHeightCM);
	/* For a device the transform is the identity transform */
	identity(parentTransform);
	/* For a device, xmin=0, ymin=0, xmax=1, ymax=1, and */
	parentContext.xscalemin = 0;
	parentContext.yscalemin = 0;
	parentContext.xscalemax = 1;
	parentContext.yscalemax = 1;
	/* FIXME:  How do I figure out the device fontsize ?
	 * From ps.options etc, ... ?
	 * FIXME:  How do I figure out the device lineheight ??
	 * FIXME:  How do I figure out the device cex ??
	 * FIXME:  How do I figure out the device font ??
	 * FIXME:  How do I figure out the device fontfamily ??
	 */
	parentgc.ps = 10;
	parentgc.lineheight = 1.2;
	parentgc.cex = 1;
	parentgc.fontface = 1;
	parentgc.fontfamily[0] = '\0';
	/* The device is not rotated */
	parentAngle = 0;
	fillViewportLocationFromViewport(vp, &vpl);
    } else {
	/* Get parent transform (etc ...)
	 * If necessary, recalculate the parent transform (etc ...)
	 */
	if (!incremental)
	    calcViewportTransform(parent, viewportParent(parent), 0, dd);
	/* Get information required to transform viewport location */
	parentWidthCM = REAL(viewportWidthCM(parent))[0];
	parentHeightCM = REAL(viewportHeightCM(parent))[0];
	parentAngle = REAL(viewportRotation(parent))[0];
	/* The parent's 3x3 transform is stored column-major in an R matrix */
	for (i=0; i<3; i++)
	    for (j=0; j<3; j++)
		parentTransform[i][j] =
		    REAL(viewportTransform(parent))[i +3*j];
	fillViewportContextFromViewport(parent, &parentContext);
	/*
	 * Don't get gcontext from parent because the most recent
	 * previous gpar setting may have come from a gTree
	 * So we look at this viewport's parentgpar slot instead
	 *
	 * WAS gcontextFromViewport(parent, &parentgc);
	 */
	gcontextFromgpar(viewportParentGPar(vp), 0, &parentgc, dd);
	/* In order for the vp to get its vpl from a layout
	 * it must have specified a layout.pos and the parent
	 * must have a layout
	 * FIXME:  Actually, in addition, layout.pos.row and
	 * layout.pos.col must be valid for the layout
	 */
	if ((isNull(viewportLayoutPosRow(vp)) &&
	     isNull(viewportLayoutPosCol(vp))) ||
	    isNull(viewportLayout(parent)))
	    fillViewportLocationFromViewport(vp, &vpl);
	else if (checkPosRowPosCol(vp, parent))
	    /* NOTE(review): if checkPosRowPosCol() returns FALSE without
	     * signalling an error, 'vpl' would be used uninitialized below —
	     * presumably it errors out on invalid pos; confirm. */
	    calcViewportLocationFromLayout(viewportLayoutPosRow(vp),
					   viewportLayoutPosCol(vp),
					   parent, &vpl);
    }
    /* NOTE that we are not doing a transformLocn here because
     * we just want locations and dimensions (in INCHES) relative to
     * the parent, NOT relative to the device.
     */
    /* First, convert the location of the viewport into INCHES */
    xINCHES = transformXtoINCHES(vpl.x, 0, parentContext, &parentgc,
				 parentWidthCM, parentHeightCM,
				 dd);
    yINCHES = transformYtoINCHES(vpl.y, 0, parentContext, &parentgc,
				 parentWidthCM, parentHeightCM,
				 dd);
    /* Calculate the width and height of the viewport in CM too
     * so that any viewports within this one can do transformations
     * (the *2.54 converts the INCHES result to CM)
     */
    vpWidthCM = transformWidthtoINCHES(vpl.width, 0, parentContext, &parentgc,
				       parentWidthCM, parentHeightCM,
				       dd)*2.54;
    vpHeightCM = transformHeighttoINCHES(vpl.height, 0, parentContext,
					 &parentgc,
					 parentWidthCM, parentHeightCM,
					 dd)*2.54;
    /* Fall out if location or size are non-finite */
    if (!R_FINITE(xINCHES) ||
	!R_FINITE(yINCHES) ||
	!R_FINITE(vpWidthCM) ||
	!R_FINITE(vpHeightCM))
	error(_("Non-finite location and/or size for viewport"));
    /* Determine justification required */
    justification(vpWidthCM, vpHeightCM, vpl.hjust, vpl.vjust,
		  &xadj, &yadj);
    /* Next, produce the transformation to add the location of
     * the viewport to the location.
     */
    /* Produce transform for this viewport */
    translation(xINCHES, yINCHES, thisLocation);
    if (viewportAngle(vp) != 0)
	rotation(viewportAngle(vp), thisRotation);
    else
	identity(thisRotation);
    /* xadj/yadj are in CM; /2.54 converts to INCHES */
    translation(xadj/2.54, yadj/2.54, thisJustification);
    /* Position relative to origin of rotation THEN rotate. */
    multiply(thisJustification, thisRotation, tempTransform);
    /* Translate to bottom-left corner.
     */
    multiply(tempTransform, thisLocation, thisTransform);
    /* Combine with parent's transform */
    multiply(thisTransform, parentTransform, transform);
    /* Sum up the rotation angles */
    rotationAngle = parentAngle + viewportAngle(vp);
    /* Finally, allocate the rows and columns for this viewport's
     * layout if it has one
     */
    if (!isNull(viewportLayout(vp))) {
	fillViewportContextFromViewport(vp, &vpc);
	gcontextFromViewport(vp, &gc, dd);
	calcViewportLayout(vp, vpWidthCM, vpHeightCM, vpc, &gc, dd);
    }
    /* Record all of the answers in the viewport
     * (the layout calculations are done within calcViewportLayout)
     */
    PROTECT(currentWidthCM = ScalarReal(vpWidthCM));
    PROTECT(currentHeightCM = ScalarReal(vpHeightCM));
    PROTECT(currentRotation = ScalarReal(rotationAngle));
    PROTECT(currentTransform = allocMatrix(REALSXP, 3, 3));
    /* Store the 3x3 transform column-major to match the read above */
    for (i=0; i<3; i++)
	for (j=0; j<3; j++)
	    REAL(currentTransform)[i + 3*j] = transform[i][j];
    SET_VECTOR_ELT(vp, PVP_WIDTHCM, currentWidthCM);
    SET_VECTOR_ELT(vp, PVP_HEIGHTCM, currentHeightCM);
    SET_VECTOR_ELT(vp, PVP_ROTATION, currentRotation);
    SET_VECTOR_ELT(vp, PVP_TRANS, currentTransform);
    UNPROTECT(4);
}
// Render the scene twice, side by side, for a 3D TV: the left eye into the
// left half of the framebuffer and the right eye into the right half, each
// with its own frustum and a parallax-cancelling translation.
void TV3DManager::display(Camera& whichCamera) {
    double nearZ = whichCamera.getNearClip(); // near clipping plane
    double farZ = whichCamera.getFarClip(); // far clipping plane

    // left eye portal
    int portalX = 0;
    int portalY = 0;
    auto glCanvas = Application::getInstance()->getGLWidget();
    QSize deviceSize = glCanvas->getDeviceSize() *
        Application::getInstance()->getRenderResolutionScale();
    // Each eye gets half the width and the full height of the device.
    int portalW = deviceSize.width() / 2;
    int portalH = deviceSize.height();

    ApplicationOverlay& applicationOverlay = Application::getInstance()->getApplicationOverlay();

    // We only need to render the overlays to a texture once, then we just render the texture as a quad
    // PrioVR will only work if renderOverlay is called, calibration is connected to Application::renderingOverlay()
    applicationOverlay.renderOverlay();

    DependencyManager::get<GlowEffect>()->prepare();

    glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);
    glEnable(GL_SCISSOR_TEST);
    // render left side view
    glViewport(portalX, portalY, portalW, portalH);
    glScissor(portalX, portalY, portalW, portalH);

    Camera eyeCamera;
    eyeCamera.setRotation(whichCamera.getRotation());
    eyeCamera.setPosition(whichCamera.getPosition());

    glPushMatrix();
    {
        _activeEye = &_leftEye;
        glMatrixMode(GL_PROJECTION);
        glLoadIdentity(); // reset projection matrix
        glFrustum(_leftEye.left, _leftEye.right, _leftEye.bottom, _leftEye.top, nearZ, farZ); // set left view frustum
        // Recover the vertical field of view from the projection matrix:
        // p[1][1] is cot(fov/2) for a perspective projection.
        GLfloat p[4][4];
        glGetFloatv(GL_PROJECTION_MATRIX, &(p[0][0]));
        GLfloat cotangent = p[1][1];
        GLfloat fov = atan(1.0f / cotangent);
        glTranslatef(_leftEye.modelTranslation, 0.0, 0.0); // translate to cancel parallax
        glMatrixMode(GL_MODELVIEW);
        glLoadIdentity();
        eyeCamera.setEyeOffsetPosition(glm::vec3(-_activeEye->modelTranslation,0,0));
        Application::getInstance()->displaySide(eyeCamera, false, RenderArgs::MONO);
        applicationOverlay.displayOverlayTexture3DTV(whichCamera, _aspect, fov);
        _activeEye = NULL;
    }
    glPopMatrix();
    glDisable(GL_SCISSOR_TEST);

    // render right side view
    portalX = deviceSize.width() / 2;
    glEnable(GL_SCISSOR_TEST);
    // render right side view (portalW/portalH unchanged; only X shifts)
    glViewport(portalX, portalY, portalW, portalH);
    glScissor(portalX, portalY, portalW, portalH);
    glPushMatrix();
    {
        _activeEye = &_rightEye;
        glMatrixMode(GL_PROJECTION);
        glLoadIdentity(); // reset projection matrix
        glFrustum(_rightEye.left, _rightEye.right, _rightEye.bottom, _rightEye.top, nearZ, farZ); // set right view frustum
        // Same fov recovery as for the left eye.
        GLfloat p[4][4];
        glGetFloatv(GL_PROJECTION_MATRIX, &(p[0][0]));
        GLfloat cotangent = p[1][1];
        GLfloat fov = atan(1.0f / cotangent);
        glTranslatef(_rightEye.modelTranslation, 0.0, 0.0); // translate to cancel parallax
        glMatrixMode(GL_MODELVIEW);
        glLoadIdentity();
        eyeCamera.setEyeOffsetPosition(glm::vec3(-_activeEye->modelTranslation,0,0));
        Application::getInstance()->displaySide(eyeCamera, false, RenderArgs::MONO);
        applicationOverlay.displayOverlayTexture3DTV(whichCamera, _aspect, fov);
        _activeEye = NULL;
    }
    glPopMatrix();
    glDisable(GL_SCISSOR_TEST);

    // reset the viewport to how we started
    glViewport(0, 0, deviceSize.width(), deviceSize.height());

    DependencyManager::get<GlowEffect>()->render();
}
// Render one frame: snapshot the per-frame render state under lock, reset the
// gpu context, draw the overlay, run the main render frame into the final
// framebuffer, and hand the completed frame to the active display plugin.
void Application::paintGL() {
    // Some plugins process message events, allowing paintGL to be called reentrantly.
    if (_aboutToQuit || _window->isMinimized()) {
        return;
    }

    _renderFrameCount++;
    _lastTimeRendered.start();

    auto lastPaintBegin = usecTimestampNow();
    PROFILE_RANGE_EX(render, __FUNCTION__, 0xff0000ff, (uint64_t)_renderFrameCount);
    PerformanceTimer perfTimer("paintGL");

    if (nullptr == _displayPlugin) {
        return;
    }

    DisplayPluginPointer displayPlugin;
    {
        PROFILE_RANGE(render, "/getActiveDisplayPlugin");
        displayPlugin = getActiveDisplayPlugin();
    }

    {
        PROFILE_RANGE(render, "/pluginBeginFrameRender");
        // If a display plugin loses its underlying support, it
        // needs to be able to signal us to not use it
        if (!displayPlugin->beginFrameRender(_renderFrameCount)) {
            QMetaObject::invokeMethod(this, "updateDisplayMode");
            return;
        }
    }

    RenderArgs renderArgs;
    glm::mat4 HMDSensorPose;
    glm::mat4 eyeToWorld;
    glm::mat4 sensorToWorld;
    bool isStereo;
    glm::mat4 stereoEyeOffsets[2];
    glm::mat4 stereoEyeProjections[2];

    // Copy everything we need out of _appRenderArgs under the lock so the
    // rest of the frame can run without holding it.
    {
        QMutexLocker viewLocker(&_renderArgsMutex);
        renderArgs = _appRenderArgs._renderArgs;

        // don't render if there is no context.
        if (!_appRenderArgs._renderArgs._context) {
            return;
        }

        HMDSensorPose = _appRenderArgs._headPose;
        eyeToWorld = _appRenderArgs._eyeToWorld;
        sensorToWorld = _appRenderArgs._sensorToWorld;
        isStereo = _appRenderArgs._isStereo;
        for_each_eye([&](Eye eye) {
            stereoEyeOffsets[eye] = _appRenderArgs._eyeOffsets[eye];
            stereoEyeProjections[eye] = _appRenderArgs._eyeProjections[eye];
        });
    }

    {
        PROFILE_RANGE(render, "/gpuContextReset");
        _gpuContext->beginFrame(HMDSensorPose);
        // Reset the gpu::Context Stages
        // Back to the default framebuffer;
        gpu::doInBatch(_gpuContext, [&](gpu::Batch& batch) {
            batch.resetStages();
        });
    }

    {
        PROFILE_RANGE(render, "/renderOverlay");
        PerformanceTimer perfTimer("renderOverlay");
        // NOTE: There is no batch associated with this renderArgs
        // the ApplicationOverlay class assumes its viewport is setup to be the device size
        renderArgs._viewport = glm::ivec4(0, 0, getDeviceSize());
        _applicationOverlay.renderOverlay(&renderArgs);
    }

    {
        PROFILE_RANGE(render, "/updateCompositor");
        getApplicationCompositor().setFrameInfo(_renderFrameCount, eyeToWorld, sensorToWorld);
    }

    gpu::FramebufferPointer finalFramebuffer;
    QSize finalFramebufferSize;
    {
        PROFILE_RANGE(render, "/getOutputFramebuffer");
        // Primary rendering pass
        auto framebufferCache = DependencyManager::get<FramebufferCache>();
        finalFramebufferSize = framebufferCache->getFrameBufferSize();
        // Final framebuffer that will be handled to the display-plugin
        finalFramebuffer = framebufferCache->getFramebuffer();
    }

    {
        // Configure stereo rendering (if active) and run the main render pass
        // into the final framebuffer.
        if (isStereo) {
            renderArgs._context->enableStereo(true);
            renderArgs._context->setStereoProjections(stereoEyeProjections);
            renderArgs._context->setStereoViews(stereoEyeOffsets);
        }

        renderArgs._hudOperator = displayPlugin->getHUDOperator();
        renderArgs._hudTexture = _applicationOverlay.getOverlayTexture();
        renderArgs._blitFramebuffer = finalFramebuffer;
        runRenderFrame(&renderArgs);
    }

    auto frame = _gpuContext->endFrame();
    frame->frameIndex = _renderFrameCount;
    frame->framebuffer = finalFramebuffer;
    // Return the framebuffer to the cache once the display plugin is done with it.
    frame->framebufferRecycler = [](const gpu::FramebufferPointer& framebuffer) {
        DependencyManager::get<FramebufferCache>()->releaseFramebuffer(framebuffer);
    };
    // deliver final scene rendering commands to the display plugin
    {
        PROFILE_RANGE(render, "/pluginOutput");
        PerformanceTimer perfTimer("pluginOutput");
        _renderLoopCounter.increment();
        displayPlugin->submitFrame(frame);
    }

    // Reset the framebuffer and stereo state
    renderArgs._blitFramebuffer.reset();
    renderArgs._context->enableStereo(false);

    {
        Stats::getInstance()->setRenderDetails(renderArgs._details);
    }

    uint64_t lastPaintDuration = usecTimestampNow() - lastPaintBegin;
    _frameTimingsScriptingInterface.addValue(lastPaintDuration);
}
void Model::create(const std::string& filepath) { std::ifstream fin(filepath.c_str(), std::ios::in | std::ios::binary); if (fin.is_open()) { ResHeader header = {}; fin.read(reinterpret_cast<char*>(&header), sizeof(header)); m_bindingDescriptions[0].binding = 0; m_bindingDescriptions[0].stride = sizeof(Vertex); m_bindingDescriptions[0].inputRate = VK_VERTEX_INPUT_RATE_VERTEX; m_attributeDescriptions[0].binding = 0; m_attributeDescriptions[0].location = 0; m_attributeDescriptions[0].format = VK_FORMAT_R32G32B32_SFLOAT; m_attributeDescriptions[0].offset = 0; m_attributeDescriptions[1].binding = 0; m_attributeDescriptions[1].location = 1; m_attributeDescriptions[1].format = VK_FORMAT_R32G32B32_SFLOAT; m_attributeDescriptions[1].offset = sizeof(float) * 3; m_vertexInputInfo.sType = VK_STRUCTURE_TYPE_PIPELINE_VERTEX_INPUT_STATE_CREATE_INFO; m_vertexInputInfo.pNext = NULL; m_vertexInputInfo.flags = 0; m_vertexInputInfo.vertexBindingDescriptionCount = 1; m_vertexInputInfo.pVertexBindingDescriptions = &m_bindingDescriptions[0]; m_vertexInputInfo.vertexAttributeDescriptionCount = 2; m_vertexInputInfo.pVertexAttributeDescriptions = &m_attributeDescriptions[0]; std::unique_ptr<ResVec3> vertices; std::unique_ptr<ResFace> faces; vertices.reset(new ResVec3[header.numVerts]); faces.reset(new ResFace[header.numFaces]); fin.read(reinterpret_cast<char*>(vertices.get()), sizeof(ResVec3) * header.numVerts); fin.read(reinterpret_cast<char*>(faces.get()), sizeof(ResFace) * header.numFaces); { VkBufferCreateInfo bufInfo = {}; bufInfo.sType = VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO; bufInfo.pNext = NULL; bufInfo.size = getDeviceSize(sizeof(Vertex) * header.numVerts, 256); bufInfo.usage = VK_BUFFER_USAGE_VERTEX_BUFFER_BIT; bufInfo.flags = 0; auto err = vkCreateBuffer(getDevice(), &bufInfo, nullptr, &m_vertices.buffer); VkMemoryRequirements memReqs = {}; vkGetBufferMemoryRequirements(getDevice(), m_vertices.buffer, &memReqs); VkMemoryAllocateInfo memAlloc = {}; memAlloc.sType = 
VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO; memAlloc.allocationSize = memReqs.size; getMemoryType(memReqs.memoryTypeBits, VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT, &memAlloc.memoryTypeIndex); err = vkAllocateMemory(getDevice(), &memAlloc, nullptr, &m_vertices.memory); Vertex* mapped = nullptr; err = vkMapMemory(getDevice(), m_vertices.memory, 0, bufInfo.size, 0, reinterpret_cast<void**>(&mapped)); for (uint32_t i = 0; i < header.numVerts; i++) { mapped[i].x = vertices.get()[i].x; mapped[i].y = vertices.get()[i].y; mapped[i].z = vertices.get()[i].z; mapped[i].nx = 0.0f; mapped[i].ny = 0.0f; mapped[i].nz = 0.0f; } for (uint32_t i = 0; i < header.numFaces; i++) { Vertex* p0 = &mapped[faces.get()[i].index[0]]; Vertex* p1 = &mapped[faces.get()[i].index[1]]; Vertex* p2 = &mapped[faces.get()[i].index[2]]; p0->nx += faces.get()[i].normal.x; p0->ny += faces.get()[i].normal.y; p0->nz += faces.get()[i].normal.z; p1->nx += faces.get()[i].normal.x; p1->ny += faces.get()[i].normal.y; p1->nz += faces.get()[i].normal.z; p2->nx += faces.get()[i].normal.x; p2->ny += faces.get()[i].normal.y; p2->nz += faces.get()[i].normal.z; } for (uint32_t i = 0; i < header.numVerts; i++) { glm::vec3 n(mapped[i].nx, mapped[i].ny, mapped[i].nz); n = glm::normalize(n); mapped[i].nx = n.x; mapped[i].ny = n.y; mapped[i].nz = n.z; } vkUnmapMemory(getDevice(), m_vertices.memory); vkBindBufferMemory(getDevice(), m_vertices.buffer, m_vertices.memory, 0); } { VkBufferCreateInfo bufInfo = {}; bufInfo.sType = VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO; bufInfo.pNext = NULL; bufInfo.size = getDeviceSize(sizeof(uint32_t) * header.numFaces * 3, 256); bufInfo.usage = VK_BUFFER_USAGE_INDEX_BUFFER_BIT; bufInfo.flags = 0; auto err = vkCreateBuffer(getDevice(), &bufInfo, nullptr, &m_indices.buffer); VkMemoryRequirements memReqs = {}; vkGetBufferMemoryRequirements(getDevice(), m_indices.buffer, &memReqs); VkMemoryAllocateInfo memAlloc = {}; memAlloc.sType = VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO; memAlloc.allocationSize = 
memReqs.size; getMemoryType(memReqs.memoryTypeBits, VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT, &memAlloc.memoryTypeIndex); err = vkAllocateMemory(getDevice(), &memAlloc, nullptr, &m_indices.memory); uint32_t* mapped = nullptr; err = vkMapMemory(getDevice(), m_indices.memory, 0, bufInfo.size, 0, reinterpret_cast<void**>(&mapped)); uint32_t index = 0; for (uint32_t i = 0; i < header.numFaces; i++) { mapped[index + 0] = faces.get()[i].index[0]; mapped[index + 1] = faces.get()[i].index[1]; mapped[index + 2] = faces.get()[i].index[2]; index += 3; } vkUnmapMemory(getDevice(), m_indices.memory); vkBindBufferMemory(getDevice(), m_indices.buffer, m_indices.memory, 0); m_indexCount = header.numFaces * 3; } fin.close(); } }