// Start audio input
void KinectAudio::start()
{
    memset( &outputBufferStruct_, 0, sizeof(outputBufferStruct_) );
    outputBufferStruct_.pBuffer = &mediaBuffer_;

    // Set DMO output format
    DMO_MEDIA_TYPE mt = {0};
    CHECKHR( ::MoInitMediaType( &mt, sizeof(WAVEFORMATEX) ) );

    mt.majortype = MEDIATYPE_Audio;
    mt.subtype = MEDIASUBTYPE_PCM;
    mt.lSampleSize = 0;
    mt.bFixedSizeSamples = TRUE;
    mt.bTemporalCompression = FALSE;
    mt.formattype = FORMAT_WaveFormatEx;
    memcpy( mt.pbFormat, &getWaveFormat(), sizeof(WAVEFORMATEX) );

    CHECKHR( mediaObject_->SetOutputType( 0, &mt, 0 ) );
    ::MoFreeMediaType( &mt );

    // Allocate streaming resources. This step is optional. If it is not called here, it
    // will be called the first time ProcessInput() is called. However, if you want to
    // get the actual frame size being used, it should be called explicitly here.
    CHECKHR( mediaObject_->AllocateStreamingResources() );

    // Allocate the output buffer (one second of audio: samples/sec * bytes per frame)
    mediaBuffer_.SetBufferLength( getWaveFormat().nSamplesPerSec * getWaveFormat().nBlockAlign );
}
HRESULT ShaderPass::InitializeGraphics(
    const ComPtr<ID3D11Device>& device, VertexFormat format,
    const uint8_t* vertexShader, size_t vertexShaderNumBytes,
    const uint8_t* pixelShader, size_t pixelShaderNumBytes)
{
    Device = device;
    Device->GetImmediateContext(&Context);
    Type = ShaderPassType::Graphics;

    HRESULT hr = Device->CreateVertexShader(vertexShader, vertexShaderNumBytes, nullptr, &VertexShader);
    CHECKHR(hr);

    // The pixel shader is optional (e.g. for depth-only passes)
    if (pixelShader)
    {
        hr = Device->CreatePixelShader(pixelShader, pixelShaderNumBytes, nullptr, &PixelShader);
        CHECKHR(hr);
    }

    hr = CreateInputLayout(format, vertexShader, vertexShaderNumBytes);
    CHECKHR(hr);

    CurrentInputBinding = format;
    return hr;
}
HRESULT ForwardPlusRenderer::RecreateSurfaces(uint32_t width, uint32_t height, uint32_t sampleCount)
{
    DepthBuffer = nullptr;
    FinalRTMsaa = nullptr;

    D3D11_TEXTURE2D_DESC desc{};
    desc.ArraySize = 1;
    desc.BindFlags = D3D11_BIND_RENDER_TARGET;
    desc.Format = DXGI_FORMAT_R8G8B8A8_UNORM;
    desc.Width = width;
    desc.Height = height;
    desc.MipLevels = 1;
    desc.SampleDesc.Count = sampleCount;
    desc.Usage = D3D11_USAGE_DEFAULT;

    Viewport.Width = static_cast<float>(desc.Width);
    Viewport.Height = static_cast<float>(desc.Height);
    Viewport.MaxDepth = 1.f;

    HRESULT hr = S_OK;

    if (MsaaEnabled)
    {
        hr = Graphics->CreateTexture2D(desc, &FinalRTMsaa);
        CHECKHR(hr);
    }

    // Typeless depth resource so it can be bound both as depth-stencil (D32_FLOAT)
    // and as a shader resource (R32_FLOAT) for the light culling pass
    desc.BindFlags = D3D11_BIND_DEPTH_STENCIL | D3D11_BIND_SHADER_RESOURCE;
    desc.Format = DXGI_FORMAT_R32_TYPELESS;
    hr = Graphics->CreateTexture2D(desc, DXGI_FORMAT_UNKNOWN, DXGI_FORMAT_R32_FLOAT, DXGI_FORMAT_D32_FLOAT, &DepthBuffer);
    CHECKHR(hr);

    RTWidth = width;
    RTHeight = height;

    uint32_t headListWidth = RTWidth / NUM_PIXELS_PER_GROUP_X;
    uint32_t headListHeight = RTHeight / NUM_PIXELS_PER_GROUP_Y;

    LightLinkedListHeads = std::make_shared<Buffer>();
    hr = LightLinkedListHeads->Initialize(Graphics->GetDevice(), sizeof(uint32_t),
        sizeof(uint32_t) * headListWidth * headListHeight, false, false);
    CHECKHR(hr);

    LightCullPass->SetCSBuffer(1, LightLinkedListHeads->GetUAV(), true);
    LightCullPass->SetCSResource(1, DepthBuffer->GetSRV());

    ZPrePass->SetViewport(&Viewport);
    ZPrePass->SetDepthBuffer(DepthBuffer->GetDSV());

    FinalPass->SetViewport(&Viewport);
    FinalPass->SetDepthBuffer(DepthBuffer->GetDSV());
    FinalPass->SetPSResource(2, LightLinkedListHeads->GetSRV());

    FinalPassMsaa->SetRenderTarget(0, FinalRTMsaa ? FinalRTMsaa->GetRTV() : nullptr);
    FinalPassMsaa->SetViewport(&Viewport);
    FinalPassMsaa->SetDepthBuffer(DepthBuffer->GetDSV());
    FinalPassMsaa->SetPSResource(2, LightLinkedListHeads->GetSRV());

    return hr;
}
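// Note on the head-list sizing above: the truncating division assumes the render
// target dimensions are multiples of the tile size. A common variant (an assumption
// about intent, not necessarily what this renderer requires) rounds up so that
// partial edge tiles still get a list head:
//
//     uint32_t headListWidth  = (RTWidth  + NUM_PIXELS_PER_GROUP_X - 1) / NUM_PIXELS_PER_GROUP_X;
//     uint32_t headListHeight = (RTHeight + NUM_PIXELS_PER_GROUP_Y - 1) / NUM_PIXELS_PER_GROUP_Y;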
std::vector< BYTE > KinectAudioSource::Read()
{
    mediaBuffer_.Clear();

    do {
        // Get the audio data
        DWORD dwStatus;
        CHECKHR( mediaObject_->ProcessOutput(0, 1, &outputBufferStruct_, &dwStatus) );

        // Get the beam angle and the sound source direction
        CHECKHR( soundSource_->GetBeam(&beamAngle_) );
        CHECKHR( soundSource_->GetPosition(&soundSourcePosition_, &soundSourcePositionConfidence_) );
    } while ( outputBufferStruct_.dwStatus & DMO_OUTPUT_DATA_BUFFERF_INCOMPLETE );

    return mediaBuffer_.Clone();
}
void R3DResource::Evict()
{
    // Wait until the GPU has finished with the resource before evicting it
    Fence->Wait(LastReferencedFenceValue);

    ComPtr<ID3D12Device> device;
    CHECKHR(resource->GetDevice(IID_PPV_ARGS(&device)));

    // Evict takes ID3D12Pageable, so go through a base-interface pointer
    ID3D12Pageable* pageable = resource.Get();
    CHECKHR(device->Evict(1, &pageable));
}
void KinectAudioSource::Initialize()
{
    CHECKHR( mediaObject_.CoCreateInstance(CLSID_CMSRKinectAudio, NULL, CLSCTX_INPROC_SERVER ) );
    CHECKHR( mediaObject_.QueryInterface( &propertyStore_ ) );
    CHECKHR( mediaObject_->QueryInterface( IID_ISoundSourceLocalizer, (void**)&soundSource_ ) );

    // Tell the DMO which capture device to use (we're using whichever device is a
    // microphone array). The default rendering device (speaker) will be used.
    int iMicDevIdx = GetMicArrayDeviceIndex();

    PROPVARIANT pvDeviceId;
    PropVariantInit(&pvDeviceId);
    pvDeviceId.vt = VT_I4;

    // The speaker index is the two high-order bytes and the mic index the two low-order ones
    int iSpkDevIdx = 0;  // Assume default speakers
    pvDeviceId.lVal = (unsigned long)(iSpkDevIdx << 16) | (unsigned long)(0x0000ffff & iMicDevIdx);
    CHECKHR(propertyStore_->SetValue(MFPKEY_WMAAECMA_DEVICE_INDEXES, pvDeviceId));
    PropVariantClear(&pvDeviceId);
}
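// A quick worked example of the index packing above (plain arithmetic, example
// indices chosen for illustration, not taken from the original source): with the
// default speaker (index 0) and a microphone array found at capture index 2, the
// packed value is 0x00000002; with speaker index 1 it would be 0x00010002.
static_assert(((0 << 16) | (0x0000ffff & 2)) == 0x00000002, "default speaker, mic index 2");
static_assert(((1 << 16) | (0x0000ffff & 2)) == 0x00010002, "speaker index 1, mic index 2");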
// SINGLE_CHANNEL_AEC = 0
// OPTIBEAM_ARRAY_ONLY = 2
// OPTIBEAM_ARRAY_AND_AEC = 4
// SINGLE_CHANNEL_NSAGC = 5
void KinectAudioSource::SetSystemMode( LONG mode )
{
    // Set the AEC-MicArray DMO system mode.
    // This must be set for the DMO to work properly.
    PROPVARIANT pvSysMode;
    PropVariantInit(&pvSysMode);
    pvSysMode.vt = VT_I4;
    pvSysMode.lVal = mode;
    CHECKHR(propertyStore_->SetValue(MFPKEY_WMAAECMA_SYSTEM_MODE, pvSysMode));
    PropVariantClear(&pvSysMode);
}
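// A minimal usage sketch for the source above: the call order is pieced together
// from the functions in this section (Initialize, SetSystemMode, Start, Read), and
// the helper function itself is hypothetical, not part of the original source.
static void KinectAudioUsageExample()
{
    KinectAudioSource source;
    source.Initialize();                        // create the DMO and select the mic array
    source.SetSystemMode( 2 );                  // OPTIBEAM_ARRAY_ONLY: beamforming, no AEC
    source.Start();                             // set the PCM output type, allocate buffers

    // Pull a few buffers' worth of PCM data
    for (int i = 0; i < 10; ++i)
    {
        std::vector<BYTE> pcm = source.Read();
        // ... feed pcm to a recognizer or append it to a WAV file ...
    }
}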
// Get the audio data
std::vector< BYTE > KinectAudio::read()
{
    mediaBuffer_.Clear();

    do {
        // Pull the next chunk of audio data from the DMO
        DWORD dwStatus;
        CHECKHR( mediaObject_->ProcessOutput(0, 1, &outputBufferStruct_, &dwStatus) );
    } while ( outputBufferStruct_.dwStatus & DMO_OUTPUT_DATA_BUFFERF_INCOMPLETE );

    return mediaBuffer_.Clone();
}
void AssetLoaderStartup()
{
    assert(g_assetLoader == nullptr);
    CHECKHR(CoInitialize(NULL));

    // The assert compiles out in release builds, so clean up any previous instance
    delete g_assetLoader;
    g_assetLoader = new AssetLoader;

    DebugOut("AssetLoader initialized.\n");
}
HRESULT ShaderPass::InitializeCompute(const ComPtr<ID3D11Device>& device, const uint8_t* computeShader, size_t computeShaderNumBytes)
{
    Device = device;
    Device->GetImmediateContext(&Context);
    Type = ShaderPassType::Compute;

    HRESULT hr = Device->CreateComputeShader(computeShader, computeShaderNumBytes, nullptr, &ComputeShader);
    CHECKHR(hr);

    return hr;
}
void KinectAudioSource::Start()
{
    DMO_MEDIA_TYPE mt = {0};
    ULONG cbProduced = 0;

    memset( &outputBufferStruct_, 0, sizeof(outputBufferStruct_) );
    outputBufferStruct_.pBuffer = &mediaBuffer_;

    // Set DMO output format
    CHECKHR( MoInitMediaType(&mt, sizeof(WAVEFORMATEX)) );

    mt.majortype = MEDIATYPE_Audio;
    mt.subtype = MEDIASUBTYPE_PCM;
    mt.lSampleSize = 0;
    mt.bFixedSizeSamples = TRUE;
    mt.bTemporalCompression = FALSE;
    mt.formattype = FORMAT_WaveFormatEx;
    memcpy(mt.pbFormat, &GetWaveFormat(), sizeof(WAVEFORMATEX));

    CHECKHR( mediaObject_->SetOutputType(0, &mt, 0) );
    MoFreeMediaType(&mt);

    // Allocate streaming resources. This step is optional. If it is not called here, it
    // will be called the first time ProcessInput() is called. However, if you want to
    // get the actual frame size being used, it should be called explicitly here.
    CHECKHR( mediaObject_->AllocateStreamingResources() );

    // Get the actual frame size being used in the DMO (optional, do as you need)
    int iFrameSize;
    PROPVARIANT pvFrameSize;
    PropVariantInit(&pvFrameSize);
    CHECKHR(propertyStore_->GetValue(MFPKEY_WMAAECMA_FEATR_FRAME_SIZE, &pvFrameSize));
    iFrameSize = pvFrameSize.lVal;
    PropVariantClear(&pvFrameSize);

    // Allocate the output buffer (one second of audio)
    mediaBuffer_.SetBufferLength( GetWaveFormat().nSamplesPerSec * GetWaveFormat().nBlockAlign );
}
HRESULT RTFformatting::WriteRedlineML(CComPtr<IXmlWriter>& xmlWriter, XmlWriteContext& context) const
{
    if (GetParfmt() && GetParfmt()->get_bImplicit())
        return S_OK;

    CHECKHR(xmlWriter->WriteStartElement(NULL, L"paraMarker", NULL));

    if (IsListItem())
    {
        CWideString wsText;
        wsText = GetListNumberText();
        ASSERT(wsText.GetLength() != 0);
        CHECKHR(xmlWriter->WriteAttributeString(NULL, L"listNumber", NULL, wsText.GetData()));

        if (context.IsInsertedListItem())
        {
            CHECKHR(xmlWriter->WriteAttributeString(NULL, L"isInserted", NULL, L"true"));
        }
    }

    return xmlWriter->WriteEndElement();
}
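// For reference, the markup emitted by the writer calls above looks like this for an
// inserted list item (element and attribute names come straight from the calls; the
// list number text "1." is an assumed example value):
//
//     <paraMarker listNumber="1." isInserted="true" />
//
// A paragraph that is not a list item produces a bare <paraMarker />.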
HRESULT GraphicsDevice::CreateConstantBuffer(const void* data, uint32_t dataSizeInBytes, std::shared_ptr<ConstantBuffer>* constantBuffer)
{
    if (!constantBuffer)
    {
        assert(false);
        return E_POINTER;
    }

    *constantBuffer = std::make_shared<ConstantBuffer>();
    HRESULT hr = (*constantBuffer)->Initialize(Device, data, dataSizeInBytes);
    CHECKHR(hr);
    return hr;
}

HRESULT GraphicsDevice::CreateVertexBuffer(VertexFormat format, const void* data, uint32_t dataSizeInBytes, std::shared_ptr<VertexBuffer>* vertexBuffer)
{
    if (!vertexBuffer)
    {
        assert(false);
        return E_POINTER;
    }

    *vertexBuffer = std::make_shared<VertexBuffer>();
    HRESULT hr = (*vertexBuffer)->Initialize(Device, format, data, dataSizeInBytes);
    CHECKHR(hr);
    return hr;
}

HRESULT GraphicsDevice::CreateTexture2D(const D3D11_TEXTURE2D_DESC& desc, DXGI_FORMAT rtvFormat, DXGI_FORMAT srvFormat, DXGI_FORMAT dsvFormat, std::shared_ptr<Texture2D>* texture)
{
    if (!texture)
    {
        assert(false);
        return E_POINTER;
    }

    *texture = std::make_shared<Texture2D>();
    HRESULT hr = (*texture)->Initialize(Device, desc, rtvFormat, srvFormat, dsvFormat);
    CHECKHR(hr);
    return hr;
}

HRESULT GraphicsDevice::CreateTexture2D(const ComPtr<ID3D11Texture2D>& existing, std::shared_ptr<Texture2D>* texture)
{
    if (!texture)
    {
        assert(false);
        return E_POINTER;
    }

    *texture = std::make_shared<Texture2D>();
    HRESULT hr = (*texture)->WrapExisting(existing);
    CHECKHR(hr);
    return hr;
}

HRESULT GraphicsDevice::CreateShaderPassCompute(const uint8_t* computeShader, size_t computeShaderNumBytes, std::shared_ptr<ShaderPass>* shaderPass)
{
    if (!shaderPass)
    {
        assert(false);
        return E_POINTER;
    }

    *shaderPass = std::make_shared<ShaderPass>();
    HRESULT hr = (*shaderPass)->InitializeCompute(Device, computeShader, computeShaderNumBytes);
    CHECKHR(hr);
    return hr;
}

HRESULT GraphicsDevice::CreateTexture2D(const D3D11_TEXTURE2D_DESC& desc, const void* initData, std::shared_ptr<Texture2D>* texture)
{
    if (!texture)
    {
        assert(false);
        return E_POINTER;
    }

    *texture = std::make_shared<Texture2D>();
    HRESULT hr = (*texture)->Initialize(Device, desc, desc.Format, desc.Format, desc.Format, initData);
    CHECKHR(hr);
    return hr;
}
///////////////////////////////////////////////////////////////////////////////
// GetJackSubtypeForEndpoint
//
// Gets the subtype of the jack that the specified endpoint device is plugged
// into. E.g. if the endpoint is for an array mic, then we would expect the
// subtype of the jack to be KSNODETYPE_MICROPHONE_ARRAY
//
///////////////////////////////////////////////////////////////////////////////
GUID KinectAudioSource::GetJackSubtypeForEndpoint( IMMDevice* pEndpoint )
{
    if ( pEndpoint == 0 )
    {
        throw std::invalid_argument( "A null endpoint instance was passed" );
    }

    CComPtr<IDeviceTopology> spEndpointTopology;
    CComPtr<IConnector> spPlug;
    CComPtr<IConnector> spJack;
    CComPtr<IPart> spJackAsPart;

    // Get the Device Topology interface
    CHECKHR( pEndpoint->Activate(__uuidof(IDeviceTopology), CLSCTX_INPROC_SERVER, NULL, (void**)&spEndpointTopology) );
    CHECKHR( spEndpointTopology->GetConnector(0, &spPlug) );
    CHECKHR( spPlug->GetConnectedTo( &spJack ) );
    CHECKHR( spJack.QueryInterface( &spJackAsPart ) );

    GUID subtype;
    CHECKHR( spJackAsPart->GetSubType( &subtype ) );
    return subtype;
}
std::unique_ptr<uint8_t[]> MixedApp::LoadImageFile(const wchar_t* filename, uint32_t* width, uint32_t* height)
{
    ComPtr<IWICBitmapDecoder> decoder;
    HRESULT hr = WicFactory->CreateDecoderFromFilename(filename, nullptr, GENERIC_READ, WICDecodeMetadataCacheOnDemand, &decoder);
    CHECKHR(hr, L"Failed to create image decoder for file. %s, hr = 0x%08x.", filename, hr);

    ComPtr<IWICBitmapFrameDecode> frame;
    hr = decoder->GetFrame(0, &frame);
    CHECKHR(hr, L"Failed to decode image frame. hr = 0x%08x.", hr);

    ComPtr<IWICFormatConverter> converter;
    hr = WicFactory->CreateFormatConverter(&converter);
    CHECKHR(hr, L"Failed to create image format converter. hr = 0x%08x.", hr);

    hr = converter->Initialize(frame.Get(), GUID_WICPixelFormat32bppRGBA, WICBitmapDitherTypeNone, nullptr, 0, WICBitmapPaletteTypeCustom);
    CHECKHR(hr, L"Failed to initialize image format converter. hr = 0x%08x.", hr);

    frame->GetSize(width, height);

    std::unique_ptr<uint8_t[]> pixels(new uint8_t[(*width) * (*height) * sizeof(uint32_t)]);
    hr = converter->CopyPixels(nullptr, sizeof(uint32_t) * (*width), sizeof(uint32_t) * (*width) * (*height), pixels.get());
    CHECKHR(hr, L"Failed to decode image pixels. hr = 0x%08x.", hr);

    return pixels;
}
///////////////////////////////////////////////////////////////////////////////
// GetMicArrayDeviceIndex
//
// Obtains the capture device index corresponding to the microphone array device.
//
// Return: index of the microphone array device.
// Throws if no microphone array device is found.
//
///////////////////////////////////////////////////////////////////////////////
int KinectAudioSource::GetMicArrayDeviceIndex()
{
    CComPtr<IMMDeviceEnumerator> spEnumerator;
    CHECKHR( spEnumerator.CoCreateInstance( __uuidof(MMDeviceEnumerator), NULL, CLSCTX_ALL ) );

    CComPtr<IMMDeviceCollection> spEndpoints;
    CHECKHR( spEnumerator->EnumAudioEndpoints( eCapture, DEVICE_STATE_ACTIVE, &spEndpoints ) );

    UINT dwCount = 0;
    CHECKHR(spEndpoints->GetCount(&dwCount));

    // Iterate over all capture devices until finding one that is a microphone array
    for ( UINT index = 0; index < dwCount; index++ )
    {
        CComPtr<IMMDevice> spDevice;  // CComPtr so each endpoint is released per iteration
        CHECKHR( spEndpoints->Item( index, &spDevice ) );

        GUID subType = GetJackSubtypeForEndpoint( spDevice );
        if ( subType == KSNODETYPE_MICROPHONE_ARRAY )
        {
            return index;
        }
    }

    throw std::runtime_error( "Device not found" );
}
// Shader creation
HRESULT GraphicsDevice::CreateShaderPassGraphics(VertexFormat format, const uint8_t* vertexShader, size_t vertexShaderNumBytes,
    const uint8_t* pixelShader, size_t pixelShaderNumBytes, std::shared_ptr<ShaderPass>* shaderPass)
{
    if (!shaderPass)
    {
        assert(false);
        return E_POINTER;
    }

    *shaderPass = std::make_shared<ShaderPass>();
    HRESULT hr = (*shaderPass)->InitializeGraphics(Device, format, vertexShader, vertexShaderNumBytes, pixelShader, pixelShaderNumBytes);
    CHECKHR(hr);
    return hr;
}
MixedApp::MixedApp(HINSTANCE instance)
    : Window(nullptr)
{
#if HACK_GENERATE_GAUSSIAN_KERNEL // Move this somewhere else!
    float o = 0.9f; // scale (sigma)
    float matrix[7]{}; // taps for offsets -3 to 3
    float sum = 0.f;
    for (int i = -3; i <= 3; ++i)
    {
        matrix[i + 3] = exp(-(i * i) / (2 * o * o)) / sqrtf(2 * XM_PI * (o * o));
        sum += matrix[i + 3];
    }
    // Normalize so the kernel weights sum to 1
    for (int i = -3; i <= 3; ++i)
    {
        matrix[i + 3] = matrix[i + 3] / sum;
    }
#endif

    CoInitializeEx(nullptr, COINIT_MULTITHREADED);

    HRESULT hr = CoCreateInstance(CLSID_WICImagingFactory, nullptr, CLSCTX_INPROC_SERVER, IID_PPV_ARGS(&WicFactory));
    CHECKHR(hr, L"Failed to create WIC factory. hr = 0x%08x.", hr);

    InitializeWindow(instance);
    Renderer.reset(new ::Renderer(Window));

    uint32_t width = 0, height = 0;
    std::unique_ptr<uint8_t[]> pixels = LoadImageFile(L"car2.jpg", &width, &height);

    Color = Renderer->CreateColorImage(width, height, (const uint32_t*)pixels.get());
    Lum = Renderer->CreateLuminanceImage(width, height, nullptr);
    Norm = Renderer->CreateNormalsImage(width, height, nullptr);
    Blurred = Renderer->CreateColorImage(width, height, nullptr);
    Edges1 = Renderer->CreateLuminanceImage(width, height, nullptr);
    Edges2 = Renderer->CreateLuminanceImage(width, height, nullptr);

    Renderer->ColorToLum(Color, Lum);
    Renderer->LumToNormals(Lum, Norm);
    Renderer->Gaussian(Color, Blurred);
    Renderer->EdgeDetect(Color, Edges1);
    Renderer->EdgeDetect(Blurred, Edges2);
}
HRESULT GraphicsDevice::Initialize(const ComPtr<IDXGIFactory2>& factory, const ComPtr<IDXGIAdapter>& adapter, bool createDebug)
{
    Factory = factory;
    Adapter = adapter;

    uint32_t flags = 0;
    if (createDebug)
    {
        flags |= D3D11_CREATE_DEVICE_DEBUG;
    }

    D3D_FEATURE_LEVEL featureLevel = D3D_FEATURE_LEVEL_11_0;

    HRESULT hr = D3D11CreateDevice(Adapter.Get(),
        Adapter ? D3D_DRIVER_TYPE_UNKNOWN : D3D_DRIVER_TYPE_HARDWARE,
        nullptr, flags, &featureLevel, 1, D3D11_SDK_VERSION, &Device, nullptr, &Context);
    CHECKHR(hr);

    // create common states
    D3D11_SAMPLER_DESC sd{};
    sd.AddressU = sd.AddressV = sd.AddressW = D3D11_TEXTURE_ADDRESS_WRAP;
    sd.Filter = D3D11_FILTER_MIN_MAG_MIP_LINEAR;
    sd.MaxLOD = D3D11_FLOAT32_MAX;
    hr = Device->CreateSamplerState(&sd, &LinearWrapSampler);
    CHECKHR(hr);

    sd.AddressU = sd.AddressV = sd.AddressW = D3D11_TEXTURE_ADDRESS_CLAMP;
    sd.Filter = D3D11_FILTER_MIN_MAG_MIP_POINT;
    hr = Device->CreateSamplerState(&sd, &PointClampSampler);
    CHECKHR(hr);

    sd.AddressU = sd.AddressV = sd.AddressW = D3D11_TEXTURE_ADDRESS_WRAP;
    sd.Filter = D3D11_FILTER_ANISOTROPIC;
    sd.MaxAnisotropy = 8;
    hr = Device->CreateSamplerState(&sd, &AnisoWrapSampler);
    CHECKHR(hr);

    D3D11_DEPTH_STENCIL_DESC dsd{};
    dsd.DepthEnable = TRUE;
    dsd.DepthFunc = D3D11_COMPARISON_LESS;
    dsd.DepthWriteMask = D3D11_DEPTH_WRITE_MASK_ALL;
    hr = Device->CreateDepthStencilState(&dsd, &DepthWriteState);
    CHECKHR(hr);

    dsd.DepthFunc = D3D11_COMPARISON_LESS_EQUAL;
    dsd.DepthWriteMask = D3D11_DEPTH_WRITE_MASK_ZERO;
    hr = Device->CreateDepthStencilState(&dsd, &DepthReadState);
    CHECKHR(hr);

    return hr;
}
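// A note on the two depth states above, with a hedged usage sketch: DepthWriteState
// (LESS + writes) is the natural fit for a z-prepass that lays down depth, while
// DepthReadState (LESS_EQUAL + no writes) lets a later color pass re-rasterize the
// same geometry and pass the depth test exactly where the prepass wrote it. The
// helper below is hypothetical, showing only the state switching:
static void DrawWithDepthStates(const ComPtr<ID3D11DeviceContext>& context,
                                const ComPtr<ID3D11DepthStencilState>& depthWrite,
                                const ComPtr<ID3D11DepthStencilState>& depthRead)
{
    context->OMSetDepthStencilState(depthWrite.Get(), 0); // z-prepass: write depth with LESS
    // ... draw depth-only geometry ...
    context->OMSetDepthStencilState(depthRead.Get(), 0);  // shading: LESS_EQUAL, no depth writes
    // ... draw the same geometry again with pixel shaders bound ...
}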
HRESULT ForwardPlusRenderer::RenderFrame(const RenderTarget& renderTarget, const RenderView& view)
{
    HRESULT hr = S_OK;

    if (RTWidth != renderTarget.Texture->GetDesc().Width ||
        RTHeight != renderTarget.Texture->GetDesc().Height)
    {
        hr = RecreateSurfaces(renderTarget.Texture->GetDesc().Width,
            renderTarget.Texture->GetDesc().Height, MsaaEnabled ? 4 : 1);
        CHECKHR(hr);
    }

    // Scene traversal once, used for multiple passes
    Scene->GetVisibleVisuals(view, &Visuals);
    Scene->GetVisibleLights(view, &Lights);

    RenderZPrePass(view);
    CullLights(view);
    RenderFinal(view, renderTarget);

    return S_OK;
}
static void AppInit(int width, int height, const char* title)
{
    CHECKHR(SetProcessDpiAwareness(PROCESS_PER_MONITOR_DPI_AWARE));

    WNDCLASSEXW wc = {};
    wc.cbSize = sizeof(wc);
    wc.lpfnWndProc = WndProc;
    wc.hInstance = GetModuleHandleW(NULL);
    wc.hCursor = LoadCursorW(NULL, IDC_ARROW);
    wc.lpszClassName = L"WindowClass";
    CHECKWIN32(RegisterClassExW(&wc));

    DWORD dwStyle = WS_OVERLAPPEDWINDOW;
    DWORD dwExStyle = 0;

    RECT wr = { 0, 0, width, height };
    CHECKWIN32(AdjustWindowRectEx(&wr, dwStyle, FALSE, dwExStyle));

    std::wstring wtitle = WideFromMultiByte(title);

    HWND hWnd = CreateWindowExW(
        dwExStyle,
        L"WindowClass", wtitle.c_str(),
        dwStyle,
        CW_USEDEFAULT, CW_USEDEFAULT,
        wr.right - wr.left, wr.bottom - wr.top,
        NULL, NULL, GetModuleHandleW(NULL), NULL);
    CHECKWIN32(hWnd != NULL);

    RendererInit(hWnd);

    RECT cr;
    CHECKWIN32(GetClientRect(hWnd, &cr));
    RendererResize(
        cr.right - cr.left, cr.bottom - cr.top,
        cr.right - cr.left, cr.bottom - cr.top);

    ShowWindow(hWnd, SW_SHOWDEFAULT);

    g_App.hWnd = hWnd;
    g_App.bShouldClose = false;
}
HRESULT IndexBuffer::Initialize(const ComPtr<ID3D11Device>& device, const void* data, uint32_t dataBytes)
{
    IB = nullptr;

    D3D11_BUFFER_DESC desc{};
    desc.BindFlags = D3D11_BIND_INDEX_BUFFER;
    desc.ByteWidth = dataBytes;
    desc.StructureByteStride = sizeof(uint32_t);
    desc.Usage = D3D11_USAGE_DEFAULT;

    assert(desc.ByteWidth % desc.StructureByteStride == 0);

    D3D11_SUBRESOURCE_DATA init{};
    init.pSysMem = data;
    init.SysMemPitch = desc.ByteWidth;

    HRESULT hr = device->CreateBuffer(&desc, &init, &IB);
    CHECKHR(hr);

    BaseIndex = 0;
    IndexCount = desc.ByteWidth / desc.StructureByteStride;

    return hr;
}
HRESULT R3DResource::MakeResident()
{
    ComPtr<ID3D12Device> device;
    CHECKHR(resource->GetDevice(IID_PPV_ARGS(&device)));

    // MakeResident takes ID3D12Pageable, so go through a base-interface pointer
    ID3D12Pageable* pageable = resource.Get();
    HRESULT hr = device->MakeResident(1, &pageable);
    CHECKHR(hr);
    return hr;
}
_Use_decl_annotations_
StaticLevelData::StaticLevelData(const ComPtr<ID3D11DeviceContext>& context,
    const StaticGeometryVertex* vertices, uint32_t numVertices,
    const uint32_t* indices, uint32_t numIndices,
    std::unique_ptr<MaterialSource[]>& materials, uint32_t numMaterials,
    size_t textureBudgetBytes)
    : _context(context), _numMaterials(numMaterials), _numVisibleIndices(0), _numIndices(numIndices),
      _numVisibleMaterials(0), _numResident(0)
{
    //_spatial.reset(BspCompiler::CreateFromTriangles(vertices, numVertices, indices, numIndices));
    _spatial.reset(BIH::CreateFromTriangles(vertices, numVertices, indices, numIndices));
    //_spatial.reset(KdTreeCompiler2::CreateFromTriangles(vertices, numVertices, indices, numIndices));
    //{
    //    std::unique_ptr<KdTreeCompiler> kdTree(KdTreeCompiler::CreateFromTriangles(vertices, numVertices, indices, numIndices));
    //}

    ComPtr<ID3D11Device> device;
    context->GetDevice(&device);

    D3D11_BUFFER_DESC bd = {};
    bd.BindFlags = D3D11_BIND_VERTEX_BUFFER;
    bd.StructureByteStride = sizeof(StaticGeometryVertex);
    bd.ByteWidth = bd.StructureByteStride * numVertices;
    bd.Usage = D3D11_USAGE_DEFAULT;

    D3D11_SUBRESOURCE_DATA init = {};
    init.pSysMem = vertices;
    init.SysMemPitch = bd.ByteWidth;

    CHECKHR(device->CreateBuffer(&bd, &init, &_vertices));

    bd.BindFlags = D3D11_BIND_INDEX_BUFFER;
    bd.StructureByteStride = sizeof(uint32_t);
    bd.ByteWidth = bd.StructureByteStride * numIndices;
    init.pSysMem = indices;
    init.SysMemPitch = bd.ByteWidth;

    CHECKHR(device->CreateBuffer(&bd, &init, &_indices));

    _visibleIndices.reset(new uint32_t[numIndices]);
    _visibleMaterials.reset(new uint32_t[numIndices]);

    //
    // Determine size of texture array and texture resolution based on the number of textures we
    // need to handle, and the texture budget we've been given. The equation was formulated offline,
    // and is based on caching mip levels 2-n for all textures, and then keeping a smaller number
    // of resident full textures, including mips 0 & 1:
    //
    //     3/4 * (budget / mip0Size) - 3/48 * totalTextures = textureArraySize
    //
    // We start by trying the largest mip size we support (1024x1024) and compute textureArraySize.
    // If this value is enough, then we stop. Otherwise, we move down to the next mip resolution as
    // our target and recompute. We continue iterating until we find one that allows us to hold
    // enough concurrent textures resident at the current budget. That mip level is our top texture
    // resolution for the level.
    //
    // Assuming DXGI_FORMAT_R8G8B8A8_UNORM for now. If we decide to move to another format, we need
    // to adjust this computation.
    //
    uint32_t numTextures = numMaterials * 2 + 1; // diffuse & normals, plus 1 for error texture
    uint32_t bytesPerPixel = 4;
    uint32_t size = 1024;
    uint32_t mip0Size = size * size * bytesPerPixel;
    static const float c1 = 3.0f / 48.0f;

    _textureArraySize = (uint32_t)(0.75f * (textureBudgetBytes / (size_t)mip0Size) - c1 * numTextures);

    uint32_t minTextureArraySize = 32;
    uint32_t initialMip = 0;
    while (_textureArraySize < minTextureArraySize && size > 16)
    {
        size /= 2;
        mip0Size = size * size * bytesPerPixel;
        _textureArraySize = (uint32_t)(0.75f * (textureBudgetBytes / (size_t)mip0Size) - c1 * numTextures);
        ++initialMip;
    }

    if (size == 16)
    {
        // Reaching the smallest supported resolution means the budget could not be satisfied
        Error("Could not satisfy the number of textures required with the budget provided.");
    }

    D3D11_TEXTURE2D_DESC td = {};
    td.BindFlags = D3D11_BIND_SHADER_RESOURCE;
    td.ArraySize = 1;
    td.Format = DXGI_FORMAT_R32G32_UINT;
    td.Width = numMaterials;
    td.Height = 1;
    td.MipLevels = 1;
    td.SampleDesc.Count = 1;
    td.Usage = D3D11_USAGE_DYNAMIC;
    td.CPUAccessFlags = D3D11_CPU_ACCESS_WRITE;

    ComPtr<ID3D11Texture2D> texture;
    CHECKHR(device->CreateTexture2D(&td, nullptr, &texture));
    CHECKHR(device->CreateShaderResourceView(texture.Get(), nullptr, &_materialMap));

    td.ArraySize = _textureArraySize;
    td.Format = DXGI_FORMAT_R8G8B8A8_UNORM;
    td.Height = size;
    td.Width = size;
    td.MipLevels = 0; // full mip chain
    td.Usage = D3D11_USAGE_DEFAULT;
    td.CPUAccessFlags = 0;
    CHECKHR(device->CreateTexture2D(&td, nullptr, &texture));
    CHECKHR(device->CreateShaderResourceView(texture.Get(), nullptr, &_textureArray));

    _mapEntries.reset(new MaterialMapEntry[numMaterials]);
    ZeroMemory(_mapEntries.get(), numMaterials * sizeof(MaterialMapEntry));

    _residency.reset(new MaterialResidency[numMaterials]);
    ZeroMemory(_residency.get(), numMaterials * sizeof(MaterialResidency));

    bd.BindFlags = D3D11_BIND_CONSTANT_BUFFER;
    bd.StructureByteStride = sizeof(PerFrame);
    bd.ByteWidth = bd.StructureByteStride;
    CHECKHR(device->CreateBuffer(&bd, nullptr, &_perFrameCB));

    D3D11_INPUT_ELEMENT_DESC elems[4] = {};
    elems[0].Format = DXGI_FORMAT_R32G32B32_FLOAT;
    elems[0].SemanticName = "POSITION";
    elems[0].InputSlotClass = D3D11_INPUT_PER_VERTEX_DATA;
    elems[1].AlignedByteOffset = sizeof(XMFLOAT3);
    elems[1].Format = DXGI_FORMAT_R32G32B32_FLOAT;
    elems[1].SemanticName = "NORMAL";
    elems[1].InputSlotClass = D3D11_INPUT_PER_VERTEX_DATA;
    elems[2].AlignedByteOffset = 2 * sizeof(XMFLOAT3);
    elems[2].Format = DXGI_FORMAT_R32G32_FLOAT;
    elems[2].SemanticName = "TEXCOORD";
    elems[2].InputSlotClass = D3D11_INPUT_PER_VERTEX_DATA;
    elems[3].AlignedByteOffset = 2 * sizeof(XMFLOAT3) + sizeof(XMFLOAT2);
    elems[3].Format = DXGI_FORMAT_R32_UINT;
    elems[3].SemanticName = "TEXCOORD";
    elems[3].SemanticIndex = 1;
    elems[3].InputSlotClass = D3D11_INPUT_PER_VERTEX_DATA;

    size_t length;
    auto buffer = ReadFile("staticlevel_vs.cso", &length);
    CHECKHR(device->CreateVertexShader(buffer.get(), length, nullptr, &_vertexShader));
    CHECKHR(device->CreateInputLayout(elems, _countof(elems), buffer.get(), length, &_inputLayout));

    buffer = ReadFile("staticlevel_ps.cso", &length);
    CHECKHR(device->CreatePixelShader(buffer.get(), length, nullptr, &_pixelShader));

    _textureStreamer.reset(new TextureStreamer(materials, numMaterials, texture, initialMip));
}
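// To make the budget equation concrete, a worked example (the numbers are chosen
// here for illustration and are not from the original source): with a 256 MiB
// budget and numMaterials = 200, so numTextures = 401:
//
//     size = 1024: mip0Size = 4 MiB -> 0.75 * 64  - 0.0625 * 401 ~= 22   (< 32, too small)
//     size = 512:  mip0Size = 1 MiB -> 0.75 * 256 - 0.0625 * 401 ~= 166  (>= 32, done)
//
// so this level would stream 512x512 top mips (initialMip = 1) with room for about
// 166 array slices.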
void StaticLevelData::Draw(const XMFLOAT4X4& cameraWorld, const XMFLOAT4X4& view, const XMFLOAT4X4& projection)
{
    {
        auto lock = GetGraphics().LockContext();

        //
        // Prepare for rendering the level
        //
        static const uint32_t stride = sizeof(StaticGeometryVertex);
        static const uint32_t offset = 0;
        _context->IASetVertexBuffers(0, 1, _vertices.GetAddressOf(), &stride, &offset);
        _context->IASetIndexBuffer(_indices.Get(), DXGI_FORMAT_R32_UINT, 0);
        _context->IASetPrimitiveTopology(D3D11_PRIMITIVE_TOPOLOGY_TRIANGLELIST);
        _context->IASetInputLayout(_inputLayout.Get());
        _context->VSSetShader(_vertexShader.Get(), nullptr, 0);
        _context->PSSetShader(_pixelShader.Get(), nullptr, 0);
        _context->VSSetConstantBuffers(0, 1, _perFrameCB.GetAddressOf());
        _context->VSSetShaderResources(0, 1, _materialMap.GetAddressOf());
        _context->PSSetShaderResources(0, 1, _textureArray.GetAddressOf());
        _context->PSSetSamplers(0, 1, GetGraphics().GetLinearWrapSampler().GetAddressOf());
    }

    //
    // Gather the visible indices
    //
    //UNREFERENCED_PARAMETER(cameraWorld);
    _spatial->BuildVisibleIndexList(XMLoadFloat4x4(&cameraWorld), XMLoadFloat4x4(&projection),
        _visibleIndices.get(), _numIndices, &_numVisibleIndices,
        _visibleMaterials.get(), _numMaterials, &_numVisibleMaterials);

    //
    // Ensure materials are paged in and update the IB
    //
    {
        auto lock = GetGraphics().LockContext();

        uint32_t* materialId = _visibleMaterials.get();
        for (uint32_t i = 0; i < _numVisibleMaterials; ++i, ++materialId)
        {
            EnsureMaterial(*materialId);
        }

        ComPtr<ID3D11Resource> resource;
        _materialMap->GetResource(&resource);

        D3D11_MAPPED_SUBRESOURCE mapped;
        CHECKHR(_context->Map(resource.Get(), 0, D3D11_MAP_WRITE_DISCARD, 0, &mapped));
        memcpy_s(mapped.pData, mapped.RowPitch, _mapEntries.get(), sizeof(MaterialMapEntry) * _numMaterials);
        _context->Unmap(resource.Get(), 0);

        _perFrame.View = view;
        _perFrame.Projection = projection;
        _context->UpdateSubresource(_perFrameCB.Get(), 0, nullptr, &_perFrame, sizeof(_perFrame), 0);

        D3D11_BOX box = {};
        box.right = _numVisibleIndices * sizeof(uint32_t);
        box.bottom = 1;
        box.back = 1;
        _context->UpdateSubresource(_indices.Get(), 0, &box, _visibleIndices.get(), box.right, box.right);

        _context->DrawIndexed(_numVisibleIndices, 0, 0);
    }
}