コード例 #1
0
// Fills the per-batch vertex shader constant buffer with the current
// viewport dimensions and the dimensions of the given sprite texture,
// then returns the texture description so the caller can reuse it.
D3D11_TEXTURE2D_DESC SpriteRenderer::SetPerBatchData(ID3D11ShaderResourceView* texture)
{
    // Set per-batch constants
    VSPerBatchCB perBatch;

    // Query the first bound viewport; the shader needs its size to map
    // screen-space sprite coordinates into clip space.
    UINT numViewports = 1;
    D3D11_VIEWPORT vp;
    context->RSGetViewports(&numViewports, &vp);
    perBatch.ViewportSize = XMFLOAT2(static_cast<float>(vp.Width), static_cast<float>(vp.Height));

    // Get the size of the texture. GetResource() AddRef's the resource and
    // Attach() adopts that reference, so it is released exactly once.
    // The SRV is assumed to view a 2D texture here, so a static downcast
    // is sufficient (static_cast, not reinterpret_cast, for a known
    // base-to-derived COM interface conversion) — TODO confirm callers
    // never pass a non-Texture2D SRV.
    ID3D11Resource* resource;
    ID3D11Texture2DPtr texResource;
    D3D11_TEXTURE2D_DESC desc;
    texture->GetResource(&resource);
    texResource.Attach(static_cast<ID3D11Texture2D*>(resource));
    texResource->GetDesc(&desc);
    perBatch.TextureSize = XMFLOAT2(static_cast<float>(desc.Width), static_cast<float>(desc.Height));

    // Upload the constants; WRITE_DISCARD avoids stalling on a dynamic buffer.
    D3D11_MAPPED_SUBRESOURCE mapped;
    DXCall(context->Map(vsPerBatchCB, 0, D3D11_MAP_WRITE_DISCARD, 0, &mapped));
    CopyMemory(mapped.pData, &perBatch, sizeof(VSPerBatchCB));
    context->Unmap(vsPerBatchCB, 0);

    return desc;
}
コード例 #2
0
	void SetDebugName(const char* name) {
		if (mTexture)
			mTexture->SetPrivateData(WKPDID_D3DDebugObjectName, strlen(name), name);
		if (mSRView)
		{
			char buff[255];
			sprintf_s(buff, "%s SRV", name);
			mSRView->SetPrivateData(WKPDID_D3DDebugObjectName, 0, 0);
			mSRView->SetPrivateData(WKPDID_D3DDebugObjectName, strlen(buff), buff);
		}
		int i = 0;
		for (auto it : mRTViews)
		{
			char buff[255];
			sprintf_s(buff, "%s RTV %d", name, i++);
			it->SetPrivateData(WKPDID_D3DDebugObjectName, 0, 0);
			it->SetPrivateData(WKPDID_D3DDebugObjectName, strlen(buff), buff);
		}

		i = 0;
		for (auto it : mDSViews)
		{
			char buff[255];
			sprintf_s(buff, "%s DSV %d", name, i++);
			it->SetPrivateData(WKPDID_D3DDebugObjectName, 0, 0);
			it->SetPrivateData(WKPDID_D3DDebugObjectName, strlen(buff), buff);
		}
	}
コード例 #3
0
	//--------------------------------------------------------------------
	// Own
	//--------------------------------------------------------------------
	// Returns the underlying D3D11 texture, lazily recovering it from the
	// shader resource view when this object was created from an SRV only.
	ID3D11Texture2D* GetHardwareTexture() {
		if (!mTexture && mSRView){
			ID3D11Resource* resource = 0;
			// GetResource() AddRef's; IUnknownDeleter releases it when the
			// smart pointer dies, balancing the reference count.
			mSRView->GetResource(&resource);
			// static_cast (not a C-style cast) for the known
			// base-to-derived interface downcast; assumes the SRV views a
			// 2D texture — TODO confirm no other resource types reach here.
			mTexture = ID3D11Texture2DPtr(static_cast<ID3D11Texture2D*>(resource), IUnknownDeleter());
		}
		return mTexture.get();
	}
コード例 #4
0
// Decodes a 2D texture (or cube map / texture array) into a CPU-side
// TextureData<T> buffer: runs a decode compute shader into a UAV texture
// of 'outFormat', copies that to a staging texture, and reads it back
// slice by slice.
// NOTE(review): assumes the enclosing template parameter T matches the
// texel layout of outFormat — confirm at call sites.
static void GetTextureData(ID3D11Device* device, ID3D11ShaderResourceView* textureSRV,
                           DXGI_FORMAT outFormat, TextureData<T>& texData)
{
    // Compiled once and cached for the lifetime of the process.
    static ComputeShaderPtr decodeTextureCS;
    static ComputeShaderPtr decodeTextureArrayCS;

    static const uint32 TGSize = 16;

    if(decodeTextureCS.Valid() == false)
    {
        CompileOptions opts;
        opts.Add("TGSize_", TGSize);
        const std::wstring shaderPath = SampleFrameworkDir() + L"Shaders\\DecodeTextureCS.hlsl";
        decodeTextureCS = CompileCSFromFile(device, shaderPath.c_str(), "DecodeTextureCS", "cs_5_0", opts);

        decodeTextureArrayCS = CompileCSFromFile(device, shaderPath.c_str(), "DecodeTextureArrayCS", "cs_5_0", opts);
    }

    ID3D11Texture2DPtr texture;
    // NOTE(review): writes the resource pointer straight into the smart
    // pointer's storage; presumably the smart pointer adopts the reference
    // added by GetResource, and the SRV views a 2D texture — verify.
    textureSRV->GetResource(reinterpret_cast<ID3D11Resource**>(&texture));

    D3D11_TEXTURE2D_DESC texDesc;
    texture->GetDesc(&texDesc);

    D3D11_SHADER_RESOURCE_VIEW_DESC srvDesc;
    textureSRV->GetDesc(&srvDesc);

    // Cube maps must be re-viewed as a plain 2D array so the compute
    // shader can address the six faces as array slices.
    ID3D11ShaderResourceViewPtr sourceSRV = textureSRV;
    uint32 arraySize = texDesc.ArraySize;
    if(srvDesc.ViewDimension == D3D11_SRV_DIMENSION_TEXTURECUBE
       || srvDesc.ViewDimension == D3D11_SRV_DIMENSION_TEXTURECUBEARRAY)
    {
        srvDesc.ViewDimension = D3D11_SRV_DIMENSION_TEXTURE2DARRAY;
        srvDesc.Texture2DArray.ArraySize = arraySize;
        srvDesc.Texture2DArray.FirstArraySlice = 0;
        srvDesc.Texture2DArray.MostDetailedMip = 0;
        srvDesc.Texture2DArray.MipLevels = -1;  // -1 wraps to UINT_MAX = "all mip levels"
        DXCall(device->CreateShaderResourceView(texture, &srvDesc, &sourceSRV));
    }

    // Single-mip UAV target the compute shader decodes into.
    D3D11_TEXTURE2D_DESC decodeTextureDesc;
    decodeTextureDesc.Width = texDesc.Width;
    decodeTextureDesc.Height = texDesc.Height;
    decodeTextureDesc.ArraySize = arraySize;
    decodeTextureDesc.BindFlags = D3D11_BIND_UNORDERED_ACCESS;
    decodeTextureDesc.Format = outFormat;
    decodeTextureDesc.MipLevels = 1;
    decodeTextureDesc.MiscFlags = 0;
    decodeTextureDesc.SampleDesc.Count = 1;
    decodeTextureDesc.SampleDesc.Quality = 0;
    decodeTextureDesc.Usage = D3D11_USAGE_DEFAULT;
    decodeTextureDesc.CPUAccessFlags = 0;

    ID3D11Texture2DPtr decodeTexture;
    DXCall(device->CreateTexture2D(&decodeTextureDesc, nullptr, &decodeTexture));

    ID3D11UnorderedAccessViewPtr decodeTextureUAV;
    DXCall(device->CreateUnorderedAccessView(decodeTexture, nullptr, &decodeTextureUAV));

    ID3D11DeviceContextPtr context;
    device->GetImmediateContext(&context);

    // One thread group covers a TGSize x TGSize tile; one Z slice per
    // array element. Array textures use the dedicated array shader.
    SetCSInputs(context, sourceSRV);
    SetCSOutputs(context, decodeTextureUAV);
    SetCSShader(context, arraySize > 1 ? decodeTextureArrayCS : decodeTextureCS);

    context->Dispatch(DispatchSize(TGSize, texDesc.Width), DispatchSize(TGSize, texDesc.Height), arraySize);

    ClearCSInputs(context);
    ClearCSOutputs(context);

    // Copy to a CPU-readable staging texture before mapping.
    StagingTexture2D stagingTexture;
    stagingTexture.Initialize(device, texDesc.Width, texDesc.Height, outFormat, 1, 1, 0, arraySize);
    context->CopyResource(stagingTexture.Texture, decodeTexture);

    texData.Init(texDesc.Width, texDesc.Height, arraySize);

    // Read back each slice row by row; rows may be padded to 'pitch'.
    // NOTE(review): no matching Unmap for stagingTexture.Map — presumably
    // StagingTexture2D unmaps internally (e.g. on the next Map or in its
    // destructor); verify.
    for(uint32 slice = 0; slice < arraySize; ++slice)
    {
        uint32 pitch = 0;
        const uint8* srcData = reinterpret_cast<const uint8*>(stagingTexture.Map(context, slice, pitch));
        Assert_(pitch >= texDesc.Width * sizeof(T));

        const uint32 sliceOffset = texDesc.Width * texDesc.Height * slice;

        for(uint32 y = 0; y < texDesc.Height; ++y)
        {
            const T* rowData = reinterpret_cast<const T*>(srcData);

            for(uint32 x = 0; x < texDesc.Width; ++x)
                texData.Texels[y * texDesc.Width + x + sliceOffset] = rowData[x];

            srcData += pitch;
        }
    }
}
コード例 #5
0
// Runs one full-screen post-processing pass with the given pixel shader:
// binds the previously queued outputs (RTVs and optional UAVs) and input
// SRVs, fills the PS constant buffer with per-texture mip-adjusted sizes,
// sets matching viewports, draws a full-screen quad, then unbinds
// everything and clears the queues.
void PostProcessorBase::PostProcess(ID3D11PixelShader* pixelShader, const wchar* name)
{
    Assert_(context);

    Assert_(inputs.size() <= MaxInputs);

    D3DPERF_BeginEvent(0xFFFFFFFF, name);

    // Set the outputs. reinterpret_cast treats the vector of smart
    // pointers as a raw interface-pointer array (relies on the smart
    // pointer holding exactly one raw pointer).
    ID3D11RenderTargetView** renderTargets = reinterpret_cast<ID3D11RenderTargetView**>(&outputs[0]);
    uint32 numRTs = static_cast<uint32>(outputs.size());
    if(uaViews.size() == 0)
        context->OMSetRenderTargets(numRTs, renderTargets, nullptr);
    else
    {
        ID3D11UnorderedAccessView** uavs = reinterpret_cast<ID3D11UnorderedAccessView**>(&uaViews[0]);
        UINT numUAVs = static_cast<uint32>(uaViews.size());
        UINT initialCounts[D3D11_PS_CS_UAV_REGISTER_COUNT] = { 0 };
        // UAV slots start right after the render targets (UAVStartSlot = numRTs).
        context->OMSetRenderTargetsAndUnorderedAccessViews(numRTs, renderTargets, nullptr, numRTs, numUAVs, uavs, initialCounts);
    }

    // Set the input textures
    ID3D11ShaderResourceView** textures = reinterpret_cast<ID3D11ShaderResourceView**>(&inputs[0]);
    context->PSSetShaderResources(0, static_cast<uint32>(inputs.size()), textures);

    // Set the constants: the mip-adjusted size of each input and of the
    // first output, written directly into the mapped buffer.
    D3D11_MAPPED_SUBRESOURCE mapped;
    DXCall(context->Map(psConstants, 0, D3D11_MAP_WRITE_DISCARD, 0, &mapped));
    PSConstants* constants = reinterpret_cast<PSConstants*>(mapped.pData);

    for (size_t i = 0; i < inputs.size(); ++i)
    {
        // A null input slot gets a zero size so the shader can detect it.
        if(inputs[i] == nullptr)
        {
            constants->InputSize[i].x = 0.0f;
            constants->InputSize[i].y = 0.0f;
            continue;
        }

        ID3D11Resource* resource;
        ID3D11Texture2DPtr texture;
        D3D11_TEXTURE2D_DESC desc;
        D3D11_SHADER_RESOURCE_VIEW_DESC srDesc;
        inputs[i]->GetDesc(&srDesc);
        uint32 mipLevel = srDesc.Texture2D.MostDetailedMip;
        inputs[i]->GetResource(&resource);
        // Attach() adopts the reference added by GetResource.
        texture.Attach(reinterpret_cast<ID3D11Texture2D*>(resource));
        texture->GetDesc(&desc);
        // Size of the mip actually viewed, clamped to at least 1 texel.
        constants->InputSize[i].x = static_cast<float>(std::max<uint32>(desc.Width / (1 << mipLevel), 1));
        constants->InputSize[i].y = static_cast<float>(std::max<uint32>(desc.Height / (1 << mipLevel), 1));
    }

    // Output size comes from the first render target's mip slice.
    ID3D11Resource* resource;
    ID3D11Texture2DPtr texture;
    D3D11_TEXTURE2D_DESC desc;
    D3D11_RENDER_TARGET_VIEW_DESC rtDesc;
    outputs[0]->GetResource(&resource);
    outputs[0]->GetDesc(&rtDesc);
    uint32 mipLevel = rtDesc.Texture2D.MipSlice;
    texture.Attach(reinterpret_cast<ID3D11Texture2D*>(resource));
    texture->GetDesc(&desc);
    constants->OutputSize.x = static_cast<float>(std::max<uint32>(desc.Width / (1 << mipLevel), 1));
    constants->OutputSize.y = static_cast<float>(std::max<uint32>(desc.Height / (1 << mipLevel), 1));

    context->Unmap(psConstants, 0);

    ID3D11Buffer* constantBuffers[1] = { psConstants };
    context->PSSetConstantBuffers(0, 1, constantBuffers);

    // Set the viewports — all sized to the first output's mip dimensions.
    D3D11_VIEWPORT viewports[16];
    for (UINT_PTR i = 0; i < 16; ++i)
    {
        viewports[i].Width = static_cast<float>(std::max<uint32>(desc.Width / (1 << mipLevel), 1));
        viewports[i].Height = static_cast<float>(std::max<uint32>(desc.Height / (1 << mipLevel), 1));
        viewports[i].TopLeftX = 0;
        viewports[i].TopLeftY = 0;
        viewports[i].MinDepth = 0.0f;
        viewports[i].MaxDepth = 1.0f;
    }
    context->RSSetViewports(static_cast<uint32>(outputs.size()), viewports);

    // Set the pixel shader
    context->PSSetShader(pixelShader, nullptr, 0);

    // Draw the quad
    context->DrawIndexed(6, 0, 0);

    // Clear the SRV's and RT's so the next pass can rebind freely
    // (avoids read/write hazard warnings from the runtime).
    ID3D11ShaderResourceView* srViews[16] = { nullptr };
    context->PSSetShaderResources(0, static_cast<uint32>(inputs.size()), srViews);

    ID3D11RenderTargetView* rtViews[16] = { nullptr };
    context->OMSetRenderTargets(static_cast<uint32>(outputs.size() + uaViews.size()), rtViews, nullptr);

    // The queues are consumed; callers re-add inputs/outputs per pass.
    inputs.clear();
    outputs.clear();
    uaViews.clear();

    texture = nullptr;
    D3DPERF_EndEvent();
}
コード例 #6
0
	// Releases a CPU mapping of the given subresource previously obtained
	// via Map(), delegating to the renderer singleton.
	void Unmap(UINT subResource) const {
		auto& renderer = RendererD3D11::GetInstance();
		renderer.UnmapBuffer(mTexture.get(), subResource);
	}
コード例 #7
0
	// Maps the given subresource for CPU access with the requested map
	// type/flags, delegating to the renderer singleton.
	MapData Map(UINT subResource, MAP_TYPE type, MAP_FLAG flag) const {
		auto& renderer = RendererD3D11::GetInstance();
		MapData mapped = renderer.MapBuffer(mTexture.get(), subResource, type, flag);
		return mapped;
	}
コード例 #8
0
ファイル: SH.cpp プロジェクト: DanielNeander/my-3d-engine
// Projects a cube map onto 3rd-order spherical harmonics. The source is
// first decoded into an RGBA32F texture array via a compute shader (so
// compressed/typed formats become something D3DX11SHProjectCubeMap can
// read), then projected per channel and repacked into an SH9Color.
SH9Color ProjectCubemapToSH9Color(ID3D11DeviceContext* context, ID3D11ShaderResourceView* cubeMap)
{
    // NOTE(review): writes the resource pointer straight into the smart
    // pointer's storage; presumably the smart pointer adopts the
    // GetResource reference — verify.
    ID3D11Texture2DPtr srcTexture;
    cubeMap->GetResource(reinterpret_cast<ID3D11Resource**>(&srcTexture));

    D3D11_TEXTURE2D_DESC srcDesc;
    srcTexture->GetDesc(&srcDesc);

    ID3D11DevicePtr device;
    context->GetDevice(&device);

    // Single-mip RGBA32F UAV target with the same dimensions/array size
    // (tempDesc inherits ArraySize, MiscFlags etc. from srcDesc).
    ID3D11Texture2DPtr tempTexture;
    D3D11_TEXTURE2D_DESC tempDesc = srcDesc;
    tempDesc.Format = DXGI_FORMAT_R32G32B32A32_FLOAT;
    tempDesc.MipLevels = 1;
    tempDesc.BindFlags = D3D11_BIND_UNORDERED_ACCESS;
    tempDesc.Usage = D3D11_USAGE_DEFAULT;
    DXCall(device->CreateTexture2D(&tempDesc, NULL, &tempTexture));

    ID3D11UnorderedAccessViewPtr tempUAV;
    DXCall(device->CreateUnorderedAccessView(tempTexture, NULL, &tempUAV));

    // Re-view the cube map as a 6-slice 2D array so the compute shader
    // can address the faces as array slices.
    ID3D11ShaderResourceViewPtr tempSRV;
    D3D11_SHADER_RESOURCE_VIEW_DESC srvDesc;
    srvDesc.Format = srcDesc.Format;
    srvDesc.ViewDimension = D3D11_SRV_DIMENSION_TEXTURE2DARRAY;
    srvDesc.Texture2DArray.MostDetailedMip = 0;
    srvDesc.Texture2DArray.MipLevels = srcDesc.MipLevels;
    srvDesc.Texture2DArray.FirstArraySlice = 0;
    srvDesc.Texture2DArray.ArraySize = 6;
    DXCall(device->CreateShaderResourceView(srcTexture, &srvDesc, &tempSRV));

    // Decode shader is compiled once and cached for the process lifetime.
    static const UINT32 TGSize = 1024;
    static ID3D11ComputeShaderPtr decodeShader;
    if(decodeShader.GetInterfacePtr() == NULL)
    {
        CompileOptions opts;
        opts.Add("TGSize_", TGSize);
        decodeShader.Attach(CompileCSFromFile(device, L"SampleFramework11\\Shaders\\DecodeTextureCS.hlsl", 
                                                "DecodeTextureCS", "cs_5_0", opts.Defines()));
    }

    ID3D11ShaderResourceView* srvs[1] = { tempSRV };
    context->CSSetShaderResources(0, 1, srvs);

    ID3D11UnorderedAccessView* uavs[1] = { tempUAV };
    context->CSSetUnorderedAccessViews(0, 1, uavs, NULL);

    context->CSSetShader(decodeShader, NULL, 0);

    // One thread group per TGSize-pixel row segment, one Z slice per face.
    context->Dispatch(DispatchSize(TGSize, srcDesc.Width), srcDesc.Height, 6);

    // NOTE(review): the UAV on tempTexture is still bound to the CS stage
    // while D3DX11SHProjectCubeMap reads the texture below — consider
    // unbinding CS outputs first; verify against the runtime debug layer.
    float red[9];
    float green[9];
    float blue[9];

    DXCall(D3DX11SHProjectCubeMap(context, 3, tempTexture, red, green, blue));

    // Repack the per-channel coefficients into vectorized SH form.
    SH9Color sh;
    for(UINT_PTR i = 0; i < 9; ++i)
        sh.c[i] = XMVectorSet(red[i], green[i], blue[i], 0.0f);

    return sh;
}
コード例 #9
0
ファイル: sfmf.cpp プロジェクト: sfpgmr/sfmf2
	// Constructs a video writer that encodes frames taken from the given
	// D3D11 texture (plus an audio stream described by audio_media_type)
	// into an H.264/AAC MP4 at target_path using a Media Foundation
	// SinkWriter. Sets up output/input media types for both streams,
	// begins writing, and pre-allocates the reusable sample/buffer pair.
	video_writer::video_writer(
		std::wstring& target_path, IMFMediaTypePtr& audio_media_type, ID3D11DeviceContext2Ptr& context, ID3D11Texture2DPtr& texture
		/*, unsigned int width, unsigned int height*/) : target_path_(target_path), audio_media_type_(audio_media_type), context_(context), texture_(texture)
	{
		// Frame dimensions come from the source texture itself.
		D3D11_TEXTURE2D_DESC desc = {};
		texture->GetDesc(&desc);
		width_ = desc.Width;
		height_ = desc.Height;

		const unsigned int WIDTH = width_;
		const unsigned int HEIGHT = height_;
		const unsigned int BITRATE = 3000000;
		const unsigned int ASPECT_NUM = 1;
		const unsigned int ASPECT_DENOM = 1;
		const unsigned long  BPP_IN = 32;
		const unsigned long cbMaxLength = WIDTH * HEIGHT * BPP_IN / 8;  // bytes per uncompressed RGB32 frame
		const unsigned int ONE_SECOND = RATE_NUM / RATE_DENOM;
		const unsigned int FRAME_NUM = 10 * ONE_SECOND;

		samples_per_second = 44100;
		average_bytes_per_second = 24000;
		channel_count = 2;
		bits_per_sample = 16;

		// Create the SinkWriter from the input stream

		CHK(MFCreateFile(MF_FILE_ACCESSMODE::MF_ACCESSMODE_WRITE, MF_FILE_OPENMODE::MF_OPENMODE_DELETE_IF_EXIST, MF_FILE_FLAGS::MF_FILEFLAGS_NONE, target_path.c_str(), &byte_stream_));

		CHK(MFCreateAttributes(&attr_, 10));
		CHK(attr_->SetUINT32(MF_READWRITE_ENABLE_HARDWARE_TRANSFORMS, true));
		CHK(attr_->SetUINT32(MF_READWRITE_DISABLE_CONVERTERS, false));
		CHK(attr_->SetUINT32(MF_SINK_WRITER_DISABLE_THROTTLING, true));




		IMFSinkWriterPtr sinkWriter;

		// The L".mp4" URL only selects the container type; actual output
		// goes to byte_stream_.
		CHK(MFCreateSinkWriterFromURL(L".mp4", byte_stream_.Get(), attr_.Get(), &sinkWriter));
		CHK(sinkWriter.As(&sink_writer_));
		//CHK(MFCreateSinkWriterFromURL(L".mp4", byte_stream_.Get(), attr_.Get(), &sink_writer_));



		//
		// Set up the output media types
		//

		// Video

		CHK(MFCreateMediaType(&media_type_out_));
		CHK(media_type_out_->SetGUID(MF_MT_MAJOR_TYPE, MFMediaType_Video));
		CHK(media_type_out_->SetGUID(MF_MT_SUBTYPE, MFVideoFormat_H264));
		CHK(media_type_out_->SetUINT32(MF_MT_MPEG2_PROFILE, eAVEncH264VProfile_Main));
		//CHK(media_type_out_->SetGUID(MF_MT_SUBTYPE, MFVideoFormat_RGB32));
		CHK(media_type_out_->SetUINT32(MF_MT_AVG_BITRATE, BITRATE));
		CHK(media_type_out_->SetUINT32(MF_MT_INTERLACE_MODE, MFVideoInterlace_Progressive));
		CHK(MFSetAttributeSize(media_type_out_.Get(), MF_MT_FRAME_SIZE, WIDTH, HEIGHT));
		CHK(MFSetAttributeRatio(media_type_out_.Get(), MF_MT_FRAME_RATE, RATE_NUM, RATE_DENOM));
		CHK(MFSetAttributeRatio(media_type_out_.Get(), MF_MT_PIXEL_ASPECT_RATIO, ASPECT_NUM, ASPECT_DENOM));

		CHK(sink_writer_->AddStream(media_type_out_.Get(), &stream_index_));




		IMFTransformPtr mft;
		//IMFRateSupportPtr ptr;

		//CHK(sink_writer_->GetServiceForStream(stream_index_, MF_RATE_CONTROL_SERVICE, __uuidof(IMFRateSupport), &ptr));

		// Audio

		CHK(MFCreateMediaType(&media_type_out_audio_));
		CHK(media_type_out_audio_->SetGUID(MF_MT_MAJOR_TYPE, MFMediaType_Audio));
		CHK(media_type_out_audio_->SetGUID(MF_MT_SUBTYPE, MFAudioFormat_AAC));
		CHK(media_type_out_audio_->SetUINT32(MF_MT_AUDIO_SAMPLES_PER_SECOND, samples_per_second));
		CHK(media_type_out_audio_->SetUINT32(MF_MT_AUDIO_BITS_PER_SAMPLE, bits_per_sample));
		CHK(media_type_out_audio_->SetUINT32(MF_MT_AUDIO_NUM_CHANNELS, channel_count));
		CHK(media_type_out_audio_->SetUINT32(MF_MT_AUDIO_AVG_BYTES_PER_SECOND, average_bytes_per_second));
		CHK(media_type_out_audio_->SetUINT32(MF_MT_AUDIO_BLOCK_ALIGNMENT, 1));
		CHK(sink_writer_->AddStream(media_type_out_audio_.Get(), &stream_index_audio_));

		//
		// Set up the input media types
		//

		// Video

		CHK(MFCreateMediaType(&media_type_in_));
		CHK(media_type_in_->SetGUID(MF_MT_MAJOR_TYPE, MFMediaType_Video));
		CHK(media_type_in_->SetGUID(MF_MT_SUBTYPE, MFVideoFormat_RGB32));
		CHK(media_type_in_->SetUINT32(MF_MT_INTERLACE_MODE, MFVideoInterlace_Progressive));
		CHK(MFSetAttributeSize(media_type_in_.Get(), MF_MT_FRAME_SIZE, WIDTH, HEIGHT));
		CHK(MFSetAttributeRatio(media_type_in_.Get(), MF_MT_FRAME_RATE, RATE_NUM, RATE_DENOM));
		CHK(MFSetAttributeRatio(media_type_in_.Get(), MF_MT_PIXEL_ASPECT_RATIO, ASPECT_NUM, ASPECT_DENOM));

		// Encoder setup
		//prop_variant prop;
		//IPropertyStorePtr pPropertyStore;
		//IMFAttributesPtr pEncoderParameters;

		//CHK(PSCreateMemoryPropertyStore(__uuidof(IPropertyStore), (void**) &pPropertyStore));

		//prop.value().vt = VT_BOOL;
		//prop.value().boolVal = VARIANT_FALSE;
		//CHK(pPropertyStore->SetValue(MFPKEY_VBRENABLED, prop.value()));
		//prop.value().vt = VT_I4;
		//prop.value().lVal = 100;
		//CHK(pPropertyStore->SetValue(MFPKEY_VBRQUALITY, prop.value()));

		//CHK(MFCreateAttributes(&pEncoderParameters, 5));
		//CHK(attr_->SetUnknown(MF_SINK_WRITER_ENCODER_CONFIG, pPropertyStore.Get()));

		CHK(sink_writer_->SetInputMediaType(stream_index_, media_type_in_.Get(), nullptr /*pEncoderParameters.Get()*/));

		// Audio

		CHK(MFCreateMediaType(&media_type_in_audio_));
		//CHK(media_type_in_audio_->SetGUID(MF_MT_MAJOR_TYPE, MFMediaType_Audio));
		//CHK(media_type_in_audio_->SetGUID(MF_MT_SUBTYPE, MFAudioFormat_PCM));
		//CHK(media_type_in_audio_->SetUINT32(MF_MT_AUDIO_BITS_PER_SAMPLE, bits_per_sample));
		//CHK(media_type_in_audio_->SetUINT32(MF_MT_AUDIO_SAMPLES_PER_SECOND, samples_per_second));
		//CHK(media_type_in_audio_->SetUINT32(MF_MT_AUDIO_NUM_CHANNELS, channel_count));
		// Audio input type is simply cloned from the caller-supplied type.
		// NOTE(review): CopyAllItems' HRESULT is not checked (no CHK) —
		// verify whether that is intentional.
		audio_media_type_->CopyAllItems(media_type_in_audio_.Get());
		CHK(sink_writer_->SetInputMediaType(stream_index_audio_, media_type_in_audio_.Get(), NULL));

		// Check whether a hardware encoder is being used

		{
			IMFTransformPtr transform;
			ICodecAPIPtr codec;
			GUID guid;

			CHK(sink_writer_->GetServiceForStream(stream_index_, GUID_NULL, IID_IMFTransform, &transform));

			IMFAttributesPtr attributes;
			CHK(transform->GetAttributes(&attributes));
			UINT32 l = 0;
			std::wstring str;
			bool use_hw = false;
			// A present MFT_ENUM_HARDWARE_URL_Attribute string means the
			// resolved transform is a hardware MFT.
			HRESULT hr = attributes->GetStringLength(MFT_ENUM_HARDWARE_URL_Attribute, &l);
			if (SUCCEEDED(hr))
			{
				// NOTE(review): writing through str.data() after reserve()
				// does not update the string's size and is formally UB —
				// str.resize(l) plus &str[0] would be well-defined; verify.
				str.reserve(l + 1);
				hr = attributes->GetString(MFT_ENUM_HARDWARE_URL_Attribute, (LPWSTR) str.data(), l + 1, &l);
				if (SUCCEEDED(hr)){
					use_hw = true;
					DOUT2(L"/////// HARDWARE ENCODE IS USED. ////\n");
				}
			}
		}

		//
		// Begin writing output
		//

		CHK(sink_writer_->BeginWriting());

		//
		// Create the media sample
		//

		CHK(MFCreateSample(&sample_));
		video_sample_time_ = 0;
		CHK(sample_->SetSampleDuration(hnsSampleDuration));

		//
		// Create the media buffer and add it to the media sample
		//

		CHK(MFCreateAlignedMemoryBuffer(cbMaxLength, MF_16_BYTE_ALIGNMENT, &buffer_));// 16-byte alignment
		CHK(buffer_->SetCurrentLength(cbMaxLength));
		CHK(sample_->AddBuffer(buffer_.Get()));

		//
		// Map the readback texture to learn its row pitch, then build the
		// specialized frame-copy routine for that pitch.
		sf::map<> map(context,texture, 0, D3D11_MAP_READ, 0);
		copy_image_.reset(new video_writer::copy_image(width_, height_, map.row_pitch()));
		copy_func_ = (copy_func_t)copy_image_->getCode();

	}
コード例 #10
0
// Runs one full-screen post-processing pass: binds the queued outputs and
// input SRVs, fills the PS constant buffer with the mip-adjusted size of
// each input and of the first output, sets matching viewports, and draws
// a full-screen quad with the given pixel shader. The input/output queues
// are consumed (cleared) before returning.
void PostProcessor::PostProcess(ID3D11PixelShader* pixelShader, const WCHAR* name)
{
    _ASSERT(context);

    _ASSERT(inputs.size() <= MaxInputs);

    D3DPERF_BeginEvent(0xFFFFFFFF, name);

    // Set the outputs. reinterpret_cast treats the vector of smart
    // pointers as a raw interface-pointer array (relies on the smart
    // pointer holding exactly one raw pointer).
    ID3D11RenderTargetView** renderTargets = reinterpret_cast<ID3D11RenderTargetView**>(&outputs[0]);
    context->OMSetRenderTargets(static_cast<UINT>(outputs.size()), renderTargets, NULL);

    // Set the input textures
    ID3D11ShaderResourceView** textures = reinterpret_cast<ID3D11ShaderResourceView**>(&inputs[0]);
    context->PSSetShaderResources(0, static_cast<UINT>(inputs.size()), textures);

    // Set the constants, written directly into the mapped dynamic buffer.
    D3D11_MAPPED_SUBRESOURCE mapped;
    DXCall(context->Map(psConstants, 0, D3D11_MAP_WRITE_DISCARD, 0, &mapped));
    PSConstants* constants = reinterpret_cast<PSConstants*>(mapped.pData);

    for (size_t i = 0; i < inputs.size(); ++i)
    {
        ID3D11Resource* resource;
        ID3D11Texture2DPtr texture;
        D3D11_TEXTURE2D_DESC desc;
        D3D11_SHADER_RESOURCE_VIEW_DESC srDesc;
        inputs[i]->GetDesc(&srDesc);
        UINT mipLevel = srDesc.Texture2D.MostDetailedMip;
        inputs[i]->GetResource(&resource);
        // Attach() adopts the reference added by GetResource.
        texture.Attach(reinterpret_cast<ID3D11Texture2D*>(resource));
        texture->GetDesc(&desc);
        // Size of the mip actually viewed, clamped to at least 1 texel.
        constants->InputSize[i].x = static_cast<float>(max(desc.Width / (1 << mipLevel), 1));
        constants->InputSize[i].y = static_cast<float>(max(desc.Height / (1 << mipLevel), 1));
    }

    // Output size comes from the first render target's mip slice.
    ID3D11Resource* resource;
    ID3D11Texture2DPtr texture;
    D3D11_TEXTURE2D_DESC desc;
    D3D11_RENDER_TARGET_VIEW_DESC rtDesc;
    outputs[0]->GetResource(&resource);
    outputs[0]->GetDesc(&rtDesc);
    UINT mipLevel = rtDesc.Texture2D.MipSlice;
    texture.Attach(reinterpret_cast<ID3D11Texture2D*>(resource));
    texture->GetDesc(&desc);
    constants->OutputSize.x = static_cast<float>(max(desc.Width / (1 << mipLevel), 1));
    constants->OutputSize.y = static_cast<float>(max(desc.Height / (1 << mipLevel), 1));

    context->Unmap(psConstants, 0);

    ID3D11Buffer* constantBuffers[1] = { psConstants };
    context->PSSetConstantBuffers(0, 1, constantBuffers);

    // Set the viewports — all sized to the first output's mip dimensions.
    D3D11_VIEWPORT viewports[16];
    for (UINT_PTR i = 0; i < 16; ++i)
    {
        viewports[i].Width = static_cast<float>(max(desc.Width / (1 << mipLevel), 1));
        viewports[i].Height = static_cast<float>(max(desc.Height / (1 << mipLevel), 1));
        viewports[i].TopLeftX = 0;
        viewports[i].TopLeftY = 0;
        viewports[i].MinDepth = 0.0f;
        viewports[i].MaxDepth = 1.0f;
    }
    context->RSSetViewports(static_cast<UINT>(outputs.size()), viewports);

    // Set the pixel shader
    context->PSSetShader(pixelShader, NULL, 0);

    // Draw the quad
    context->DrawIndexed(6, 0, 0);

    // NOTE(review): unlike the PostProcessorBase variant, this does not
    // unbind the SRVs/RTVs after the draw; stale bindings can cause
    // read/write hazards on the next pass — verify callers rebind fully.
    inputs.clear();
    outputs.clear();

    texture = NULL;
    D3DPERF_EndEvent();
}
コード例 #11
0
ファイル: DDuplGrabber.cpp プロジェクト: bWHYq/Lightpack
// Grabs the newest frame of every tracked screen via the Desktop
// Duplication API into screen.imgData (4 bytes per pixel). Returns
// GrabResultFrameNotReady when nothing new is available or access is
// temporarily lost, GrabResultError on hard failures, GrabResultOk when
// at least one screen was updated.
GrabResult DDuplGrabber::grabScreens()
{
    if (m_state != Allocated)
    {
        if (m_state == AccessDeniedDuplication)
        {
            // If access to Desktop Duplication is denied, as far as we know 3D application is running
            // Return black buffers and retry allocation in isReallocationNeeded
            return returnBlackBuffer();
        }
        else if (m_state == AccessDeniedDesktop)
        {
            // If access to the input desktop is denied, as far as we know a secure desktop is active
            // Retry allocation in isReallocationNeeded
            if (m_isSessionLocked)
            {
                // In case of logon screen, keeping the last image will most closely resemble what we've last seen, so GrabResultFrameNotReady is better
                return GrabResultFrameNotReady;
            }
            else
            {
                // In case of UAC prompt, that will at least be what we'll have before and after the prompt, reducing its visual impact
                return returnBlackBuffer();
            }
        }
        else
        {
            return GrabResultFrameNotReady;
        }
    }

    try
    {
        bool anyUpdate = false;
        for (GrabbedScreen& screen : _screensWithWidgets)
        {
            if (screen.associatedData == NULL)
            {
                return GrabResultError;
            }

            DDuplScreenData* screenData = (DDuplScreenData*)screen.associatedData;
            DXGI_OUTDUPL_FRAME_INFO frameInfo;
            IDXGIResourcePtr resource;
            HRESULT hr = screenData->duplication->AcquireNextFrame(ACQUIRE_TIMEOUT_INTERVAL, &frameInfo, &resource);
            if (hr == DXGI_ERROR_WAIT_TIMEOUT)
            {
                // If we have an old image for this screen, we can stick to that, otherwise we have to wait
                if (screen.imgData == NULL)
                    return GrabResultFrameNotReady;
                else
                    continue;
            }
            else if (hr == DXGI_ERROR_ACCESS_LOST || hr == DXGI_ERROR_INVALID_CALL)
            {
                // in theory, DXGI_ERROR_INVALID_CALL is returned if the frame was not released
                // it also happens in conjunction with secure desktop (even though the frame was properly released)
                m_state = LostAccess;
                DEBUG_LOW_LEVEL << Q_FUNC_INFO << "Lost Access to desktop" << screen.screenInfo.handle << ":" << (void*)(0xffffffff & hr) << ", requesting realloc";
                return GrabResultFrameNotReady;
            }
            else if (FAILED(hr))
            {
                qCritical(Q_FUNC_INFO " Failed to AcquireNextFrame: 0x%X", hr);
                return GrabResultError;
            }
            anyUpdate = true;

            // NOTE(review): the error returns below leave the acquired frame
            // unreleased; the next AcquireNextFrame may then fail with
            // DXGI_ERROR_INVALID_CALL — consider releasing before returning.
            ID3D11Texture2DPtr texture;
            hr = resource->QueryInterface(IID_ID3D11Texture2D, (void**)&texture);
            if (FAILED(hr))
            {
                qCritical(Q_FUNC_INFO " Failed to cast resource to ID3D11Texture2D: 0x%X", hr);
                return GrabResultError;
            }

            D3D11_TEXTURE2D_DESC desc;
            texture->GetDesc(&desc);

            if (desc.Width != screen.screenInfo.rect.width() || desc.Height != screen.screenInfo.rect.height())
            {
                qCritical(Q_FUNC_INFO " Dimension mismatch: screen %d x %d, texture %d x %d",
                    screen.screenInfo.rect.width(),
                    screen.screenInfo.rect.height(),
                    desc.Width,
                    desc.Height);
                return GrabResultError;
            }

            size_t sizeNeeded = desc.Height * desc.Width * 4; // Assumes 4 bytes per pixel
            if (screen.imgData == NULL)
            {
                screen.imgData = (unsigned char*)malloc(sizeNeeded);
                screen.imgDataSize = sizeNeeded;
            }
            else if (screen.imgDataSize != sizeNeeded)
            {
                qCritical(Q_FUNC_INFO " Unexpected buffer size %d where %d is expected", screen.imgDataSize, sizeNeeded);
                return GrabResultError;
            }

            // GPU frame textures are not CPU-mappable; copy through a
            // staging texture with CPU read access.
            D3D11_TEXTURE2D_DESC texDesc;
            ZeroMemory(&texDesc, sizeof(texDesc));
            texDesc.Width = desc.Width;
            texDesc.Height = desc.Height;
            texDesc.MipLevels = 1;
            texDesc.ArraySize = 1;
            texDesc.SampleDesc.Count = 1;
            texDesc.SampleDesc.Quality = 0;
            texDesc.Usage = D3D11_USAGE_STAGING;
            texDesc.Format = desc.Format;
            texDesc.BindFlags = 0;
            texDesc.CPUAccessFlags = D3D11_CPU_ACCESS_READ;
            texDesc.MiscFlags = 0;
            ID3D11Texture2DPtr textureCopy;
            hr = screenData->device->CreateTexture2D(&texDesc, NULL, &textureCopy);
            if (FAILED(hr))
            {
                qCritical(Q_FUNC_INFO " Failed to CreateTexture2D: 0x%X", hr);
                return GrabResultError;
            }

            screenData->context->CopyResource(textureCopy, texture);

            IDXGISurface1Ptr surface;
            hr = textureCopy->QueryInterface(IID_IDXGISurface1, (void**)&surface);
            if (FAILED(hr))
            {
                qCritical(Q_FUNC_INFO " Failed to cast textureCopy to IID_IDXGISurface1: 0x%X", hr);
                return GrabResultError;
            }

            DXGI_MAPPED_RECT map;
            hr = surface->Map(&map, DXGI_MAP_READ);
            if (FAILED(hr))
            {
                qCritical(Q_FUNC_INFO " Failed to get surface map: 0x%X", hr);
                return GrabResultError;
            }

            // Copy row by row: the mapped surface rows may be padded to
            // map.Pitch, while imgData rows are tightly packed.
            for (unsigned int i = 0; i < desc.Height; i++)
            {
                memcpy_s(screen.imgData + (i * desc.Width) * 4, desc.Width * 4, map.pBits + i*map.Pitch, desc.Width * 4);
            }

            // Map() must be balanced with Unmap() before the staging
            // surface is released; previously the surface stayed mapped.
            surface->Unmap();

            screen.imgFormat = mapDXGIFormatToBufferFormat(desc.Format);

            screenData->duplication->ReleaseFrame();
        }

        if (!anyUpdate)
            return GrabResultFrameNotReady;
    }
    catch (const _com_error& e) // catch by const reference, not by value
    {
        qCritical(Q_FUNC_INFO " COM Error: 0x%X", e.Error());
        return GrabResultError;
    }

    return GrabResultOk;
}