forked from DiligentGraphics/DiligentCore
-
Notifications
You must be signed in to change notification settings - Fork 0
Expand file tree
/
Copy pathShaderResourceCacheWebGPU.hpp
More file actions
301 lines (254 loc) · 12.8 KB
/
ShaderResourceCacheWebGPU.hpp
File metadata and controls
301 lines (254 loc) · 12.8 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
/*
* Copyright 2023-2026 Diligent Graphics LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
* In no event and under no legal theory, whether in tort (including negligence),
* contract, or otherwise, unless required by applicable law (such as deliberate
* and grossly negligent acts) or agreed to in writing, shall any Contributor be
* liable for any damages, including any direct, indirect, special, incidental,
* or consequential damages of any character arising as a result of this License or
* out of the use or inability to use the software (including but not limited to damages
* for loss of goodwill, work stoppage, computer failure or malfunction, or any and
* all other commercial damages or losses), even if such Contributor has been advised
* of the possibility of such damages.
*/
#pragma once
/// \file
/// Declaration of Diligent::ShaderResourceCacheWebGPU class
#include <vector>
#include <memory>
#include "ShaderResourceCacheCommon.hpp"
#include "PipelineResourceAttribsWebGPU.hpp"
#include "STDAllocator.hpp"
#include "WebGPUObjectWrappers.hpp"
#include "IndexWrapper.hpp"
namespace Diligent
{
struct IMemoryAllocator;
class DeviceContextWebGPUImpl;
/// Shader resource cache for the WebGPU backend.
///
/// All data lives in a single continuous memory chunk (m_pMemory) with the
/// following layout (see GetFirstBindGroupPtr/GetFirstResourcePtr/
/// GetFirstWGPUEntryPtr/GetInlineConstantDataPtr):
///
///   | BindGroup[0] ... BindGroup[G-1] | Resource[0] ... Resource[R-1] |
///   | WGPUBindGroupEntry[0] ... WGPUBindGroupEntry[R-1] | inline constants (Uint32) |
///
/// where G is the number of bind groups and R is the total number of resources
/// in all groups. The content type (see GetContentType()) indicates whether the
/// cache holds a signature's static resources or an SRB's resources.
class ShaderResourceCacheWebGPU : public ShaderResourceCacheBase
{
public:
    ShaderResourceCacheWebGPU(ResourceCacheContentType ContentType) noexcept;

    // clang-format off
    ShaderResourceCacheWebGPU            (const ShaderResourceCacheWebGPU&) = delete;
    ShaderResourceCacheWebGPU& operator=(const ShaderResourceCacheWebGPU&) = delete;
    ShaderResourceCacheWebGPU            (      ShaderResourceCacheWebGPU&&) = delete;
    ShaderResourceCacheWebGPU& operator=(      ShaderResourceCacheWebGPU&&) = delete;
    // clang-format on

    ~ShaderResourceCacheWebGPU();

    /// Returns the size, in bytes, of the single memory chunk required to hold
    /// NumGroups bind groups of the given sizes plus TotalInlineConstants
    /// Uint32 values of inline-constant staging storage.
    static size_t GetRequiredMemorySize(Uint32 NumGroups, const Uint32* GroupSizes, Uint32 TotalInlineConstants = 0);

    /// Allocates the memory chunk from MemAllocator and constructs the bind
    /// groups, resources, and WGPU entries in place.
    void InitializeGroups(IMemoryAllocator& MemAllocator, Uint32 NumGroups, const Uint32* GroupSizes, Uint32 TotalInlineConstants = 0);

    /// Constructs ArraySize resources of the given entry type starting at
    /// Offset in bind group GroupIdx.
    void InitializeResources(Uint32             GroupIdx,
                             Uint32             Offset,
                             Uint32             ArraySize,
                             BindGroupEntryType Type,
                             bool               HasImmutableSampler);

    /// Initializes the resource at Offset in bind group GroupIdx as an inline
    /// constant buffer backed by NumInlineConstants Uint32 values of staging
    /// storage starting at InlineConstantOffset within the chunk.
    void InitializeInlineConstantBuffer(Uint32                       GroupIdx,
                                        Uint32                       Offset,
                                        Uint32                       InlineConstantOffset,
                                        Uint32                       NumInlineConstants,
                                        RefCntAutoPtr<IDeviceObject> pObject = {});

    /// Describes a single resource bound in the cache.
    struct Resource
    {
        explicit Resource(BindGroupEntryType _Type, bool _HasImmutableSampler) noexcept :
            Type{_Type},
            HasImmutableSampler{_HasImmutableSampler}
        {
            VERIFY(Type == BindGroupEntryType::Texture || Type == BindGroupEntryType::Sampler || !HasImmutableSampler,
                   "Immutable sampler can only be assigned to a texture or a sampler");
        }

        // Constructor used for inline constant buffers; _pInlineConstantData
        // points into the cache's staging storage for this buffer.
        explicit Resource(BindGroupEntryType _Type, void* _pInlineConstantData = nullptr) noexcept :
            Type{_Type},
            HasImmutableSampler{false},
            pInlineConstantData{_pInlineConstantData}
        {
            VERIFY(Type == BindGroupEntryType::UniformBufferDynamic, "Inline constant buffer must be of type UniformBufferDynamic");
        }

        // clang-format off
        Resource           (const Resource&)  = delete;
        Resource           (      Resource&&) = delete;
        Resource& operator=(const Resource&)  = delete;
        Resource& operator=(      Resource&&) = delete;

        // Byte offsets below assume 64-bit pointers.
/* 0 */ const BindGroupEntryType     Type;
/* 1 */ const bool                   HasImmutableSampler;
/*2-3*/ // Unused

/* 4 */ Uint32                       BufferDynamicOffset = 0;
/* 8 */ RefCntAutoPtr<IDeviceObject> pObject;

        // For uniform and storage buffers only
/*16 */ Uint64                       BufferBaseOffset = 0;
/*24 */ Uint64                       BufferRangeSize  = 0;

        // For inline constant buffers only - pointer to the staging data
/*32 */ void* const                  pInlineConstantData = nullptr;
        // clang-format on

        void SetUniformBuffer(RefCntAutoPtr<IDeviceObject>&& _pBuffer, Uint64 _RangeOffset, Uint64 _RangeSize);
        void SetStorageBuffer(RefCntAutoPtr<IDeviceObject>&& _pBufferView);

        // Writes inline constant data to the staging buffer.
        // IMPORTANT: Does NOT call UpdateRevision() - inline constants can change after SRB commit.
        void SetInlineConstants(const void* pData, Uint32 FirstConstant, Uint32 NumConstants)
        {
            VERIFY(pInlineConstantData != nullptr, "Inline constant data pointer is not initialized");
            VERIFY(pData != nullptr, "Source data is null");
            VERIFY(FirstConstant + NumConstants <= BufferRangeSize / sizeof(Uint32),
                   "Too many constants (", FirstConstant + NumConstants, ") for the allocated space (", BufferRangeSize / sizeof(Uint32), ")");
            memcpy(static_cast<Uint32*>(pInlineConstantData) + FirstConstant, pData, NumConstants * sizeof(Uint32));
        }

        template <typename ResType>
        Uint32 GetDynamicBufferOffset(const DeviceContextWebGPUImpl* pCtx) const;

        // True if a resource object is bound.
        explicit operator bool() const { return pObject != nullptr; }
    };

    /// A continuous run of resources that maps to a single WGPUBindGroup.
    class BindGroup
    {
    public:
        // clang-format off
        BindGroup(Uint32 NumResources, Resource* pResources, WGPUBindGroupEntry* pwgpuEntries) :
            m_NumResources{NumResources},
            m_pResources  {pResources  },
            m_wgpuEntries {pwgpuEntries}
        {}

        BindGroup           (const BindGroup&)  = delete;
        BindGroup           (      BindGroup&&) = delete;
        BindGroup& operator=(const BindGroup&)  = delete;
        BindGroup& operator=(      BindGroup&&) = delete;
        // clang-format on

        const Resource& GetResource(Uint32 CacheOffset) const
        {
            VERIFY(CacheOffset < m_NumResources, "Offset ", CacheOffset, " is out of range");
            return m_pResources[CacheOffset];
        }

        Uint32 GetSize() const { return m_NumResources; }

        WGPUBindGroup GetWGPUBindGroup() const
        {
            return m_wgpuBindGroup;
        }

    private:
        // clang-format off
        // Byte offsets below assume 64-bit pointers.
/* 0 */ const Uint32                    m_NumResources = 0;
/* 4 */ bool                            m_IsDirty      = true;
/* 8 */ Resource* const                 m_pResources   = nullptr;
/*16 */ WGPUBindGroupEntry* const       m_wgpuEntries  = nullptr;
/*24 */ WebGPUBindGroupWrapper          m_wgpuBindGroup;
/*40 */ // End of structure
        // clang-format on

    private:
        friend ShaderResourceCacheWebGPU;

        Resource& GetResource(Uint32 CacheOffset)
        {
            VERIFY(CacheOffset < m_NumResources, "Offset ", CacheOffset, " is out of range");
            return m_pResources[CacheOffset];
        }

        WGPUBindGroupEntry& GetWGPUEntry(Uint32 CacheOffset)
        {
            VERIFY(CacheOffset < m_NumResources, "Offset ", CacheOffset, " is out of range");
            return m_wgpuEntries[CacheOffset];
        }
    };

    const BindGroup& GetBindGroup(Uint32 Index) const
    {
        VERIFY_EXPR(Index < m_NumBindGroups);
        return reinterpret_cast<const BindGroup*>(m_pMemory.get())[Index];
    }

    // Sets the resource at the given bind group index and offset
    const Resource& SetResource(Uint32                       BindGroupIdx,
                                Uint32                       CacheOffset,
                                RefCntAutoPtr<IDeviceObject> pObject,
                                Uint64                       BufferBaseOffset = 0,
                                Uint64                       BufferRangeSize  = 0);

    // Unbinds the resource at the given bind group index and offset.
    const Resource& ResetResource(Uint32 SetIndex,
                                  Uint32 Offset)
    {
        return SetResource(SetIndex, Offset, {});
    }

    /// Updates the dynamic offset of the buffer at the given location.
    void SetDynamicBufferOffset(Uint32 DescrSetIndex,
                                Uint32 CacheOffset,
                                Uint32 DynamicBufferOffset);

    Uint32 GetNumBindGroups() const { return m_NumBindGroups; }
    bool   HasDynamicResources() const { return m_NumDynamicBuffers > 0; }

    bool HasInlineConstants() const
    {
        return m_HasInlineConstants != 0;
    }

    ResourceCacheContentType GetContentType() const { return static_cast<ResourceCacheContentType>(m_ContentType); }

    /// (Re)creates the WGPUBindGroup for group GroupIndex if needed and returns it.
    WGPUBindGroup UpdateBindGroup(WGPUDevice wgpuDevice, Uint32 GroupIndex, WGPUBindGroupLayout wgpuGroupLayout);

    // Returns true if any dynamic offset has changed
    bool GetDynamicBufferOffsets(const DeviceContextWebGPUImpl* pCtx,
                                 std::vector<uint32_t>&         Offsets,
                                 Uint32                         GroupIdx) const;

    // Writes inline constant data to the staging buffer
    void SetInlineConstants(Uint32 BindGroupIdx, Uint32 CacheOffset, const void* pConstants, Uint32 FirstConstant, Uint32 NumConstants);

    // Copies inline constant data from source cache to this cache
    void CopyInlineConstants(const ShaderResourceCacheWebGPU& SrcCache,
                             Uint32                           BindGroupIdx,
                             Uint32                           SrcCacheOffset,
                             Uint32                           DstCacheOffset,
                             Uint32                           NumConstants);

#ifdef DILIGENT_DEBUG
    // For debug purposes only
    void DbgVerifyResourceInitialization() const;
    void DbgVerifyDynamicBuffersCounter() const;
#endif

private:
#ifdef DILIGENT_DEBUG
    const Resource* GetFirstResourcePtr() const
    {
        return AlignUpPtr(reinterpret_cast<const Resource*>(static_cast<const BindGroup*>(m_pMemory.get()) + m_NumBindGroups));
    }
#endif

    // Bind groups are at the beginning of the memory chunk.
    BindGroup* GetFirstBindGroupPtr()
    {
        return reinterpret_cast<BindGroup*>(m_pMemory.get());
    }

    // Resources follow the bind groups.
    Resource* GetFirstResourcePtr()
    {
        Resource* pFirstResource = AlignUpPtr(reinterpret_cast<Resource*>(GetFirstBindGroupPtr() + m_NumBindGroups));
        VERIFY(reinterpret_cast<Uint8*>(pFirstResource + m_TotalResources) <= m_DbgMemoryEnd,
               "Resource storage exceeds allocated memory. This indicates a bug in memory calculation logic");
        return pFirstResource;
    }

    // WGPU entries follow the resources.
    WGPUBindGroupEntry* GetFirstWGPUEntryPtr()
    {
        WGPUBindGroupEntry* pFirstWGPUEntry = AlignUpPtr(reinterpret_cast<WGPUBindGroupEntry*>(GetFirstResourcePtr() + m_TotalResources));
        VERIFY(reinterpret_cast<Uint8*>(pFirstWGPUEntry + m_TotalResources) <= m_DbgMemoryEnd,
               "WGPUBindGroupEntry storage exceeds allocated memory. This indicates a bug in memory calculation logic");
        return pFirstWGPUEntry;
    }

    // Inline-constant staging data is at the end of the chunk; Offset is in Uint32s.
    Uint32* GetInlineConstantDataPtr(Uint32 Offset = 0)
    {
        Uint32* pFirstInlineConstantData = AlignUpPtr(reinterpret_cast<Uint32*>(GetFirstWGPUEntryPtr() + m_TotalResources));
        VERIFY(reinterpret_cast<Uint8*>(pFirstInlineConstantData + Offset) <= m_DbgMemoryEnd,
               "Inline constant storage exceeds allocated memory. This indicates a bug in memory calculation logic");
        return pFirstInlineConstantData + Offset;
    }

    BindGroup& GetBindGroup(Uint32 Index)
    {
        VERIFY_EXPR(Index < m_NumBindGroups);
        return GetFirstBindGroupPtr()[Index];
    }

private:
    std::unique_ptr<void, STDDeleter<void, IMemoryAllocator>> m_pMemory;

    Uint16 m_NumBindGroups = 0;

    // The total actual number of dynamic buffers (that were created with USAGE_DYNAMIC) bound in the resource cache
    // regardless of the variable type.
    Uint16 m_NumDynamicBuffers = 0;

    Uint32 m_TotalResources : 30;

    // Indicates what types of resources are stored in the cache
    const Uint32 m_ContentType : 1;

    // Indicates whether the cache has inline constants
    Uint32 m_HasInlineConstants : 1;

#ifdef DILIGENT_DEBUG
    // Debug array that stores flags indicating if resources in the cache have been initialized
    std::vector<std::vector<bool>> m_DbgInitializedResources;
    std::vector<bool>              m_DbgAssignedInlineConstants;
    Uint8*                         m_DbgMemoryEnd = nullptr;
#endif
};
} // namespace Diligent