Line data Source code
1 : /**
2 : Copyright (c) 2021 Roman Katuntsev <sbkarr@stappler.org>
3 : Copyright (c) 2023 Stappler LLC <admin@stappler.dev>
4 :
5 : Permission is hereby granted, free of charge, to any person obtaining a copy
6 : of this software and associated documentation files (the "Software"), to deal
7 : in the Software without restriction, including without limitation the rights
8 : to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
9 : copies of the Software, and to permit persons to whom the Software is
10 : furnished to do so, subject to the following conditions:
11 :
12 : The above copyright notice and this permission notice shall be included in
13 : all copies or substantial portions of the Software.
14 :
15 : THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 : IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 : FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
18 : AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 : LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20 : OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
21 : THE SOFTWARE.
22 : **/
23 :
24 : #ifndef XENOLITH_BACKEND_VK_XLVKOBJECT_H_
25 : #define XENOLITH_BACKEND_VK_XLVKOBJECT_H_
26 :
27 : #include "XLVkDevice.h"
28 : #include "XLVkAllocator.h"
29 : #include "XLCoreObject.h"
30 : #include "XLCoreImageStorage.h"
31 :
32 : namespace STAPPLER_VERSIONIZED stappler::xenolith::vk {
33 :
34 : class Surface;
35 : class SwapchainImage;
36 :
 : // Parameters of a single device-memory allocation: size and alignment
 : // (as reported by Vk*MemoryRequirements), the selected memory type
 : // index, and whether the allocation is dedicated to one resource.
37 : struct DeviceMemoryInfo {
38 : VkDeviceSize size; // allocation size in bytes
39 : VkDeviceSize alignment; // required alignment for bind offsets
40 : uint32_t memoryType; // index into VkPhysicalDeviceMemoryProperties::memoryTypes
41 : bool dedicated; // NOTE(review): presumably a dedicated (non-suballocated) allocation, as in VK_KHR_dedicated_allocation — confirm in Allocator
42 : };
43 :
 : // Cache-maintenance actions requested when mapping memory; combinable
 : // as a bitmask (see SP_DEFINE_ENUM_AS_MASK below). Presumably backed by
 : // vkInvalidateMappedMemoryRanges / vkFlushMappedMemoryRanges for
 : // non-coherent memory — confirm in DeviceMemory implementation.
44 : enum class DeviceMemoryAccess {
45 : None = 0,
46 : Invalidate = 1 << 0, // make device writes visible to the host before reading
47 : Flush = 1 << 1, // make host writes visible to the device after writing
48 : Full = Invalidate | Flush
49 : };
50 :
51 : SP_DEFINE_ENUM_AS_MASK(DeviceMemoryAccess)
52 :
 : // Ref-counted wrapper around a VkDeviceMemory region. Can represent
 : // either a whole allocation owned via an Allocator, or a sub-block
 : // (Allocator::MemBlock) obtained from a DeviceMemoryPool. Provides
 : // scoped host mapping with optional invalidate/flush maintenance.
53 : class DeviceMemory : public core::Object {
54 : public:
55 1396802 : virtual ~DeviceMemory() { }
56 :
 : // Wrap a standalone allocation; takes its parameters and usage class.
57 : bool init(Allocator *, DeviceMemoryInfo, VkDeviceMemory, AllocationUsage);
 : // Wrap a pooled sub-block handed out by a DeviceMemoryPool.
58 : bool init(DeviceMemoryPool *, Allocator::MemBlock &&, AllocationUsage);
59 :
 : // True when the memory is kept permanently host-mapped.
60 : bool isPersistentMapped() const;
61 :
 : // Host pointer for persistently mapped memory (see isPersistentMapped).
62 : uint8_t *getPersistentMappedRegion() const;
63 :
64 0 : const DeviceMemoryInfo &getInfo() const { return _info; }
65 700638 : VkDeviceMemory getMemory() const { return _memory; }
66 : AllocationUsage getUsage() const { return _usage; }
67 2348708 : DeviceMemoryPool *getPool() const { return _pool; } // null for non-pooled allocations
68 :
 : // Offset of this sub-block inside the underlying VkDeviceMemory.
69 700554 : VkDeviceSize getBlockOffset() const { return _memBlock.offset; }
70 :
 : // Host mapping is possible for any usage except pure device-local ones.
71 : bool isMappable() const { return _usage != AllocationUsage::DeviceLocal && _usage != AllocationUsage::DeviceLocalLazilyAllocated; }
72 :
 : // Map [offset, offset + size) and run the callback with the host
 : // pointer and mapped size; DeviceMemoryAccess selects invalidate
 : // before / flush after the callback. Returns false on failure.
73 : bool map(const Callback<void(uint8_t *, VkDeviceSize)> &, VkDeviceSize offset = 0, VkDeviceSize size = maxOf<VkDeviceSize>(),
74 : DeviceMemoryAccess = DeviceMemoryAccess::Full);
75 :
 : // Explicit cache maintenance for an already-mapped region.
76 : void invalidateMappedRegion(VkDeviceSize offset = 0, VkDeviceSize size = maxOf<VkDeviceSize>());
77 : void flushMappedRegion(VkDeviceSize offset = 0, VkDeviceSize size = maxOf<VkDeviceSize>());
78 :
79 : protected:
80 : using core::Object::init;
81 :
 : // Builds the VkMappedMemoryRange for the given span (presumably
 : // clamped/aligned to nonCoherentAtomSize — confirm in implementation).
82 : VkMappedMemoryRange calculateMappedMemoryRange(VkDeviceSize offset, VkDeviceSize size) const;
83 :
84 : DeviceMemoryInfo _info;
85 : DeviceMemoryPool *_pool = nullptr; // owning pool, if pooled
86 : VkDeviceMemory _memory = VK_NULL_HANDLE;
87 : AllocationUsage _usage = AllocationUsage::DeviceLocal;
88 : Allocator::MemBlock _memBlock; // sub-block descriptor for pooled memory
89 : Rc<Allocator> _allocator; // keeps the allocator alive for standalone allocations
90 :
 : // Span of the currently active host mapping (0/0 when unmapped).
91 : VkDeviceSize _mappedOffset = 0;
92 : VkDeviceSize _mappedSize = 0;
93 :
 : // Serializes map/unmap and the mapped-span bookkeeping above.
94 : Mutex _mappingProtectionMutex;
95 : };
96 :
 : // Vulkan-backend implementation of core::ImageObject: wraps a VkImage,
 : // optionally owns its backing DeviceMemory, and can carry one pending
 : // ImageMemoryBarrier to be applied by a later command submission.
97 : class Image : public core::ImageObject {
98 : public:
99 1404 : virtual ~Image() { }
100 :
101 : // non-owning image wrapping (e.g. an externally managed image)
102 : bool init(Device &dev, VkImage, const ImageInfoData &, uint32_t);
103 :
104 : // owning image wrapping: adopts the image together with its memory
105 : bool init(Device &dev, VkImage, const ImageInfoData &, Rc<DeviceMemory> &&, Rc<core::DataAtlas> && = Rc<core::DataAtlas>());
106 : bool init(Device &dev, uint64_t, VkImage, const ImageInfoData &, Rc<DeviceMemory> &&, Rc<core::DataAtlas> && = Rc<core::DataAtlas>());
107 :
108 44588 : VkImage getImage() const { return _image; }
109 0 : DeviceMemory *getMemory() const { return _memory; } // null until memory is bound/adopted
110 :
 : // Pending-barrier accessors: a barrier recorded here is expected to be
 : // consumed (and dropped) by whoever records the next command buffer.
111 : void setPendingBarrier(const ImageMemoryBarrier &);
112 : const ImageMemoryBarrier *getPendingBarrier() const; // null when none pending
113 : void dropPendingBarrier();
114 :
 : // Aspect mask (color/depth/stencil) derived from the image format.
115 : VkImageAspectFlags getAspectMask() const;
116 :
 : // Bind the image to device memory at the given offset.
117 : bool bindMemory(Rc<DeviceMemory> &&, VkDeviceSize = 0);
118 :
119 : protected:
120 : using core::ImageObject::init;
121 :
122 : Rc<DeviceMemory> _memory;
123 : VkImage _image = VK_NULL_HANDLE;
124 : std::optional<ImageMemoryBarrier> _barrier; // pending layout/access transition, if any
125 : };
126 :
 : // Vulkan-backend implementation of core::BufferObject: wraps a VkBuffer
 : // bound into a DeviceMemory region at _memoryOffset. Offers host
 : // mapping helpers, bulk data access, a pending BufferMemoryBarrier
 : // slot, and a lock-free linear sub-range reservation counter.
127 : class Buffer : public core::BufferObject {
128 : public:
129 1399312 : virtual ~Buffer() { }
130 :
 : // Adopt an existing VkBuffer with its memory and bind offset.
131 : bool init(Device &dev, VkBuffer, const BufferInfo &, Rc<DeviceMemory> &&, VkDeviceSize memoryOffset);
132 :
133 6192495 : VkBuffer getBuffer() const { return _buffer; }
134 2348792 : DeviceMemory *getMemory() const { return _memory; }
135 :
 : // Pending-barrier accessors: a barrier recorded here is expected to be
 : // consumed (and dropped) by whoever records the next command buffer.
136 : void setPendingBarrier(const BufferMemoryBarrier &);
137 : const BufferMemoryBarrier *getPendingBarrier() const; // null when none pending
138 : void dropPendingBarrier();
139 :
 : // Bind the buffer to device memory at the given offset.
140 : bool bindMemory(Rc<DeviceMemory> &&, VkDeviceSize = 0);
141 :
 : // Map the buffer's range and run the callback with the host pointer;
 : // offsets are buffer-relative. DeviceMemoryAccess controls cache
 : // maintenance around the callback. Returns false on failure.
142 : bool map(const Callback<void(uint8_t *, VkDeviceSize)> &, VkDeviceSize offset = 0, VkDeviceSize size = maxOf<VkDeviceSize>(),
143 : DeviceMemoryAccess = DeviceMemoryAccess::Full);
144 :
 : // Host pointer for persistently mapped memory; optionally invalidates
 : // caches first so device writes are visible.
145 : uint8_t *getPersistentMappedRegion(bool invalidate = true);
146 :
 : // Explicit cache maintenance for the mapped region (buffer-relative).
147 : void invalidateMappedRegion(VkDeviceSize offset = 0, VkDeviceSize size = maxOf<VkDeviceSize>());
148 : void flushMappedRegion(VkDeviceSize offset = 0, VkDeviceSize size = maxOf<VkDeviceSize>());
149 :
 : // Copy bytes into / out of the buffer through a host mapping.
150 : bool setData(BytesView, VkDeviceSize offset = 0);
151 : Bytes getData(VkDeviceSize size = maxOf<VkDeviceSize>(), VkDeviceSize offset = 0);
152 :
 : // Atomically reserve an aligned block of `blockSize` bytes within the
 : // buffer and return its offset; thread-safe via _targetOffset.
153 : // returns maxOf<uint64_t>() on overflow
154 : uint64_t reserveBlock(uint64_t blockSize, uint64_t alignment);
 : // Total bytes reserved so far via reserveBlock.
155 94 : uint64_t getReservedSize() const { return _targetOffset.load(); }
156 :
157 : protected:
158 : using core::BufferObject::init;
159 :
160 : Rc<DeviceMemory> _memory;
161 : VkDeviceSize _memoryOffset = 0; // bind offset inside _memory
162 : VkBuffer _buffer = VK_NULL_HANDLE;
163 : std::optional<BufferMemoryBarrier> _barrier; // pending access transition, if any
164 :
 : // High-water mark for reserveBlock; atomic so reservations need no lock.
165 : std::atomic<uint64_t> _targetOffset = 0;
166 : };
167 :
 : // Vulkan-backend implementation of core::ImageView: owns a VkImageView
 : // created from either a raw VkImage + format, or a wrapped Image with
 : // an explicit ImageViewInfo.
168 : class ImageView : public core::ImageView {
169 : public:
170 1484 : virtual ~ImageView() { }
171 :
 : // Create a view over a raw VkImage with the given format.
172 : bool init(Device &dev, VkImage, VkFormat format);
 : // Create a view over a wrapped Image using a full view description.
173 : bool init(Device &dev, Image *, const ImageViewInfo &);
174 :
175 64538 : VkImageView getImageView() const { return _imageView; }
176 :
177 : protected:
178 : using core::ImageView::init;
179 :
180 : VkImageView _imageView = VK_NULL_HANDLE;
181 : };
182 :
 : // Vulkan-backend implementation of core::Sampler: owns a VkSampler
 : // created from a backend-agnostic SamplerInfo description.
183 : class Sampler : public core::Sampler {
184 : public:
185 168 : virtual ~Sampler() { }
186 :
 : // Create the VkSampler from the given description.
187 : bool init(Device &dev, const SamplerInfo &);
188 :
189 84 : VkSampler getSampler() const { return _sampler; }
190 :
191 : protected:
192 : using core::Sampler::init;
193 :
194 : VkSampler _sampler = VK_NULL_HANDLE;
195 : };
196 :
197 : }
198 :
199 : #endif /* XENOLITH_BACKEND_VK_XLVKOBJECT_H_ */
|