vkQuake2 doxygen  1.0 dev
vk_mem_alloc.h
Go to the documentation of this file.
1 //
2 // Copyright (c) 2017-2019 Advanced Micro Devices, Inc. All rights reserved.
3 //
4 // Permission is hereby granted, free of charge, to any person obtaining a copy
5 // of this software and associated documentation files (the "Software"), to deal
6 // in the Software without restriction, including without limitation the rights
7 // to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
8 // copies of the Software, and to permit persons to whom the Software is
9 // furnished to do so, subject to the following conditions:
10 //
11 // The above copyright notice and this permission notice shall be included in
12 // all copies or substantial portions of the Software.
13 //
14 // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 // IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 // FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
17 // AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
18 // LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
19 // OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
20 // THE SOFTWARE.
21 //
22 
23 #ifndef AMD_VULKAN_MEMORY_ALLOCATOR_H
24 #define AMD_VULKAN_MEMORY_ALLOCATOR_H
25 
26 #ifdef __cplusplus
27 extern "C" {
28 #endif
29 
1685 /*
1686 Define this macro to 0/1 to disable/enable support for recording functionality,
1687 available through VmaAllocatorCreateInfo::pRecordSettings.
1688 */
1689 #ifndef VMA_RECORDING_ENABLED
1690  #define VMA_RECORDING_ENABLED 0
1691 #endif
1692 
1693 #ifndef NOMINMAX
1694  #define NOMINMAX // For windows.h
1695 #endif
1696 
1697 #ifndef VULKAN_H_
1698  #include <vulkan/vulkan.h>
1699 #endif
1700 
1701 #if VMA_RECORDING_ENABLED
1702  #include <windows.h>
1703 #endif
1704 
1705 #if !defined(VMA_DEDICATED_ALLOCATION)
1706  #if VK_KHR_get_memory_requirements2 && VK_KHR_dedicated_allocation
1707  #define VMA_DEDICATED_ALLOCATION 1
1708  #else
1709  #define VMA_DEDICATED_ALLOCATION 0
1710  #endif
1711 #endif
1712 
1713 #if !defined(VMA_BIND_MEMORY2)
1714  #if VK_KHR_bind_memory2
1715  #define VMA_BIND_MEMORY2 1
1716  #else
1717  #define VMA_BIND_MEMORY2 0
1718  #endif
1719 #endif
1720 
1730 VK_DEFINE_HANDLE(VmaAllocator)
1731 
1732 typedef void (VKAPI_PTR *PFN_vmaAllocateDeviceMemoryFunction)(
1734  VmaAllocator allocator,
1735  uint32_t memoryType,
1736  VkDeviceMemory memory,
1737  VkDeviceSize size);
1740  VmaAllocator allocator,
1741  uint32_t memoryType,
1742  VkDeviceMemory memory,
1743  VkDeviceSize size);
1744 
1758 
1800 
1803 typedef VkFlags VmaAllocatorCreateFlags;
1804 
1809 typedef struct VmaVulkanFunctions {
1810  PFN_vkGetPhysicalDeviceProperties vkGetPhysicalDeviceProperties;
1811  PFN_vkGetPhysicalDeviceMemoryProperties vkGetPhysicalDeviceMemoryProperties;
1812  PFN_vkAllocateMemory vkAllocateMemory;
1813  PFN_vkFreeMemory vkFreeMemory;
1814  PFN_vkMapMemory vkMapMemory;
1815  PFN_vkUnmapMemory vkUnmapMemory;
1816  PFN_vkFlushMappedMemoryRanges vkFlushMappedMemoryRanges;
1817  PFN_vkInvalidateMappedMemoryRanges vkInvalidateMappedMemoryRanges;
1818  PFN_vkBindBufferMemory vkBindBufferMemory;
1819  PFN_vkBindImageMemory vkBindImageMemory;
1820  PFN_vkGetBufferMemoryRequirements vkGetBufferMemoryRequirements;
1821  PFN_vkGetImageMemoryRequirements vkGetImageMemoryRequirements;
1822  PFN_vkCreateBuffer vkCreateBuffer;
1823  PFN_vkDestroyBuffer vkDestroyBuffer;
1824  PFN_vkCreateImage vkCreateImage;
1825  PFN_vkDestroyImage vkDestroyImage;
1826  PFN_vkCmdCopyBuffer vkCmdCopyBuffer;
1827 #if VMA_DEDICATED_ALLOCATION
1828  PFN_vkGetBufferMemoryRequirements2KHR vkGetBufferMemoryRequirements2KHR;
1829  PFN_vkGetImageMemoryRequirements2KHR vkGetImageMemoryRequirements2KHR;
1830 #endif
1831 #if VMA_BIND_MEMORY2
1832  PFN_vkBindBufferMemory2KHR vkBindBufferMemory2KHR;
1833  PFN_vkBindImageMemory2KHR vkBindImageMemory2KHR;
1834 #endif
1836 
1838 typedef enum VmaRecordFlagBits {
1845 
1848 typedef VkFlags VmaRecordFlags;
1849 
1851 typedef struct VmaRecordSettings
1852 {
1862  const char* pFilePath;
1864 
1867 {
1871 
1872  VkPhysicalDevice physicalDevice;
1874 
1875  VkDevice device;
1877 
1880 
1881  const VkAllocationCallbacks* pAllocationCallbacks;
1883 
1923  const VkDeviceSize* pHeapSizeLimit;
1944 
1946 VkResult vmaCreateAllocator(
1947  const VmaAllocatorCreateInfo* pCreateInfo,
1948  VmaAllocator* pAllocator);
1949 
1951 void vmaDestroyAllocator(
1952  VmaAllocator allocator);
1953 
1959  VmaAllocator allocator,
1960  const VkPhysicalDeviceProperties** ppPhysicalDeviceProperties);
1961 
1967  VmaAllocator allocator,
1968  const VkPhysicalDeviceMemoryProperties** ppPhysicalDeviceMemoryProperties);
1969 
1977  VmaAllocator allocator,
1978  uint32_t memoryTypeIndex,
1979  VkMemoryPropertyFlags* pFlags);
1980 
1990  VmaAllocator allocator,
1991  uint32_t frameIndex);
1992 
1995 typedef struct VmaStatInfo
1996 {
1998  uint32_t blockCount;
2004  VkDeviceSize usedBytes;
2006  VkDeviceSize unusedBytes;
2009 } VmaStatInfo;
2010 
2012 typedef struct VmaStats
2013 {
2014  VmaStatInfo memoryType[VK_MAX_MEMORY_TYPES];
2015  VmaStatInfo memoryHeap[VK_MAX_MEMORY_HEAPS];
2017 } VmaStats;
2018 
2020 void vmaCalculateStats(
2021  VmaAllocator allocator,
2022  VmaStats* pStats);
2023 
2024 #ifndef VMA_STATS_STRING_ENABLED
2025 #define VMA_STATS_STRING_ENABLED 1
2026 #endif
2027 
2028 #if VMA_STATS_STRING_ENABLED
2029 
2031 
2033 void vmaBuildStatsString(
2034  VmaAllocator allocator,
2035  char** ppStatsString,
2036  VkBool32 detailedMap);
2037 
2038 void vmaFreeStatsString(
2039  VmaAllocator allocator,
2040  char* pStatsString);
2041 
2042 #endif // #if VMA_STATS_STRING_ENABLED
2043 
2052 VK_DEFINE_HANDLE(VmaPool)
2053 
2054 typedef enum VmaMemoryUsage
2055 {
2104 } VmaMemoryUsage;
2105 
2115 
2176 
2192 
2202 
2209 
2213 
2215 {
2228  VkMemoryPropertyFlags requiredFlags;
2233  VkMemoryPropertyFlags preferredFlags;
2241  uint32_t memoryTypeBits;
2254  void* pUserData;
2256 
2273 VkResult vmaFindMemoryTypeIndex(
2274  VmaAllocator allocator,
2275  uint32_t memoryTypeBits,
2276  const VmaAllocationCreateInfo* pAllocationCreateInfo,
2277  uint32_t* pMemoryTypeIndex);
2278 
2292  VmaAllocator allocator,
2293  const VkBufferCreateInfo* pBufferCreateInfo,
2294  const VmaAllocationCreateInfo* pAllocationCreateInfo,
2295  uint32_t* pMemoryTypeIndex);
2296 
2310  VmaAllocator allocator,
2311  const VkImageCreateInfo* pImageCreateInfo,
2312  const VmaAllocationCreateInfo* pAllocationCreateInfo,
2313  uint32_t* pMemoryTypeIndex);
2314 
2335 
2352 
2363 
2369 
2372 typedef VkFlags VmaPoolCreateFlags;
2373 
2376 typedef struct VmaPoolCreateInfo {
2391  VkDeviceSize blockSize;
2420 
2423 typedef struct VmaPoolStats {
2426  VkDeviceSize size;
2429  VkDeviceSize unusedSize;
2442  VkDeviceSize unusedRangeSizeMax;
2445  size_t blockCount;
2446 } VmaPoolStats;
2447 
2454 VkResult vmaCreatePool(
2455  VmaAllocator allocator,
2456  const VmaPoolCreateInfo* pCreateInfo,
2457  VmaPool* pPool);
2458 
2461 void vmaDestroyPool(
2462  VmaAllocator allocator,
2463  VmaPool pool);
2464 
2471 void vmaGetPoolStats(
2472  VmaAllocator allocator,
2473  VmaPool pool,
2474  VmaPoolStats* pPoolStats);
2475 
2483  VmaAllocator allocator,
2484  VmaPool pool,
2485  size_t* pLostAllocationCount);
2486 
2501 VkResult vmaCheckPoolCorruption(VmaAllocator allocator, VmaPool pool);
2502 
2527 VK_DEFINE_HANDLE(VmaAllocation)
2528 
2529 
2531 typedef struct VmaAllocationInfo {
2536  uint32_t memoryType;
2545  VkDeviceMemory deviceMemory;
2550  VkDeviceSize offset;
2555  VkDeviceSize size;
2569  void* pUserData;
2571 
2582 VkResult vmaAllocateMemory(
2583  VmaAllocator allocator,
2584  const VkMemoryRequirements* pVkMemoryRequirements,
2585  const VmaAllocationCreateInfo* pCreateInfo,
2586  VmaAllocation* pAllocation,
2587  VmaAllocationInfo* pAllocationInfo);
2588 
2608 VkResult vmaAllocateMemoryPages(
2609  VmaAllocator allocator,
2610  const VkMemoryRequirements* pVkMemoryRequirements,
2611  const VmaAllocationCreateInfo* pCreateInfo,
2612  size_t allocationCount,
2613  VmaAllocation* pAllocations,
2614  VmaAllocationInfo* pAllocationInfo);
2615 
2623  VmaAllocator allocator,
2624  VkBuffer buffer,
2625  const VmaAllocationCreateInfo* pCreateInfo,
2626  VmaAllocation* pAllocation,
2627  VmaAllocationInfo* pAllocationInfo);
2628 
2630 VkResult vmaAllocateMemoryForImage(
2631  VmaAllocator allocator,
2632  VkImage image,
2633  const VmaAllocationCreateInfo* pCreateInfo,
2634  VmaAllocation* pAllocation,
2635  VmaAllocationInfo* pAllocationInfo);
2636 
2641 void vmaFreeMemory(
2642  VmaAllocator allocator,
2643  VmaAllocation allocation);
2644 
2655 void vmaFreeMemoryPages(
2656  VmaAllocator allocator,
2657  size_t allocationCount,
2658  VmaAllocation* pAllocations);
2659 
2666 VkResult vmaResizeAllocation(
2667  VmaAllocator allocator,
2668  VmaAllocation allocation,
2669  VkDeviceSize newSize);
2670 
2688  VmaAllocator allocator,
2689  VmaAllocation allocation,
2690  VmaAllocationInfo* pAllocationInfo);
2691 
2706 VkBool32 vmaTouchAllocation(
2707  VmaAllocator allocator,
2708  VmaAllocation allocation);
2709 
2724  VmaAllocator allocator,
2725  VmaAllocation allocation,
2726  void* pUserData);
2727 
2739  VmaAllocator allocator,
2740  VmaAllocation* pAllocation);
2741 
2776 VkResult vmaMapMemory(
2777  VmaAllocator allocator,
2778  VmaAllocation allocation,
2779  void** ppData);
2780 
2785 void vmaUnmapMemory(
2786  VmaAllocator allocator,
2787  VmaAllocation allocation);
2788 
2805 void vmaFlushAllocation(VmaAllocator allocator, VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size);
2806 
2823 void vmaInvalidateAllocation(VmaAllocator allocator, VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size);
2824 
2841 VkResult vmaCheckCorruption(VmaAllocator allocator, uint32_t memoryTypeBits);
2842 
2849 VK_DEFINE_HANDLE(VmaDefragmentationContext)
2850 
2851 typedef enum VmaDefragmentationFlagBits {
2855 typedef VkFlags VmaDefragmentationFlags;
2856 
2861 typedef struct VmaDefragmentationInfo2 {
2885  uint32_t poolCount;
2906  VkDeviceSize maxCpuBytesToMove;
2916  VkDeviceSize maxGpuBytesToMove;
2930  VkCommandBuffer commandBuffer;
2932 
2937 typedef struct VmaDefragmentationInfo {
2942  VkDeviceSize maxBytesToMove;
2949 
2951 typedef struct VmaDefragmentationStats {
2953  VkDeviceSize bytesMoved;
2955  VkDeviceSize bytesFreed;
2961 
2991 VkResult vmaDefragmentationBegin(
2992  VmaAllocator allocator,
2993  const VmaDefragmentationInfo2* pInfo,
2994  VmaDefragmentationStats* pStats,
2995  VmaDefragmentationContext *pContext);
2996 
3002 VkResult vmaDefragmentationEnd(
3003  VmaAllocator allocator,
3004  VmaDefragmentationContext context);
3005 
3046 VkResult vmaDefragment(
3047  VmaAllocator allocator,
3048  VmaAllocation* pAllocations,
3049  size_t allocationCount,
3050  VkBool32* pAllocationsChanged,
3051  const VmaDefragmentationInfo *pDefragmentationInfo,
3052  VmaDefragmentationStats* pDefragmentationStats);
3053 
3066 VkResult vmaBindBufferMemory(
3067  VmaAllocator allocator,
3068  VmaAllocation allocation,
3069  VkBuffer buffer);
3070 
3081 VkResult vmaBindBufferMemory2(
3082  VmaAllocator allocator,
3083  VmaAllocation allocation,
3084  VkDeviceSize allocationLocalOffset,
3085  VkBuffer buffer,
3086  const void* pNext);
3087 
3100 VkResult vmaBindImageMemory(
3101  VmaAllocator allocator,
3102  VmaAllocation allocation,
3103  VkImage image);
3104 
3115 VkResult vmaBindImageMemory2(
3116  VmaAllocator allocator,
3117  VmaAllocation allocation,
3118  VkDeviceSize allocationLocalOffset,
3119  VkImage image,
3120  const void* pNext);
3121 
3148 VkResult vmaCreateBuffer(
3149  VmaAllocator allocator,
3150  const VkBufferCreateInfo* pBufferCreateInfo,
3151  const VmaAllocationCreateInfo* pAllocationCreateInfo,
3152  VkBuffer* pBuffer,
3153  VmaAllocation* pAllocation,
3154  VmaAllocationInfo* pAllocationInfo);
3155 
3167 void vmaDestroyBuffer(
3168  VmaAllocator allocator,
3169  VkBuffer buffer,
3170  VmaAllocation allocation);
3171 
3173 VkResult vmaCreateImage(
3174  VmaAllocator allocator,
3175  const VkImageCreateInfo* pImageCreateInfo,
3176  const VmaAllocationCreateInfo* pAllocationCreateInfo,
3177  VkImage* pImage,
3178  VmaAllocation* pAllocation,
3179  VmaAllocationInfo* pAllocationInfo);
3180 
3192 void vmaDestroyImage(
3193  VmaAllocator allocator,
3194  VkImage image,
3195  VmaAllocation allocation);
3196 
3197 #ifdef __cplusplus
3198 }
3199 #endif
3200 
3201 #endif // AMD_VULKAN_MEMORY_ALLOCATOR_H
3202 
3203 // For Visual Studio IntelliSense.
3204 #if defined(__cplusplus) && defined(__INTELLISENSE__)
3205 #define VMA_IMPLEMENTATION
3206 #endif
3207 
3208 #ifdef VMA_IMPLEMENTATION
3209 #undef VMA_IMPLEMENTATION
3210 
3211 #include <cstdint>
3212 #include <cstdlib>
3213 #include <cstring>
3214 
3215 /*******************************************************************************
3216 CONFIGURATION SECTION
3217 
3218 Define some of these macros before each #include of this header or change them
3219 here if you need other than the default behavior depending on your environment.
3220 */
3221 
3222 /*
3223 Define this macro to 1 to make the library fetch pointers to Vulkan functions
3224 internally, like:
3225 
3226  vulkanFunctions.vkAllocateMemory = &vkAllocateMemory;
3227 
3228 Define to 0 if you are going to provide your own pointers to Vulkan functions via
3229 VmaAllocatorCreateInfo::pVulkanFunctions.
3230 */
3231 #if !defined(VMA_STATIC_VULKAN_FUNCTIONS) && !defined(VK_NO_PROTOTYPES)
3232 #define VMA_STATIC_VULKAN_FUNCTIONS 1
3233 #endif
3234 
3235 // Define this macro to 1 to make the library use STL containers instead of its own implementation.
3236 //#define VMA_USE_STL_CONTAINERS 1
3237 
3238 /* Set this macro to 1 to make the library including and using STL containers:
3239 std::pair, std::vector, std::list, std::unordered_map.
3240 
3241 Set it to 0 or undefined to make the library using its own implementation of
3242 the containers.
3243 */
3244 #if VMA_USE_STL_CONTAINERS
3245  #define VMA_USE_STL_VECTOR 1
3246  #define VMA_USE_STL_UNORDERED_MAP 1
3247  #define VMA_USE_STL_LIST 1
3248 #endif
3249 
3250 #ifndef VMA_USE_STL_SHARED_MUTEX
3251  // Compiler conforms to C++17.
3252  #if __cplusplus >= 201703L
3253  #define VMA_USE_STL_SHARED_MUTEX 1
3254  // Visual studio defines __cplusplus properly only when passed additional parameter: /Zc:__cplusplus
3255  // Otherwise it's always 199711L, despite shared_mutex works since Visual Studio 2015 Update 2.
3256  // See: https://blogs.msdn.microsoft.com/vcblog/2018/04/09/msvc-now-correctly-reports-__cplusplus/
3257  #elif defined(_MSC_FULL_VER) && _MSC_FULL_VER >= 190023918 && __cplusplus == 199711L && _MSVC_LANG >= 201703L
3258  #define VMA_USE_STL_SHARED_MUTEX 1
3259  #else
3260  #define VMA_USE_STL_SHARED_MUTEX 0
3261  #endif
3262 #endif
3263 
3264 /*
3265 THESE INCLUDES ARE NOT ENABLED BY DEFAULT.
3266 Library has its own container implementation.
3267 */
3268 #if VMA_USE_STL_VECTOR
3269  #include <vector>
3270 #endif
3271 
3272 #if VMA_USE_STL_UNORDERED_MAP
3273  #include <unordered_map>
3274 #endif
3275 
3276 #if VMA_USE_STL_LIST
3277  #include <list>
3278 #endif
3279 
3280 /*
3281 Following headers are used in this CONFIGURATION section only, so feel free to
3282 remove them if not needed.
3283 */
3284 #include <cassert> // for assert
3285 #include <algorithm> // for min, max
3286 #include <mutex>
3287 
3288 #ifndef VMA_NULL
3289  // Value used as null pointer. Define it to e.g.: nullptr, NULL, 0, (void*)0.
3290  #define VMA_NULL nullptr
3291 #endif
3292 
3293 #if defined(__ANDROID_API__) && (__ANDROID_API__ < 16)
3294 #include <cstdlib>
// Shim for Android API < 16, which lacks aligned_alloc(): forwards to memalign().
// Memory returned by memalign() can be released with regular free().
void *aligned_alloc(size_t alignment, size_t size)
{
    // alignment must be >= sizeof(void*)
    if(alignment < sizeof(void*))
    {
        alignment = sizeof(void*);
    }

    return memalign(alignment, size);
}
3305 #elif defined(__APPLE__) || defined(__ANDROID__)
3306 #include <cstdlib>
// Shim for Apple and newer Android platforms, which lack aligned_alloc():
// implemented on top of posix_memalign(). Returns VMA_NULL on failure.
void *aligned_alloc(size_t alignment, size_t size)
{
    // alignment must be >= sizeof(void*)
    if(alignment < sizeof(void*))
    {
        alignment = sizeof(void*);
    }

    void *pointer;
    if(posix_memalign(&pointer, alignment, size) == 0)
        return pointer;
    return VMA_NULL; // allocation failed or alignment not a power-of-2 multiple of sizeof(void*)
}
3320 #endif
3321 
3322 // If your compiler is not compatible with C++11 and definition of
3323 // aligned_alloc() function is missing, uncommenting the following line may help:
3324 
3325 //#include <malloc.h>
3326 
3327 // Normal assert to check for programmer's errors, especially in Debug configuration.
3328 #ifndef VMA_ASSERT
3329  #ifdef _DEBUG
3330  #define VMA_ASSERT(expr) assert(expr)
3331  #else
3332  #define VMA_ASSERT(expr)
3333  #endif
3334 #endif
3335 
3336 // Assert that will be called very often, like inside data structures e.g. operator[].
3337 // Making it non-empty can make program slow.
3338 #ifndef VMA_HEAVY_ASSERT
3339  #ifdef _DEBUG
3340  #define VMA_HEAVY_ASSERT(expr) //VMA_ASSERT(expr)
3341  #else
3342  #define VMA_HEAVY_ASSERT(expr)
3343  #endif
3344 #endif
3345 
3346 #ifndef VMA_ALIGN_OF
3347  #define VMA_ALIGN_OF(type) (__alignof(type))
3348 #endif
3349 
3350 #ifndef VMA_SYSTEM_ALIGNED_MALLOC
3351  #if defined(_WIN32)
3352  #define VMA_SYSTEM_ALIGNED_MALLOC(size, alignment) (_aligned_malloc((size), (alignment)))
3353  #else
3354  #define VMA_SYSTEM_ALIGNED_MALLOC(size, alignment) (aligned_alloc((alignment), (size) ))
3355  #endif
3356 #endif
3357 
3358 #ifndef VMA_SYSTEM_FREE
3359  #if defined(_WIN32)
3360  #define VMA_SYSTEM_FREE(ptr) _aligned_free(ptr)
3361  #else
3362  #define VMA_SYSTEM_FREE(ptr) free(ptr)
3363  #endif
3364 #endif
3365 
3366 #ifndef VMA_MIN
3367  #define VMA_MIN(v1, v2) (std::min((v1), (v2)))
3368 #endif
3369 
3370 #ifndef VMA_MAX
3371  #define VMA_MAX(v1, v2) (std::max((v1), (v2)))
3372 #endif
3373 
3374 #ifndef VMA_SWAP
3375  #define VMA_SWAP(v1, v2) std::swap((v1), (v2))
3376 #endif
3377 
3378 #ifndef VMA_SORT
3379  #define VMA_SORT(beg, end, cmp) std::sort(beg, end, cmp)
3380 #endif
3381 
3382 #ifndef VMA_DEBUG_LOG
3383  #define VMA_DEBUG_LOG(format, ...)
3384  /*
3385  #define VMA_DEBUG_LOG(format, ...) do { \
3386  printf(format, __VA_ARGS__); \
3387  printf("\n"); \
3388  } while(false)
3389  */
3390 #endif
3391 
3392 // Define this macro to 1 to enable functions: vmaBuildStatsString, vmaFreeStatsString.
3393 #if VMA_STATS_STRING_ENABLED
// Formats num as decimal text into outStr (capacity strLen bytes).
// Always NUL-terminated by snprintf when strLen > 0.
static inline void VmaUint32ToStr(char* outStr, size_t strLen, uint32_t num)
{
    const unsigned int value = static_cast<unsigned int>(num);
    snprintf(outStr, strLen, "%u", value);
}
// Formats num as decimal text into outStr (capacity strLen bytes).
// Always NUL-terminated by snprintf when strLen > 0.
static inline void VmaUint64ToStr(char* outStr, size_t strLen, uint64_t num)
{
    const unsigned long long value = static_cast<unsigned long long>(num);
    snprintf(outStr, strLen, "%llu", value);
}
// Formats a pointer value into outStr (capacity strLen bytes) using the
// implementation-defined "%p" representation. NUL-terminated when strLen > 0.
static inline void VmaPtrToStr(char* outStr, size_t strLen, const void* ptr)
{
    snprintf(outStr, strLen, "%p", ptr);
}
3406 #endif
3407 
3408 #ifndef VMA_MUTEX
    // Default mutex implementation: thin wrapper over std::mutex exposing the
    // Lock()/Unlock() interface the rest of the library expects from VMA_MUTEX.
    class VmaMutex
    {
    public:
        void Lock() { m_Mutex.lock(); }
        void Unlock() { m_Mutex.unlock(); }
    private:
        std::mutex m_Mutex;
    };
3417  #define VMA_MUTEX VmaMutex
3418 #endif
3419 
3420 // Read-write mutex, where "read" is shared access, "write" is exclusive access.
3421 #ifndef VMA_RW_MUTEX
3422  #if VMA_USE_STL_SHARED_MUTEX
3423  // Use std::shared_mutex from C++17.
3424  #include <shared_mutex>
    // Read-write mutex backed by C++17 std::shared_mutex:
    // "read" lock is shared access, "write" lock is exclusive access.
    class VmaRWMutex
    {
    public:
        void LockRead() { m_Mutex.lock_shared(); }
        void UnlockRead() { m_Mutex.unlock_shared(); }
        void LockWrite() { m_Mutex.lock(); }
        void UnlockWrite() { m_Mutex.unlock(); }
    private:
        std::shared_mutex m_Mutex;
    };
3435  #define VMA_RW_MUTEX VmaRWMutex
3436  #elif defined(_WIN32) && defined(WINVER) && WINVER >= 0x0600
3437  // Use SRWLOCK from WinAPI.
3438  // Minimum supported client = Windows Vista, server = Windows Server 2008.
    // Read-write mutex backed by WinAPI SRWLOCK (slim reader/writer lock).
    // SRWLOCK requires explicit initialization and has no destructor/cleanup call.
    class VmaRWMutex
    {
    public:
        VmaRWMutex() { InitializeSRWLock(&m_Lock); }
        void LockRead() { AcquireSRWLockShared(&m_Lock); }
        void UnlockRead() { ReleaseSRWLockShared(&m_Lock); }
        void LockWrite() { AcquireSRWLockExclusive(&m_Lock); }
        void UnlockWrite() { ReleaseSRWLockExclusive(&m_Lock); }
    private:
        SRWLOCK m_Lock;
    };
3450  #define VMA_RW_MUTEX VmaRWMutex
3451  #else
3452  // Less efficient fallback: Use normal mutex.
    // Fallback read-write mutex for platforms without std::shared_mutex or SRWLOCK:
    // delegates both shared and exclusive locking to a plain VMA_MUTEX, so
    // concurrent readers serialize (correct, but less efficient).
    class VmaRWMutex
    {
    public:
        void LockRead() { m_Mutex.Lock(); }
        void UnlockRead() { m_Mutex.Unlock(); }
        void LockWrite() { m_Mutex.Lock(); }
        void UnlockWrite() { m_Mutex.Unlock(); }
    private:
        VMA_MUTEX m_Mutex;
    };
3463  #define VMA_RW_MUTEX VmaRWMutex
3464  #endif // #if VMA_USE_STL_SHARED_MUTEX
3465 #endif // #ifndef VMA_RW_MUTEX
3466 
3467 /*
3468 If providing your own implementation, you need to implement a subset of std::atomic:
3469 
3470 - Constructor(uint32_t desired)
3471 - uint32_t load() const
3472 - void store(uint32_t desired)
3473 - bool compare_exchange_weak(uint32_t& expected, uint32_t desired)
3474 */
3475 #ifndef VMA_ATOMIC_UINT32
3476  #include <atomic>
3477  #define VMA_ATOMIC_UINT32 std::atomic<uint32_t>
3478 #endif
3479 
3480 #ifndef VMA_DEBUG_ALWAYS_DEDICATED_MEMORY
3481 
3485  #define VMA_DEBUG_ALWAYS_DEDICATED_MEMORY (0)
3486 #endif
3487 
3488 #ifndef VMA_DEBUG_ALIGNMENT
3489 
3493  #define VMA_DEBUG_ALIGNMENT (1)
3494 #endif
3495 
3496 #ifndef VMA_DEBUG_MARGIN
3497 
3501  #define VMA_DEBUG_MARGIN (0)
3502 #endif
3503 
3504 #ifndef VMA_DEBUG_INITIALIZE_ALLOCATIONS
3505 
3509  #define VMA_DEBUG_INITIALIZE_ALLOCATIONS (0)
3510 #endif
3511 
3512 #ifndef VMA_DEBUG_DETECT_CORRUPTION
3513 
3518  #define VMA_DEBUG_DETECT_CORRUPTION (0)
3519 #endif
3520 
3521 #ifndef VMA_DEBUG_GLOBAL_MUTEX
3522 
3526  #define VMA_DEBUG_GLOBAL_MUTEX (0)
3527 #endif
3528 
3529 #ifndef VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY
3530 
3534  #define VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY (1)
3535 #endif
3536 
3537 #ifndef VMA_SMALL_HEAP_MAX_SIZE
3538  #define VMA_SMALL_HEAP_MAX_SIZE (1024ull * 1024 * 1024)
3540 #endif
3541 
3542 #ifndef VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE
3543  #define VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE (256ull * 1024 * 1024)
3545 #endif
3546 
3547 #ifndef VMA_CLASS_NO_COPY
3548  #define VMA_CLASS_NO_COPY(className) \
3549  private: \
3550  className(const className&) = delete; \
3551  className& operator=(const className&) = delete;
3552 #endif
3553 
3554 static const uint32_t VMA_FRAME_INDEX_LOST = UINT32_MAX;
3555 
3556 // Decimal 2139416166, float NaN, little-endian binary 66 E6 84 7F.
3557 static const uint32_t VMA_CORRUPTION_DETECTION_MAGIC_VALUE = 0x7F84E666;
3558 
3559 static const uint8_t VMA_ALLOCATION_FILL_PATTERN_CREATED = 0xDC;
3560 static const uint8_t VMA_ALLOCATION_FILL_PATTERN_DESTROYED = 0xEF;
3561 
3562 /*******************************************************************************
3563 END OF CONFIGURATION
3564 */
3565 
3566 static const uint32_t VMA_ALLOCATION_INTERNAL_STRATEGY_MIN_OFFSET = 0x10000000u;
3567 
3568 static VkAllocationCallbacks VmaEmptyAllocationCallbacks = {
3569  VMA_NULL, VMA_NULL, VMA_NULL, VMA_NULL, VMA_NULL, VMA_NULL };
3570 
// Returns number of bits set to 1 in (v).
static inline uint32_t VmaCountBitsSet(uint32_t v)
{
    // Kernighan's method: each iteration clears the lowest set bit,
    // so the loop runs once per set bit.
    uint32_t count = 0;
    while(v != 0)
    {
        v &= v - 1;
        ++count;
    }
    return count;
}
3581 
// Aligns given value up to the nearest multiple of align. For example: VmaAlignUp(11, 8) = 16.
// Use unsigned types like uint32_t, uint64_t as T.
template <typename T>
static inline T VmaAlignUp(T val, T align)
{
    const T remainder = val % align;
    return remainder != 0 ? val + (align - remainder) : val;
}
// Aligns given value down to nearest multiply of align value. For example: VmaAlignDown(11, 8) = 8.
// Use types like uint32_t, uint64_t as T.
template <typename T>
static inline T VmaAlignDown(T val, T align)
{
    return val / align * align;
}
3596 
// Division with mathematical rounding to nearest number.
// Use unsigned integer types as T; halfway cases round up.
template <typename T>
static inline T VmaRoundDiv(T x, T y)
{
    const T half = y / (T)2;
    return (x + half) / y;
}
3603 
/*
Returns true if given number is a power of two.
T must be unsigned integer number or signed integer but always nonnegative.
For 0 returns true.
*/
template <typename T>
inline bool VmaIsPow2(T x)
{
    // A power of two has exactly one bit set, so clearing the lowest set bit yields 0.
    const T lowestBitCleared = x & (x - 1);
    return lowestBitCleared == 0;
}
3614 
// Returns smallest power of 2 greater or equal to v.
static inline uint32_t VmaNextPow2(uint32_t v)
{
    // Smear the highest set bit of (v - 1) into every lower position,
    // then add 1 to reach the next power of two.
    v--;
    for(uint32_t shift = 1; shift < 32; shift <<= 1)
    {
        v |= v >> shift;
    }
    v++;
    return v;
}
// 64-bit overload: returns smallest power of 2 greater or equal to v.
static inline uint64_t VmaNextPow2(uint64_t v)
{
    // Smear the highest set bit of (v - 1) into every lower position,
    // then add 1 to reach the next power of two.
    v--;
    for(uint64_t shift = 1; shift < 64; shift <<= 1)
    {
        v |= v >> shift;
    }
    v++;
    return v;
}
3639 
// Returns largest power of 2 less or equal to v.
static inline uint32_t VmaPrevPow2(uint32_t v)
{
    // Propagate the highest set bit into every lower position,
    // then XOR away everything below it, leaving only that bit.
    for(uint32_t shift = 1; shift < 32; shift <<= 1)
    {
        v |= v >> shift;
    }
    v = v ^ (v >> 1);
    return v;
}
// 64-bit overload: returns largest power of 2 less or equal to v.
static inline uint64_t VmaPrevPow2(uint64_t v)
{
    // Propagate the highest set bit into every lower position,
    // then XOR away everything below it, leaving only that bit.
    for(uint64_t shift = 1; shift < 64; shift <<= 1)
    {
        v |= v >> shift;
    }
    v = v ^ (v >> 1);
    return v;
}
3662 
3663 static inline bool VmaStrIsEmpty(const char* pStr)
3664 {
3665  return pStr == VMA_NULL || *pStr == '\0';
3666 }
3667 
3668 #if VMA_STATS_STRING_ENABLED
3669 
3670 static const char* VmaAlgorithmToStr(uint32_t algorithm)
3671 {
3672  switch(algorithm)
3673  {
3675  return "Linear";
3677  return "Buddy";
3678  case 0:
3679  return "Default";
3680  default:
3681  VMA_ASSERT(0);
3682  return "";
3683  }
3684 }
3685 
3686 #endif // #if VMA_STATS_STRING_ENABLED
3687 
3688 #ifndef VMA_SORT
3689 
// Lomuto-style partition used by VmaQuickSort below.
// Partitions [beg, end) around the last element as pivot and returns an
// iterator to the pivot's final position. cmp(a, b) must return true when
// a is considered less than b.
template<typename Iterator, typename Compare>
Iterator VmaQuickSortPartition(Iterator beg, Iterator end, Compare cmp)
{
    Iterator centerValue = end; --centerValue; // pivot = last element of the range
    Iterator insertIndex = beg; // boundary: everything before it is < pivot
    for(Iterator memTypeIndex = beg; memTypeIndex < centerValue; ++memTypeIndex)
    {
        if(cmp(*memTypeIndex, *centerValue))
        {
            if(insertIndex != memTypeIndex)
            {
                VMA_SWAP(*memTypeIndex, *insertIndex);
            }
            ++insertIndex;
        }
    }
    // Move the pivot into its final sorted position.
    if(insertIndex != centerValue)
    {
        VMA_SWAP(*insertIndex, *centerValue);
    }
    return insertIndex;
}
3712 
// Recursive quicksort over [beg, end) using VmaQuickSortPartition.
// Serves as the library's own sort when VMA_SORT is not defined externally.
template<typename Iterator, typename Compare>
void VmaQuickSort(Iterator beg, Iterator end, Compare cmp)
{
    if(beg < end)
    {
        // Sort the two halves on either side of the pivot's final position.
        Iterator it = VmaQuickSortPartition<Iterator, Compare>(beg, end, cmp);
        VmaQuickSort<Iterator, Compare>(beg, it, cmp);
        VmaQuickSort<Iterator, Compare>(it + 1, end, cmp);
    }
}
3723 
3724 #define VMA_SORT(beg, end, cmp) VmaQuickSort(beg, end, cmp)
3725 
3726 #endif // #ifndef VMA_SORT
3727 
3728 /*
3729 Returns true if two memory blocks occupy overlapping pages.
3730 ResourceA must be in less memory offset than ResourceB.
3731 
3732 Algorithm is based on "Vulkan 1.0.39 - A Specification (with all registered Vulkan extensions)"
3733 chapter 11.6 "Resource Memory Association", paragraph "Buffer-Image Granularity".
3734 */
static inline bool VmaBlocksOnSamePage(
    VkDeviceSize resourceAOffset, // start offset of resource A; must precede resource B
    VkDeviceSize resourceASize,   // size of resource A in bytes; must be > 0
    VkDeviceSize resourceBOffset, // start offset of resource B
    VkDeviceSize pageSize)        // assumed to be a power of 2 - required by the mask arithmetic below
{
    VMA_ASSERT(resourceAOffset + resourceASize <= resourceBOffset && resourceASize > 0 && pageSize > 0);
    VkDeviceSize resourceAEnd = resourceAOffset + resourceASize - 1; // last byte of resource A
    VkDeviceSize resourceAEndPage = resourceAEnd & ~(pageSize - 1);  // page containing that last byte
    VkDeviceSize resourceBStart = resourceBOffset;
    VkDeviceSize resourceBStartPage = resourceBStart & ~(pageSize - 1); // page containing resource B's first byte
    return resourceAEndPage == resourceBStartPage;
}
3748 
// Kind of resource occupying a suballocation. Used to decide whether two
// neighboring suballocations must be separated by bufferImageGranularity
// (see VmaIsBufferImageGranularityConflict).
enum VmaSuballocationType
{
    VMA_SUBALLOCATION_TYPE_FREE = 0,          // unused region
    VMA_SUBALLOCATION_TYPE_UNKNOWN = 1,       // resource kind unknown - treated conservatively
    VMA_SUBALLOCATION_TYPE_BUFFER = 2,
    VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN = 3, // image with unknown tiling
    VMA_SUBALLOCATION_TYPE_IMAGE_LINEAR = 4,
    VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL = 5,
    VMA_SUBALLOCATION_TYPE_MAX_ENUM = 0x7FFFFFFF
};
3759 
3760 /*
3761 Returns true if given suballocation types could conflict and must respect
3762 VkPhysicalDeviceLimits::bufferImageGranularity. They conflict if one is buffer
3763 or linear image and another one is optimal image. If type is unknown, behave
3764 conservatively.
3765 */
static inline bool VmaIsBufferImageGranularityConflict(
    VmaSuballocationType suballocType1,
    VmaSuballocationType suballocType2)
{
    // Normalize order so that suballocType1 <= suballocType2; the relation is symmetric.
    if(suballocType1 > suballocType2)
    {
        VMA_SWAP(suballocType1, suballocType2);
    }

    switch(suballocType1)
    {
    case VMA_SUBALLOCATION_TYPE_FREE:
        // Free space never conflicts with anything.
        return false;
    case VMA_SUBALLOCATION_TYPE_UNKNOWN:
        // Unknown resource kind: assume the worst.
        return true;
    case VMA_SUBALLOCATION_TYPE_BUFFER:
        return
            suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN ||
            suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL;
    case VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN:
        // Unknown image tiling: may be optimal, so conflicts with anything image-like.
        return
            suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN ||
            suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_LINEAR ||
            suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL;
    case VMA_SUBALLOCATION_TYPE_IMAGE_LINEAR:
        return
            suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL;
    case VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL:
        // Both optimal images (suballocType2 >= suballocType1): no conflict.
        return false;
    default:
        VMA_ASSERT(0);
        return true;
    }
}
3800 
// Fills the debug margin at (pData + offset) with the corruption-detection
// magic value, one uint32_t at a time. Compiles to a no-op unless both
// VMA_DEBUG_MARGIN and VMA_DEBUG_DETECT_CORRUPTION are enabled.
static void VmaWriteMagicValue(void* pData, VkDeviceSize offset)
{
#if VMA_DEBUG_MARGIN > 0 && VMA_DEBUG_DETECT_CORRUPTION
    uint32_t* pDst = (uint32_t*)((char*)pData + offset);
    const size_t numberCount = VMA_DEBUG_MARGIN / sizeof(uint32_t);
    for(size_t i = 0; i < numberCount; ++i, ++pDst)
    {
        *pDst = VMA_CORRUPTION_DETECTION_MAGIC_VALUE;
    }
#else
    // no-op
#endif
}
3814 
// Checks that the debug margin at (pData + offset) still holds the magic value
// written by VmaWriteMagicValue. Returns false when any word differs (i.e. an
// out-of-bounds write corrupted the margin); always returns true when
// corruption detection is compiled out.
static bool VmaValidateMagicValue(const void* pData, VkDeviceSize offset)
{
#if VMA_DEBUG_MARGIN > 0 && VMA_DEBUG_DETECT_CORRUPTION
    const uint32_t* pSrc = (const uint32_t*)((const char*)pData + offset);
    const size_t numberCount = VMA_DEBUG_MARGIN / sizeof(uint32_t);
    for(size_t i = 0; i < numberCount; ++i, ++pSrc)
    {
        if(*pSrc != VMA_CORRUPTION_DETECTION_MAGIC_VALUE)
        {
            return false;
        }
    }
#endif
    return true;
}
3830 
3831 /*
3832 Fills structure with parameters of an example buffer to be used for transfers
3833 during GPU memory defragmentation.
3834 */
// Fills outBufCreateInfo with parameters of an example transfer buffer used
// during GPU memory defragmentation. Only sType, usage and size are set;
// all other members are zeroed.
static void VmaFillGpuDefragmentationBufferCreateInfo(VkBufferCreateInfo& outBufCreateInfo)
{
    memset(&outBufCreateInfo, 0, sizeof(outBufCreateInfo));
    outBufCreateInfo.sType = VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO;
    // Usable as both source and destination of transfer commands.
    outBufCreateInfo.usage = VK_BUFFER_USAGE_TRANSFER_SRC_BIT | VK_BUFFER_USAGE_TRANSFER_DST_BIT;
    outBufCreateInfo.size = (VkDeviceSize)VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE; // Example size.
}
3842 
// Helper RAII class to lock a mutex in constructor and unlock it in destructor (at the end of scope).
// When useMutex is false, locking is skipped entirely (m_pMutex stays null),
// which supports the library's single-threaded configuration.
struct VmaMutexLock
{
    VMA_CLASS_NO_COPY(VmaMutexLock)
public:
    VmaMutexLock(VMA_MUTEX& mutex, bool useMutex = true) :
        m_pMutex(useMutex ? &mutex : VMA_NULL)
    { if(m_pMutex) { m_pMutex->Lock(); } }
    ~VmaMutexLock()
    { if(m_pMutex) { m_pMutex->Unlock(); } }
private:
    VMA_MUTEX* m_pMutex; // null when locking is disabled
};
3856 
3857 // Helper RAII class to lock a RW mutex in constructor and unlock it in destructor (at the end of scope), for reading.
3858 struct VmaMutexLockRead
3859 {
3860  VMA_CLASS_NO_COPY(VmaMutexLockRead)
3861 public:
3862  VmaMutexLockRead(VMA_RW_MUTEX& mutex, bool useMutex) :
3863  m_pMutex(useMutex ? &mutex : VMA_NULL)
3864  { if(m_pMutex) { m_pMutex->LockRead(); } }
3865  ~VmaMutexLockRead() { if(m_pMutex) { m_pMutex->UnlockRead(); } }
3866 private:
3867  VMA_RW_MUTEX* m_pMutex;
3868 };
3869 
3870 // Helper RAII class to lock a RW mutex in constructor and unlock it in destructor (at the end of scope), for writing.
3871 struct VmaMutexLockWrite
3872 {
3873  VMA_CLASS_NO_COPY(VmaMutexLockWrite)
3874 public:
3875  VmaMutexLockWrite(VMA_RW_MUTEX& mutex, bool useMutex) :
3876  m_pMutex(useMutex ? &mutex : VMA_NULL)
3877  { if(m_pMutex) { m_pMutex->LockWrite(); } }
3878  ~VmaMutexLockWrite() { if(m_pMutex) { m_pMutex->UnlockWrite(); } }
3879 private:
3880  VMA_RW_MUTEX* m_pMutex;
3881 };
3882 
// When VMA_DEBUG_GLOBAL_MUTEX is enabled, VMA_DEBUG_GLOBAL_MUTEX_LOCK declares
// a scoped lock on one global mutex, serializing the enclosing function bodies
// for debugging. Otherwise it expands to nothing.
#if VMA_DEBUG_GLOBAL_MUTEX
    static VMA_MUTEX gDebugGlobalMutex;
    #define VMA_DEBUG_GLOBAL_MUTEX_LOCK VmaMutexLock debugGlobalMutexLock(gDebugGlobalMutex, true);
#else
    #define VMA_DEBUG_GLOBAL_MUTEX_LOCK
#endif

// Minimum size of a free suballocation to register it in the free suballocation collection.
static const VkDeviceSize VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER = 16;
3892 
3893 /*
3894 Performs binary search and returns iterator to first element that is greater or
3895 equal to (key), according to comparison (cmp).
3896 
3897 Cmp should return true if first argument is less than second argument.
3898 
3899 Returned value is the found element, if present in the collection or place where
3900 new element with value (key) should be inserted.
3901 */
3902 template <typename CmpLess, typename IterT, typename KeyT>
3903 static IterT VmaBinaryFindFirstNotLess(IterT beg, IterT end, const KeyT &key, const CmpLess& cmp)
3904 {
3905  size_t down = 0, up = (end - beg);
3906  while(down < up)
3907  {
3908  const size_t mid = (down + up) / 2;
3909  if(cmp(*(beg+mid), key))
3910  {
3911  down = mid + 1;
3912  }
3913  else
3914  {
3915  up = mid;
3916  }
3917  }
3918  return beg + down;
3919 }
3920 
3921 template<typename CmpLess, typename IterT, typename KeyT>
3922 IterT VmaBinaryFindSorted(const IterT& beg, const IterT& end, const KeyT& value, const CmpLess& cmp)
3923 {
3924  IterT it = VmaBinaryFindFirstNotLess<CmpLess, IterT, KeyT>(
3925  beg, end, value, cmp);
3926  if(it == end ||
3927  (!cmp(*it, value) && !cmp(value, *it)))
3928  {
3929  return it;
3930  }
3931  return end;
3932 }
3933 
3934 /*
3935 Returns true if all pointers in the array are not-null and unique.
3936 Warning! O(n^2) complexity. Use only inside VMA_HEAVY_ASSERT.
3937 T must be pointer type, e.g. VmaAllocation, VmaPool.
3938 */
3939 template<typename T>
3940 static bool VmaValidatePointerArray(uint32_t count, const T* arr)
3941 {
3942  for(uint32_t i = 0; i < count; ++i)
3943  {
3944  const T iPtr = arr[i];
3945  if(iPtr == VMA_NULL)
3946  {
3947  return false;
3948  }
3949  for(uint32_t j = i + 1; j < count; ++j)
3950  {
3951  if(iPtr == arr[j])
3952  {
3953  return false;
3954  }
3955  }
3956  }
3957  return true;
3958 }
3959 
3961 // Memory allocation
3962 
3963 static void* VmaMalloc(const VkAllocationCallbacks* pAllocationCallbacks, size_t size, size_t alignment)
3964 {
3965  if((pAllocationCallbacks != VMA_NULL) &&
3966  (pAllocationCallbacks->pfnAllocation != VMA_NULL))
3967  {
3968  return (*pAllocationCallbacks->pfnAllocation)(
3969  pAllocationCallbacks->pUserData,
3970  size,
3971  alignment,
3972  VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
3973  }
3974  else
3975  {
3976  return VMA_SYSTEM_ALIGNED_MALLOC(size, alignment);
3977  }
3978 }
3979 
3980 static void VmaFree(const VkAllocationCallbacks* pAllocationCallbacks, void* ptr)
3981 {
3982  if((pAllocationCallbacks != VMA_NULL) &&
3983  (pAllocationCallbacks->pfnFree != VMA_NULL))
3984  {
3985  (*pAllocationCallbacks->pfnFree)(pAllocationCallbacks->pUserData, ptr);
3986  }
3987  else
3988  {
3989  VMA_SYSTEM_FREE(ptr);
3990  }
3991 }
3992 
// Allocates raw, uninitialized storage for a single T via VmaMalloc
// (no constructor is run - pair with placement new, see vma_new).
template<typename T>
static T* VmaAllocate(const VkAllocationCallbacks* pAllocationCallbacks)
{
    return (T*)VmaMalloc(pAllocationCallbacks, sizeof(T), VMA_ALIGN_OF(T));
}
3998 
// Allocates raw, uninitialized storage for `count` objects of type T.
// NOTE(review): sizeof(T) * count is not checked for overflow here -
// callers appear to pass internally-computed, trusted counts; confirm.
template<typename T>
static T* VmaAllocateArray(const VkAllocationCallbacks* pAllocationCallbacks, size_t count)
{
    return (T*)VmaMalloc(pAllocationCallbacks, sizeof(T) * count, VMA_ALIGN_OF(T));
}
4004 
// Allocates storage for one `type` via VmaAllocate and constructs it in place.
#define vma_new(allocator, type) new(VmaAllocate<type>(allocator))(type)

// Allocates storage for `count` objects via VmaAllocateArray; note the
// placement new constructs only the object at the start of the storage.
#define vma_new_array(allocator, type, count) new(VmaAllocateArray<type>((allocator), (count)))(type)
4008 
// Destroys *ptr with an explicit destructor call, then releases its memory
// via VmaFree. NOTE(review): ptr is dereferenced unconditionally - callers
// must not pass null (unlike vma_delete_array, which tolerates it).
template<typename T>
static void vma_delete(const VkAllocationCallbacks* pAllocationCallbacks, T* ptr)
{
    ptr->~T();
    VmaFree(pAllocationCallbacks, ptr);
}
4015 
4016 template<typename T>
4017 static void vma_delete_array(const VkAllocationCallbacks* pAllocationCallbacks, T* ptr, size_t count)
4018 {
4019  if(ptr != VMA_NULL)
4020  {
4021  for(size_t i = count; i--; )
4022  {
4023  ptr[i].~T();
4024  }
4025  VmaFree(pAllocationCallbacks, ptr);
4026  }
4027 }
4028 
// STL-compatible allocator.
// Adapts VkAllocationCallbacks to the minimal C++ Allocator interface so that
// standard-style containers (and VmaVector) can allocate through VMA's callbacks.
template<typename T>
class VmaStlAllocator
{
public:
    // Exposed publicly so containers (e.g. VmaVector) can reach the raw callbacks.
    const VkAllocationCallbacks* const m_pCallbacks;
    typedef T value_type;

    VmaStlAllocator(const VkAllocationCallbacks* pCallbacks) : m_pCallbacks(pCallbacks) { }
    // Rebinding copy constructor required by the Allocator concept.
    template<typename U> VmaStlAllocator(const VmaStlAllocator<U>& src) : m_pCallbacks(src.m_pCallbacks) { }

    T* allocate(size_t n) { return VmaAllocateArray<T>(m_pCallbacks, n); }
    void deallocate(T* p, size_t n) { VmaFree(m_pCallbacks, p); }

    // Allocators compare equal iff they would free each other's memory,
    // i.e. they wrap the same callbacks pointer.
    template<typename U>
    bool operator==(const VmaStlAllocator<U>& rhs) const
    {
        return m_pCallbacks == rhs.m_pCallbacks;
    }
    template<typename U>
    bool operator!=(const VmaStlAllocator<U>& rhs) const
    {
        return m_pCallbacks != rhs.m_pCallbacks;
    }

    VmaStlAllocator& operator=(const VmaStlAllocator& x) = delete;
};
4056 
4057 #if VMA_USE_STL_VECTOR
4058 
4059 #define VmaVector std::vector
4060 
4061 template<typename T, typename allocatorT>
4062 static void VmaVectorInsert(std::vector<T, allocatorT>& vec, size_t index, const T& item)
4063 {
4064  vec.insert(vec.begin() + index, item);
4065 }
4066 
4067 template<typename T, typename allocatorT>
4068 static void VmaVectorRemove(std::vector<T, allocatorT>& vec, size_t index)
4069 {
4070  vec.erase(vec.begin() + index);
4071 }
4072 
4073 #else // #if VMA_USE_STL_VECTOR
4074 
4075 /* Class with interface compatible with subset of std::vector.
4076 T must be POD because constructors and destructors are not called and memcpy is
4077 used for these objects. */
4078 template<typename T, typename AllocatorT>
4079 class VmaVector
4080 {
4081 public:
4082  typedef T value_type;
4083 
4084  VmaVector(const AllocatorT& allocator) :
4085  m_Allocator(allocator),
4086  m_pArray(VMA_NULL),
4087  m_Count(0),
4088  m_Capacity(0)
4089  {
4090  }
4091 
4092  VmaVector(size_t count, const AllocatorT& allocator) :
4093  m_Allocator(allocator),
4094  m_pArray(count ? (T*)VmaAllocateArray<T>(allocator.m_pCallbacks, count) : VMA_NULL),
4095  m_Count(count),
4096  m_Capacity(count)
4097  {
4098  }
4099 
4100  // This version of the constructor is here for compatibility with pre-C++14 std::vector.
4101  // value is unused.
4102  VmaVector(size_t count, const T& value, const AllocatorT& allocator)
4103  : VmaVector(count, allocator) {}
4104 
4105  VmaVector(const VmaVector<T, AllocatorT>& src) :
4106  m_Allocator(src.m_Allocator),
4107  m_pArray(src.m_Count ? (T*)VmaAllocateArray<T>(src.m_Allocator.m_pCallbacks, src.m_Count) : VMA_NULL),
4108  m_Count(src.m_Count),
4109  m_Capacity(src.m_Count)
4110  {
4111  if(m_Count != 0)
4112  {
4113  memcpy(m_pArray, src.m_pArray, m_Count * sizeof(T));
4114  }
4115  }
4116 
4117  ~VmaVector()
4118  {
4119  VmaFree(m_Allocator.m_pCallbacks, m_pArray);
4120  }
4121 
4122  VmaVector& operator=(const VmaVector<T, AllocatorT>& rhs)
4123  {
4124  if(&rhs != this)
4125  {
4126  resize(rhs.m_Count);
4127  if(m_Count != 0)
4128  {
4129  memcpy(m_pArray, rhs.m_pArray, m_Count * sizeof(T));
4130  }
4131  }
4132  return *this;
4133  }
4134 
4135  bool empty() const { return m_Count == 0; }
4136  size_t size() const { return m_Count; }
4137  T* data() { return m_pArray; }
4138  const T* data() const { return m_pArray; }
4139 
4140  T& operator[](size_t index)
4141  {
4142  VMA_HEAVY_ASSERT(index < m_Count);
4143  return m_pArray[index];
4144  }
4145  const T& operator[](size_t index) const
4146  {
4147  VMA_HEAVY_ASSERT(index < m_Count);
4148  return m_pArray[index];
4149  }
4150 
4151  T& front()
4152  {
4153  VMA_HEAVY_ASSERT(m_Count > 0);
4154  return m_pArray[0];
4155  }
4156  const T& front() const
4157  {
4158  VMA_HEAVY_ASSERT(m_Count > 0);
4159  return m_pArray[0];
4160  }
4161  T& back()
4162  {
4163  VMA_HEAVY_ASSERT(m_Count > 0);
4164  return m_pArray[m_Count - 1];
4165  }
4166  const T& back() const
4167  {
4168  VMA_HEAVY_ASSERT(m_Count > 0);
4169  return m_pArray[m_Count - 1];
4170  }
4171 
4172  void reserve(size_t newCapacity, bool freeMemory = false)
4173  {
4174  newCapacity = VMA_MAX(newCapacity, m_Count);
4175 
4176  if((newCapacity < m_Capacity) && !freeMemory)
4177  {
4178  newCapacity = m_Capacity;
4179  }
4180 
4181  if(newCapacity != m_Capacity)
4182  {
4183  T* const newArray = newCapacity ? VmaAllocateArray<T>(m_Allocator, newCapacity) : VMA_NULL;
4184  if(m_Count != 0)
4185  {
4186  memcpy(newArray, m_pArray, m_Count * sizeof(T));
4187  }
4188  VmaFree(m_Allocator.m_pCallbacks, m_pArray);
4189  m_Capacity = newCapacity;
4190  m_pArray = newArray;
4191  }
4192  }
4193 
4194  void resize(size_t newCount, bool freeMemory = false)
4195  {
4196  size_t newCapacity = m_Capacity;
4197  if(newCount > m_Capacity)
4198  {
4199  newCapacity = VMA_MAX(newCount, VMA_MAX(m_Capacity * 3 / 2, (size_t)8));
4200  }
4201  else if(freeMemory)
4202  {
4203  newCapacity = newCount;
4204  }
4205 
4206  if(newCapacity != m_Capacity)
4207  {
4208  T* const newArray = newCapacity ? VmaAllocateArray<T>(m_Allocator.m_pCallbacks, newCapacity) : VMA_NULL;
4209  const size_t elementsToCopy = VMA_MIN(m_Count, newCount);
4210  if(elementsToCopy != 0)
4211  {
4212  memcpy(newArray, m_pArray, elementsToCopy * sizeof(T));
4213  }
4214  VmaFree(m_Allocator.m_pCallbacks, m_pArray);
4215  m_Capacity = newCapacity;
4216  m_pArray = newArray;
4217  }
4218 
4219  m_Count = newCount;
4220  }
4221 
4222  void clear(bool freeMemory = false)
4223  {
4224  resize(0, freeMemory);
4225  }
4226 
4227  void insert(size_t index, const T& src)
4228  {
4229  VMA_HEAVY_ASSERT(index <= m_Count);
4230  const size_t oldCount = size();
4231  resize(oldCount + 1);
4232  if(index < oldCount)
4233  {
4234  memmove(m_pArray + (index + 1), m_pArray + index, (oldCount - index) * sizeof(T));
4235  }
4236  m_pArray[index] = src;
4237  }
4238 
4239  void remove(size_t index)
4240  {
4241  VMA_HEAVY_ASSERT(index < m_Count);
4242  const size_t oldCount = size();
4243  if(index < oldCount - 1)
4244  {
4245  memmove(m_pArray + index, m_pArray + (index + 1), (oldCount - index - 1) * sizeof(T));
4246  }
4247  resize(oldCount - 1);
4248  }
4249 
4250  void push_back(const T& src)
4251  {
4252  const size_t newIndex = size();
4253  resize(newIndex + 1);
4254  m_pArray[newIndex] = src;
4255  }
4256 
4257  void pop_back()
4258  {
4259  VMA_HEAVY_ASSERT(m_Count > 0);
4260  resize(size() - 1);
4261  }
4262 
4263  void push_front(const T& src)
4264  {
4265  insert(0, src);
4266  }
4267 
4268  void pop_front()
4269  {
4270  VMA_HEAVY_ASSERT(m_Count > 0);
4271  remove(0);
4272  }
4273 
4274  typedef T* iterator;
4275 
4276  iterator begin() { return m_pArray; }
4277  iterator end() { return m_pArray + m_Count; }
4278 
4279 private:
4280  AllocatorT m_Allocator;
4281  T* m_pArray;
4282  size_t m_Count;
4283  size_t m_Capacity;
4284 };
4285 
// Free-function insert, mirroring the std::vector overload above so generic
// code can use either container type.
template<typename T, typename allocatorT>
static void VmaVectorInsert(VmaVector<T, allocatorT>& vec, size_t index, const T& item)
{
    vec.insert(index, item);
}
4291 
// Free-function remove, mirroring the std::vector overload above.
template<typename T, typename allocatorT>
static void VmaVectorRemove(VmaVector<T, allocatorT>& vec, size_t index)
{
    vec.remove(index);
}
4297 
4298 #endif // #if VMA_USE_STL_VECTOR
4299 
4300 template<typename CmpLess, typename VectorT>
4301 size_t VmaVectorInsertSorted(VectorT& vector, const typename VectorT::value_type& value)
4302 {
4303  const size_t indexToInsert = VmaBinaryFindFirstNotLess(
4304  vector.data(),
4305  vector.data() + vector.size(),
4306  value,
4307  CmpLess()) - vector.data();
4308  VmaVectorInsert(vector, indexToInsert, value);
4309  return indexToInsert;
4310 }
4311 
4312 template<typename CmpLess, typename VectorT>
4313 bool VmaVectorRemoveSorted(VectorT& vector, const typename VectorT::value_type& value)
4314 {
4315  CmpLess comparator;
4316  typename VectorT::iterator it = VmaBinaryFindFirstNotLess(
4317  vector.begin(),
4318  vector.end(),
4319  value,
4320  comparator);
4321  if((it != vector.end()) && !comparator(*it, value) && !comparator(value, *it))
4322  {
4323  size_t indexToRemove = it - vector.begin();
4324  VmaVectorRemove(vector, indexToRemove);
4325  return true;
4326  }
4327  return false;
4328 }
4329 
4331 // class VmaPoolAllocator
4332 
/*
Allocator for objects of type T using a list of arrays (pools) to speed up
allocation. Number of elements that can be allocated is not bounded because
allocator can create multiple blocks.
*/
template<typename T>
class VmaPoolAllocator
{
    VMA_CLASS_NO_COPY(VmaPoolAllocator)
public:
    VmaPoolAllocator(const VkAllocationCallbacks* pAllocationCallbacks, uint32_t firstBlockCapacity);
    ~VmaPoolAllocator();
    // Returns a default-constructed T; storage comes from an internal block.
    T* Alloc();
    // Destroys *ptr and returns its slot to the owning block's free list.
    void Free(T* ptr);

private:
    // A slot is either a live T or a link in the block's free list -
    // NextFreeIndex is only meaningful while the slot is free.
    union Item
    {
        uint32_t NextFreeIndex;
        alignas(T) char Value[sizeof(T)];
    };

    struct ItemBlock
    {
        Item* pItems;
        uint32_t Capacity;
        // Index of first free slot in pItems, or UINT32_MAX when the block is full.
        uint32_t FirstFreeIndex;
    };

    const VkAllocationCallbacks* m_pAllocationCallbacks;
    const uint32_t m_FirstBlockCapacity;
    VmaVector< ItemBlock, VmaStlAllocator<ItemBlock> > m_ItemBlocks;

    ItemBlock& CreateNewBlock();
};
4368 
// firstBlockCapacity is the slot count of the first block; later blocks grow
// by a 3/2 factor (see CreateNewBlock). Must be > 1.
template<typename T>
VmaPoolAllocator<T>::VmaPoolAllocator(const VkAllocationCallbacks* pAllocationCallbacks, uint32_t firstBlockCapacity) :
    m_pAllocationCallbacks(pAllocationCallbacks),
    m_FirstBlockCapacity(firstBlockCapacity),
    m_ItemBlocks(VmaStlAllocator<ItemBlock>(pAllocationCallbacks))
{
    VMA_ASSERT(m_FirstBlockCapacity > 1);
}
4377 
4378 template<typename T>
4379 VmaPoolAllocator<T>::~VmaPoolAllocator()
4380 {
4381  for(size_t i = m_ItemBlocks.size(); i--; )
4382  vma_delete_array(m_pAllocationCallbacks, m_ItemBlocks[i].pItems, m_ItemBlocks[i].Capacity);
4383  m_ItemBlocks.clear();
4384 }
4385 
// Returns a pointer to a default-constructed T taken from the first block
// (searched newest-first) that has a free slot; creates a new block when all
// existing blocks are full.
template<typename T>
T* VmaPoolAllocator<T>::Alloc()
{
    for(size_t i = m_ItemBlocks.size(); i--; )
    {
        ItemBlock& block = m_ItemBlocks[i];
        // This block has some free items: Use first one.
        if(block.FirstFreeIndex != UINT32_MAX)
        {
            Item* const pItem = &block.pItems[block.FirstFreeIndex];
            // Pop the slot off the block's singly-linked free list.
            block.FirstFreeIndex = pItem->NextFreeIndex;
            T* result = (T*)&pItem->Value;
            new(result)T(); // Explicit constructor call.
            return result;
        }
    }

    // No block has free item: Create new one and use it.
    ItemBlock& newBlock = CreateNewBlock();
    Item* const pItem = &newBlock.pItems[0];
    newBlock.FirstFreeIndex = pItem->NextFreeIndex;
    T* result = (T*)&pItem->Value;
    new(result)T(); // Explicit constructor call.
    return result;
}
4411 
// Destroys *ptr and pushes its slot back onto the free list of the block that
// owns it. Asserts if ptr was not allocated from this pool.
template<typename T>
void VmaPoolAllocator<T>::Free(T* ptr)
{
    // Search all memory blocks to find ptr.
    for(size_t i = m_ItemBlocks.size(); i--; )
    {
        ItemBlock& block = m_ItemBlocks[i];

        // Casting to union.
        // memcpy instead of a pointer cast avoids strict-aliasing issues when
        // reinterpreting the T* as its enclosing Item*.
        Item* pItemPtr;
        memcpy(&pItemPtr, &ptr, sizeof(pItemPtr));

        // Check if pItemPtr is in address range of this block.
        if((pItemPtr >= block.pItems) && (pItemPtr < block.pItems + block.Capacity))
        {
            ptr->~T(); // Explicit destructor call.
            const uint32_t index = static_cast<uint32_t>(pItemPtr - block.pItems);
            // Push the slot onto the head of the block's free list.
            pItemPtr->NextFreeIndex = block.FirstFreeIndex;
            block.FirstFreeIndex = index;
            return;
        }
    }
    VMA_ASSERT(0 && "Pointer doesn't belong to this memory pool.");
}
4436 
// Appends a new block whose capacity is the first-block capacity, or 3/2 of
// the previous block's, and threads all its slots into a free list.
template<typename T>
typename VmaPoolAllocator<T>::ItemBlock& VmaPoolAllocator<T>::CreateNewBlock()
{
    const uint32_t newBlockCapacity = m_ItemBlocks.empty() ?
        m_FirstBlockCapacity : m_ItemBlocks.back().Capacity * 3 / 2;

    const ItemBlock newBlock = {
        vma_new_array(m_pAllocationCallbacks, Item, newBlockCapacity),
        newBlockCapacity,
        0 }; // FirstFreeIndex = 0: the whole block starts free.

    m_ItemBlocks.push_back(newBlock);

    // Setup singly-linked list of all free items in this block.
    for(uint32_t i = 0; i < newBlockCapacity - 1; ++i)
        newBlock.pItems[i].NextFreeIndex = i + 1;
    // UINT32_MAX terminates the free list.
    newBlock.pItems[newBlockCapacity - 1].NextFreeIndex = UINT32_MAX;
    return m_ItemBlocks.back();
}
4456 
4458 // class VmaRawList, VmaList
4459 
4460 #if VMA_USE_STL_LIST
4461 
4462 #define VmaList std::list
4463 
4464 #else // #if VMA_USE_STL_LIST
4465 
// Node of VmaRawList's doubly linked list. pPrev/pNext are VMA_NULL at the
// respective ends of the list.
template<typename T>
struct VmaListItem
{
    VmaListItem* pPrev;
    VmaListItem* pNext;
    T Value;
};
4473 
// Doubly linked list.
// Nodes are pooled through VmaPoolAllocator; the list hands out raw ItemType
// pointers - VmaList below wraps this with an STL-style iterator interface.
template<typename T>
class VmaRawList
{
    VMA_CLASS_NO_COPY(VmaRawList)
public:
    typedef VmaListItem<T> ItemType;

    VmaRawList(const VkAllocationCallbacks* pAllocationCallbacks);
    ~VmaRawList();
    void Clear();

    size_t GetCount() const { return m_Count; }
    bool IsEmpty() const { return m_Count == 0; }

    ItemType* Front() { return m_pFront; }
    const ItemType* Front() const { return m_pFront; }
    ItemType* Back() { return m_pBack; }
    const ItemType* Back() const { return m_pBack; }

    // The no-argument overloads append a node with an uninitialized Value.
    ItemType* PushBack();
    ItemType* PushFront();
    ItemType* PushBack(const T& value);
    ItemType* PushFront(const T& value);
    void PopBack();
    void PopFront();

    // Item can be null - it means PushBack.
    ItemType* InsertBefore(ItemType* pItem);
    // Item can be null - it means PushFront.
    ItemType* InsertAfter(ItemType* pItem);

    ItemType* InsertBefore(ItemType* pItem, const T& value);
    ItemType* InsertAfter(ItemType* pItem, const T& value);

    void Remove(ItemType* pItem);

private:
    const VkAllocationCallbacks* const m_pAllocationCallbacks;
    VmaPoolAllocator<ItemType> m_ItemAllocator;
    ItemType* m_pFront; // Null when the list is empty.
    ItemType* m_pBack;  // Null when the list is empty.
    size_t m_Count;
};
4518 
// Creates an empty list; node storage comes from a pool allocator with a
// first-block capacity of 128 items.
template<typename T>
VmaRawList<T>::VmaRawList(const VkAllocationCallbacks* pAllocationCallbacks) :
    m_pAllocationCallbacks(pAllocationCallbacks),
    m_ItemAllocator(pAllocationCallbacks, 128),
    m_pFront(VMA_NULL),
    m_pBack(VMA_NULL),
    m_Count(0)
{
}
4528 
// Intentionally empty: m_ItemAllocator's destructor releases all node storage
// wholesale, so walking the list to free nodes one by one would be wasted work.
template<typename T>
VmaRawList<T>::~VmaRawList()
{
    // Intentionally not calling Clear, because that would be unnecessary
    // computations to return all items to m_ItemAllocator as free.
}
4535 
4536 template<typename T>
4537 void VmaRawList<T>::Clear()
4538 {
4539  if(IsEmpty() == false)
4540  {
4541  ItemType* pItem = m_pBack;
4542  while(pItem != VMA_NULL)
4543  {
4544  ItemType* const pPrevItem = pItem->pPrev;
4545  m_ItemAllocator.Free(pItem);
4546  pItem = pPrevItem;
4547  }
4548  m_pFront = VMA_NULL;
4549  m_pBack = VMA_NULL;
4550  m_Count = 0;
4551  }
4552 }
4553 
4554 template<typename T>
4555 VmaListItem<T>* VmaRawList<T>::PushBack()
4556 {
4557  ItemType* const pNewItem = m_ItemAllocator.Alloc();
4558  pNewItem->pNext = VMA_NULL;
4559  if(IsEmpty())
4560  {
4561  pNewItem->pPrev = VMA_NULL;
4562  m_pFront = pNewItem;
4563  m_pBack = pNewItem;
4564  m_Count = 1;
4565  }
4566  else
4567  {
4568  pNewItem->pPrev = m_pBack;
4569  m_pBack->pNext = pNewItem;
4570  m_pBack = pNewItem;
4571  ++m_Count;
4572  }
4573  return pNewItem;
4574 }
4575 
4576 template<typename T>
4577 VmaListItem<T>* VmaRawList<T>::PushFront()
4578 {
4579  ItemType* const pNewItem = m_ItemAllocator.Alloc();
4580  pNewItem->pPrev = VMA_NULL;
4581  if(IsEmpty())
4582  {
4583  pNewItem->pNext = VMA_NULL;
4584  m_pFront = pNewItem;
4585  m_pBack = pNewItem;
4586  m_Count = 1;
4587  }
4588  else
4589  {
4590  pNewItem->pNext = m_pFront;
4591  m_pFront->pPrev = pNewItem;
4592  m_pFront = pNewItem;
4593  ++m_Count;
4594  }
4595  return pNewItem;
4596 }
4597 
4598 template<typename T>
4599 VmaListItem<T>* VmaRawList<T>::PushBack(const T& value)
4600 {
4601  ItemType* const pNewItem = PushBack();
4602  pNewItem->Value = value;
4603  return pNewItem;
4604 }
4605 
4606 template<typename T>
4607 VmaListItem<T>* VmaRawList<T>::PushFront(const T& value)
4608 {
4609  ItemType* const pNewItem = PushFront();
4610  pNewItem->Value = value;
4611  return pNewItem;
4612 }
4613 
// Removes the last node. The list must be non-empty.
template<typename T>
void VmaRawList<T>::PopBack()
{
    VMA_HEAVY_ASSERT(m_Count > 0);
    ItemType* const pBackItem = m_pBack;
    ItemType* const pPrevItem = pBackItem->pPrev;
    if(pPrevItem != VMA_NULL)
    {
        // Predecessor becomes the new tail.
        pPrevItem->pNext = VMA_NULL;
    }
    // When pPrevItem is null the list becomes empty; m_pFront is left pointing
    // at the freed node only transiently if count was 1 - note m_pFront is NOT
    // updated here; with m_Count reaching 0 IsEmpty() guards all access paths.
    m_pBack = pPrevItem;
    m_ItemAllocator.Free(pBackItem);
    --m_Count;
}
4628 
// Removes the first node. The list must be non-empty.
template<typename T>
void VmaRawList<T>::PopFront()
{
    VMA_HEAVY_ASSERT(m_Count > 0);
    ItemType* const pFrontItem = m_pFront;
    ItemType* const pNextItem = pFrontItem->pNext;
    if(pNextItem != VMA_NULL)
    {
        // Successor becomes the new head.
        pNextItem->pPrev = VMA_NULL;
    }
    m_pFront = pNextItem;
    m_ItemAllocator.Free(pFrontItem);
    --m_Count;
}
4643 
// Unlinks and frees an arbitrary node. pItem must belong to this list.
template<typename T>
void VmaRawList<T>::Remove(ItemType* pItem)
{
    VMA_HEAVY_ASSERT(pItem != VMA_NULL);
    VMA_HEAVY_ASSERT(m_Count > 0);

    // Splice the predecessor (or the list head) past pItem.
    if(pItem->pPrev != VMA_NULL)
    {
        pItem->pPrev->pNext = pItem->pNext;
    }
    else
    {
        VMA_HEAVY_ASSERT(m_pFront == pItem);
        m_pFront = pItem->pNext;
    }

    // Splice the successor (or the list tail) past pItem.
    if(pItem->pNext != VMA_NULL)
    {
        pItem->pNext->pPrev = pItem->pPrev;
    }
    else
    {
        VMA_HEAVY_ASSERT(m_pBack == pItem);
        m_pBack = pItem->pPrev;
    }

    m_ItemAllocator.Free(pItem);
    --m_Count;
}
4673 
// Inserts a new node (uninitialized Value) immediately before pItem.
// A null pItem means "before end", i.e. PushBack.
template<typename T>
VmaListItem<T>* VmaRawList<T>::InsertBefore(ItemType* pItem)
{
    if(pItem != VMA_NULL)
    {
        ItemType* const prevItem = pItem->pPrev;
        ItemType* const newItem = m_ItemAllocator.Alloc();
        newItem->pPrev = prevItem;
        newItem->pNext = pItem;
        pItem->pPrev = newItem;
        if(prevItem != VMA_NULL)
        {
            prevItem->pNext = newItem;
        }
        else
        {
            // pItem was the front: the new node becomes the front.
            VMA_HEAVY_ASSERT(m_pFront == pItem);
            m_pFront = newItem;
        }
        ++m_Count;
        return newItem;
    }
    else
        return PushBack();
}
4699 
// Inserts a new node (uninitialized Value) immediately after pItem.
// A null pItem means "after begin-1", i.e. PushFront.
template<typename T>
VmaListItem<T>* VmaRawList<T>::InsertAfter(ItemType* pItem)
{
    if(pItem != VMA_NULL)
    {
        ItemType* const nextItem = pItem->pNext;
        ItemType* const newItem = m_ItemAllocator.Alloc();
        newItem->pNext = nextItem;
        newItem->pPrev = pItem;
        pItem->pNext = newItem;
        if(nextItem != VMA_NULL)
        {
            nextItem->pPrev = newItem;
        }
        else
        {
            // pItem was the back: the new node becomes the back.
            VMA_HEAVY_ASSERT(m_pBack == pItem);
            m_pBack = newItem;
        }
        ++m_Count;
        return newItem;
    }
    else
        return PushFront();
}
4725 
4726 template<typename T>
4727 VmaListItem<T>* VmaRawList<T>::InsertBefore(ItemType* pItem, const T& value)
4728 {
4729  ItemType* const newItem = InsertBefore(pItem);
4730  newItem->Value = value;
4731  return newItem;
4732 }
4733 
4734 template<typename T>
4735 VmaListItem<T>* VmaRawList<T>::InsertAfter(ItemType* pItem, const T& value)
4736 {
4737  ItemType* const newItem = InsertAfter(pItem);
4738  newItem->Value = value;
4739  return newItem;
4740 }
4741 
// STL-style facade over VmaRawList. The AllocatorT is used only for its
// m_pCallbacks member. Iterators represent end() with a null m_pItem,
// which is why operator-- must special-case null (step back from end to Back()).
template<typename T, typename AllocatorT>
class VmaList
{
    VMA_CLASS_NO_COPY(VmaList)
public:
    // Mutable bidirectional iterator over the underlying raw list.
    class iterator
    {
    public:
        iterator() :
            m_pList(VMA_NULL),
            m_pItem(VMA_NULL)
        {
        }

        T& operator*() const
        {
            VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
            return m_pItem->Value;
        }
        T* operator->() const
        {
            VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
            return &m_pItem->Value;
        }

        iterator& operator++()
        {
            VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
            m_pItem = m_pItem->pNext;
            return *this;
        }
        iterator& operator--()
        {
            if(m_pItem != VMA_NULL)
            {
                m_pItem = m_pItem->pPrev;
            }
            else
            {
                // Decrementing end() yields an iterator to the last element.
                VMA_HEAVY_ASSERT(!m_pList->IsEmpty());
                m_pItem = m_pList->Back();
            }
            return *this;
        }

        iterator operator++(int)
        {
            iterator result = *this;
            ++*this;
            return result;
        }
        iterator operator--(int)
        {
            iterator result = *this;
            --*this;
            return result;
        }

        bool operator==(const iterator& rhs) const
        {
            // Comparing iterators from different lists is undefined.
            VMA_HEAVY_ASSERT(m_pList == rhs.m_pList);
            return m_pItem == rhs.m_pItem;
        }
        bool operator!=(const iterator& rhs) const
        {
            VMA_HEAVY_ASSERT(m_pList == rhs.m_pList);
            return m_pItem != rhs.m_pItem;
        }

    private:
        VmaRawList<T>* m_pList;
        VmaListItem<T>* m_pItem; // Null represents end().

        iterator(VmaRawList<T>* pList, VmaListItem<T>* pItem) :
            m_pList(pList),
            m_pItem(pItem)
        {
        }

        friend class VmaList<T, AllocatorT>;
    };

    // Read-only counterpart of iterator; implicitly convertible from it.
    class const_iterator
    {
    public:
        const_iterator() :
            m_pList(VMA_NULL),
            m_pItem(VMA_NULL)
        {
        }

        const_iterator(const iterator& src) :
            m_pList(src.m_pList),
            m_pItem(src.m_pItem)
        {
        }

        const T& operator*() const
        {
            VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
            return m_pItem->Value;
        }
        const T* operator->() const
        {
            VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
            return &m_pItem->Value;
        }

        const_iterator& operator++()
        {
            VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
            m_pItem = m_pItem->pNext;
            return *this;
        }
        const_iterator& operator--()
        {
            if(m_pItem != VMA_NULL)
            {
                m_pItem = m_pItem->pPrev;
            }
            else
            {
                // Decrementing cend() yields an iterator to the last element.
                VMA_HEAVY_ASSERT(!m_pList->IsEmpty());
                m_pItem = m_pList->Back();
            }
            return *this;
        }

        const_iterator operator++(int)
        {
            const_iterator result = *this;
            ++*this;
            return result;
        }
        const_iterator operator--(int)
        {
            const_iterator result = *this;
            --*this;
            return result;
        }

        bool operator==(const const_iterator& rhs) const
        {
            VMA_HEAVY_ASSERT(m_pList == rhs.m_pList);
            return m_pItem == rhs.m_pItem;
        }
        bool operator!=(const const_iterator& rhs) const
        {
            VMA_HEAVY_ASSERT(m_pList == rhs.m_pList);
            return m_pItem != rhs.m_pItem;
        }

    private:
        const_iterator(const VmaRawList<T>* pList, const VmaListItem<T>* pItem) :
            m_pList(pList),
            m_pItem(pItem)
        {
        }

        const VmaRawList<T>* m_pList;
        const VmaListItem<T>* m_pItem; // Null represents cend().

        friend class VmaList<T, AllocatorT>;
    };

    VmaList(const AllocatorT& allocator) : m_RawList(allocator.m_pCallbacks) { }

    bool empty() const { return m_RawList.IsEmpty(); }
    size_t size() const { return m_RawList.GetCount(); }

    iterator begin() { return iterator(&m_RawList, m_RawList.Front()); }
    iterator end() { return iterator(&m_RawList, VMA_NULL); }

    const_iterator cbegin() const { return const_iterator(&m_RawList, m_RawList.Front()); }
    const_iterator cend() const { return const_iterator(&m_RawList, VMA_NULL); }

    void clear() { m_RawList.Clear(); }
    void push_back(const T& value) { m_RawList.PushBack(value); }
    void erase(iterator it) { m_RawList.Remove(it.m_pItem); }
    // Inserts before `it`; inserting before end() appends (see VmaRawList::InsertBefore).
    iterator insert(iterator it, const T& value) { return iterator(&m_RawList, m_RawList.InsertBefore(it.m_pItem, value)); }

private:
    VmaRawList<T> m_RawList;
};
4926 
4927 #endif // #if VMA_USE_STL_LIST
4928 
// class VmaMap
// NOTE: the whole region below is compiled out by `#if 0` - it is kept only
// as reference for a sorted-vector map implementation.

// Unused in this version.
#if 0

#if VMA_USE_STL_UNORDERED_MAP

#define VmaPair std::pair

#define VMA_MAP_TYPE(KeyT, ValueT) \
    std::unordered_map< KeyT, ValueT, std::hash<KeyT>, std::equal_to<KeyT>, VmaStlAllocator< std::pair<KeyT, ValueT> > >

#else // #if VMA_USE_STL_UNORDERED_MAP

// Minimal std::pair substitute (POD-friendly, stored in VmaVector).
template<typename T1, typename T2>
struct VmaPair
{
    T1 first;
    T2 second;

    VmaPair() : first(), second() { }
    VmaPair(const T1& firstSrc, const T2& secondSrc) : first(firstSrc), second(secondSrc) { }
};

/* Class compatible with subset of interface of std::unordered_map.
KeyT, ValueT must be POD because they will be stored in VmaVector.
*/
template<typename KeyT, typename ValueT>
class VmaMap
{
public:
    typedef VmaPair<KeyT, ValueT> PairType;
    typedef PairType* iterator;

    VmaMap(const VmaStlAllocator<PairType>& allocator) : m_Vector(allocator) { }

    iterator begin() { return m_Vector.begin(); }
    iterator end() { return m_Vector.end(); }

    void insert(const PairType& pair);
    iterator find(const KeyT& key);
    void erase(iterator it);

private:
    // Kept sorted by key (VmaPairFirstLess) so find() can binary-search.
    VmaVector< PairType, VmaStlAllocator<PairType> > m_Vector;
};

#define VMA_MAP_TYPE(KeyT, ValueT) VmaMap<KeyT, ValueT>

// Orders pairs by key only; the heterogeneous overload lets the binary search
// compare a stored pair directly against a bare key.
template<typename FirstT, typename SecondT>
struct VmaPairFirstLess
{
    bool operator()(const VmaPair<FirstT, SecondT>& lhs, const VmaPair<FirstT, SecondT>& rhs) const
    {
        return lhs.first < rhs.first;
    }
    bool operator()(const VmaPair<FirstT, SecondT>& lhs, const FirstT& rhsFirst) const
    {
        return lhs.first < rhsFirst;
    }
};

// Inserts the pair at its sorted position (duplicates land adjacent).
template<typename KeyT, typename ValueT>
void VmaMap<KeyT, ValueT>::insert(const PairType& pair)
{
    const size_t indexToInsert = VmaBinaryFindFirstNotLess(
        m_Vector.data(),
        m_Vector.data() + m_Vector.size(),
        pair,
        VmaPairFirstLess<KeyT, ValueT>()) - m_Vector.data();
    VmaVectorInsert(m_Vector, indexToInsert, pair);
}

// Binary-searches for `key`; returns end() when absent.
template<typename KeyT, typename ValueT>
VmaPair<KeyT, ValueT>* VmaMap<KeyT, ValueT>::find(const KeyT& key)
{
    PairType* it = VmaBinaryFindFirstNotLess(
        m_Vector.data(),
        m_Vector.data() + m_Vector.size(),
        key,
        VmaPairFirstLess<KeyT, ValueT>());
    if((it != m_Vector.end()) && (it->first == key))
    {
        return it;
    }
    else
    {
        return m_Vector.end();
    }
}

template<typename KeyT, typename ValueT>
void VmaMap<KeyT, ValueT>::erase(iterator it)
{
    VmaVectorRemove(m_Vector, it - m_Vector.begin());
}

#endif // #if VMA_USE_STL_UNORDERED_MAP

#endif // #if 0
5030 
5032 
class VmaDeviceMemoryBlock;

// Direction of a CPU cache maintenance operation on mapped memory:
// flush (CPU writes -> device) or invalidate (device writes -> CPU).
enum VMA_CACHE_OPERATION { VMA_CACHE_FLUSH, VMA_CACHE_INVALIDATE };
5036 
/*
Internal representation of a single allocation (the public VmaAllocation handle).

Objects of this struct come from a VmaPoolAllocator, so initialization and
cleanup happen via Ctor()/Dtor() rather than a real constructor/destructor.
An allocation is either a sub-range of a VmaDeviceMemoryBlock
(ALLOCATION_TYPE_BLOCK) or owns a whole VkDeviceMemory
(ALLOCATION_TYPE_DEDICATED); the union at the bottom stores one or the other,
selected by m_Type.
*/
struct VmaAllocation_T
{
private:
    // Bit in m_MapCount marking an allocation created persistently mapped
    // (VMA_ALLOCATION_CREATE_MAPPED_BIT). The low 7 bits count vmaMapMemory calls.
    static const uint8_t MAP_COUNT_FLAG_PERSISTENT_MAP = 0x80;

    enum FLAGS
    {
        // Set when m_pUserData holds an owned copy of a null-terminated string.
        FLAG_USER_DATA_STRING = 0x01,
    };

public:
    enum ALLOCATION_TYPE
    {
        ALLOCATION_TYPE_NONE,
        ALLOCATION_TYPE_BLOCK,
        ALLOCATION_TYPE_DEDICATED,
    };

    /*
    This struct is allocated using VmaPoolAllocator.
    */

    // In-place initializer, used instead of a constructor.
    // userDataString: when true, user data is treated as an owned string copy.
    void Ctor(uint32_t currentFrameIndex, bool userDataString)
    {
        m_Alignment = 1;
        m_Size = 0;
        m_pUserData = VMA_NULL;
        m_LastUseFrameIndex = currentFrameIndex;
        m_Type = (uint8_t)ALLOCATION_TYPE_NONE;
        m_SuballocationType = (uint8_t)VMA_SUBALLOCATION_TYPE_UNKNOWN;
        m_MapCount = 0;
        m_Flags = userDataString ? (uint8_t)FLAG_USER_DATA_STRING : 0;

#if VMA_STATS_STRING_ENABLED
        m_CreationFrameIndex = currentFrameIndex;
        m_BufferImageUsage = 0;
#endif
    }

    // In-place cleanup, used instead of a destructor. Only asserts invariants.
    void Dtor()
    {
        VMA_ASSERT((m_MapCount & ~MAP_COUNT_FLAG_PERSISTENT_MAP) == 0 && "Allocation was not unmapped before destruction.");

        // Check if owned string was freed.
        VMA_ASSERT(m_pUserData == VMA_NULL);
    }

    // Turns a fresh (ALLOCATION_TYPE_NONE) object into a suballocation of `block`.
    void InitBlockAllocation(
        VmaDeviceMemoryBlock* block,
        VkDeviceSize offset,
        VkDeviceSize alignment,
        VkDeviceSize size,
        VmaSuballocationType suballocationType,
        bool mapped,
        bool canBecomeLost)
    {
        VMA_ASSERT(m_Type == ALLOCATION_TYPE_NONE);
        VMA_ASSERT(block != VMA_NULL);
        m_Type = (uint8_t)ALLOCATION_TYPE_BLOCK;
        m_Alignment = alignment;
        m_Size = size;
        m_MapCount = mapped ? MAP_COUNT_FLAG_PERSISTENT_MAP : 0;
        m_SuballocationType = (uint8_t)suballocationType;
        m_BlockAllocation.m_Block = block;
        m_BlockAllocation.m_Offset = offset;
        m_BlockAllocation.m_CanBecomeLost = canBecomeLost;
    }

    // Initializes the object directly in "lost" state: a block allocation with a
    // null block. Requires m_LastUseFrameIndex to already be VMA_FRAME_INDEX_LOST.
    void InitLost()
    {
        VMA_ASSERT(m_Type == ALLOCATION_TYPE_NONE);
        VMA_ASSERT(m_LastUseFrameIndex.load() == VMA_FRAME_INDEX_LOST);
        m_Type = (uint8_t)ALLOCATION_TYPE_BLOCK;
        m_BlockAllocation.m_Block = VMA_NULL;
        m_BlockAllocation.m_Offset = 0;
        m_BlockAllocation.m_CanBecomeLost = true;
    }

    // Moves this allocation to another block/offset (used by defragmentation).
    void ChangeBlockAllocation(
        VmaAllocator hAllocator,
        VmaDeviceMemoryBlock* block,
        VkDeviceSize offset);

    // Changes only the offset within the current block.
    void ChangeOffset(VkDeviceSize newOffset);

    // pMappedData not null means allocation is created with MAPPED flag.
    void InitDedicatedAllocation(
        uint32_t memoryTypeIndex,
        VkDeviceMemory hMemory,
        VmaSuballocationType suballocationType,
        void* pMappedData,
        VkDeviceSize size)
    {
        VMA_ASSERT(m_Type == ALLOCATION_TYPE_NONE);
        VMA_ASSERT(hMemory != VK_NULL_HANDLE);
        m_Type = (uint8_t)ALLOCATION_TYPE_DEDICATED;
        m_Alignment = 0;
        m_Size = size;
        m_SuballocationType = (uint8_t)suballocationType;
        m_MapCount = (pMappedData != VMA_NULL) ? MAP_COUNT_FLAG_PERSISTENT_MAP : 0;
        m_DedicatedAllocation.m_MemoryTypeIndex = memoryTypeIndex;
        m_DedicatedAllocation.m_hMemory = hMemory;
        m_DedicatedAllocation.m_pMappedData = pMappedData;
    }

    ALLOCATION_TYPE GetType() const { return (ALLOCATION_TYPE)m_Type; }
    VkDeviceSize GetAlignment() const { return m_Alignment; }
    VkDeviceSize GetSize() const { return m_Size; }
    bool IsUserDataString() const { return (m_Flags & FLAG_USER_DATA_STRING) != 0; }
    void* GetUserData() const { return m_pUserData; }
    void SetUserData(VmaAllocator hAllocator, void* pUserData);
    VmaSuballocationType GetSuballocationType() const { return (VmaSuballocationType)m_SuballocationType; }

    // Valid only for ALLOCATION_TYPE_BLOCK (asserted).
    VmaDeviceMemoryBlock* GetBlock() const
    {
        VMA_ASSERT(m_Type == ALLOCATION_TYPE_BLOCK);
        return m_BlockAllocation.m_Block;
    }
    VkDeviceSize GetOffset() const;
    VkDeviceMemory GetMemory() const;
    uint32_t GetMemoryTypeIndex() const;
    bool IsPersistentMap() const { return (m_MapCount & MAP_COUNT_FLAG_PERSISTENT_MAP) != 0; }
    void* GetMappedData() const;
    bool CanBecomeLost() const;

    uint32_t GetLastUseFrameIndex() const
    {
        return m_LastUseFrameIndex.load();
    }
    // Atomic CAS on the last-use frame index; on failure `expected` receives
    // the current value (compare_exchange_weak semantics - may fail spuriously,
    // so callers retry in a loop).
    bool CompareExchangeLastUseFrameIndex(uint32_t& expected, uint32_t desired)
    {
        return m_LastUseFrameIndex.compare_exchange_weak(expected, desired);
    }
    /*
    - If hAllocation.LastUseFrameIndex + frameInUseCount < allocator.CurrentFrameIndex,
      makes it lost by setting LastUseFrameIndex = VMA_FRAME_INDEX_LOST and returns true.
    - Else, returns false.

    If hAllocation is already lost, assert - you should not call it then.
    If hAllocation was not created with CAN_BECOME_LOST_BIT, assert.
    */
    bool MakeLost(uint32_t currentFrameIndex, uint32_t frameInUseCount);

    // Fills `outInfo` with statistics describing this single dedicated allocation.
    void DedicatedAllocCalcStatsInfo(VmaStatInfo& outInfo)
    {
        VMA_ASSERT(m_Type == ALLOCATION_TYPE_DEDICATED);
        outInfo.blockCount = 1;
        outInfo.allocationCount = 1;
        outInfo.unusedRangeCount = 0;
        outInfo.usedBytes = m_Size;
        outInfo.unusedBytes = 0;
        outInfo.allocationSizeMin = outInfo.allocationSizeMax = m_Size;
        // No unused ranges: min stays at the "empty" sentinel, max at 0.
        outInfo.unusedRangeSizeMin = UINT64_MAX;
        outInfo.unusedRangeSizeMax = 0;
    }

    // Map/unmap helpers dispatched by allocation type.
    void BlockAllocMap();
    void BlockAllocUnmap();
    VkResult DedicatedAllocMap(VmaAllocator hAllocator, void** ppData);
    void DedicatedAllocUnmap(VmaAllocator hAllocator);

#if VMA_STATS_STRING_ENABLED
    uint32_t GetCreationFrameIndex() const { return m_CreationFrameIndex; }
    uint32_t GetBufferImageUsage() const { return m_BufferImageUsage; }

    // Records the buffer/image usage flags; may be set only once (asserted).
    void InitBufferImageUsage(uint32_t bufferImageUsage)
    {
        VMA_ASSERT(m_BufferImageUsage == 0);
        m_BufferImageUsage = bufferImageUsage;
    }

    void PrintParameters(class VmaJsonWriter& json) const;
#endif

private:
    VkDeviceSize m_Alignment;
    VkDeviceSize m_Size;
    // Either an opaque user pointer or an owned string copy (see FLAG_USER_DATA_STRING).
    void* m_pUserData;
    // Atomic: updated concurrently by vmaTouchAllocation/MakeLost.
    VMA_ATOMIC_UINT32 m_LastUseFrameIndex;
    uint8_t m_Type; // ALLOCATION_TYPE
    uint8_t m_SuballocationType; // VmaSuballocationType
    // Bit 0x80 is set when allocation was created with VMA_ALLOCATION_CREATE_MAPPED_BIT.
    // Bits with mask 0x7F are reference counter for vmaMapMemory()/vmaUnmapMemory().
    uint8_t m_MapCount;
    uint8_t m_Flags; // enum FLAGS

    // Allocation out of VmaDeviceMemoryBlock.
    struct BlockAllocation
    {
        VmaDeviceMemoryBlock* m_Block;
        VkDeviceSize m_Offset;
        bool m_CanBecomeLost;
    };

    // Allocation for an object that has its own private VkDeviceMemory.
    struct DedicatedAllocation
    {
        uint32_t m_MemoryTypeIndex;
        VkDeviceMemory m_hMemory;
        void* m_pMappedData; // Not null means memory is mapped.
    };

    // Active member selected by m_Type (BLOCK vs DEDICATED).
    union
    {
        // Allocation out of VmaDeviceMemoryBlock.
        BlockAllocation m_BlockAllocation;
        // Allocation for an object that has its own private VkDeviceMemory.
        DedicatedAllocation m_DedicatedAllocation;
    };

#if VMA_STATS_STRING_ENABLED
    uint32_t m_CreationFrameIndex;
    uint32_t m_BufferImageUsage; // 0 if unknown.
#endif

    // Frees the owned string copy held in m_pUserData (FLAG_USER_DATA_STRING case).
    void FreeUserDataString(VmaAllocator hAllocator);
};
5254 
5255 /*
5256 Represents a region of VmaDeviceMemoryBlock that is either assigned and returned as
5257 allocated memory block or free.
5258 */
struct VmaSuballocation
{
    VkDeviceSize offset; // Offset of the region from the start of the block.
    VkDeviceSize size;   // Size of the region in bytes.
    VmaAllocation hAllocation; // Null for a free region.
    VmaSuballocationType type; // VMA_SUBALLOCATION_TYPE_FREE for a free region.
};
5266 
5267 // Comparator for offsets.
5268 struct VmaSuballocationOffsetLess
5269 {
5270  bool operator()(const VmaSuballocation& lhs, const VmaSuballocation& rhs) const
5271  {
5272  return lhs.offset < rhs.offset;
5273  }
5274 };
5275 struct VmaSuballocationOffsetGreater
5276 {
5277  bool operator()(const VmaSuballocation& lhs, const VmaSuballocation& rhs) const
5278  {
5279  return lhs.offset > rhs.offset;
5280  }
5281 };
5282 
// Doubly-linked list of suballocations covering a whole block, in offset order.
typedef VmaList< VmaSuballocation, VmaStlAllocator<VmaSuballocation> > VmaSuballocationList;

// Cost of one additional allocation lost, as equivalent in bytes.
static const VkDeviceSize VMA_LOST_ALLOCATION_COST = 1048576;

// Which placement strategy produced an allocation request.
enum class VmaAllocationRequestType
{
    Normal,
    // Used by "Linear" algorithm.
    UpperAddress,
    EndOf1st,
    EndOf2nd,
};
5296 
5297 /*
5298 Parameters of planned allocation inside a VmaDeviceMemoryBlock.
5299 
5300 If canMakeOtherLost was false:
5301 - item points to a FREE suballocation.
5302 - itemsToMakeLostCount is 0.
5303 
5304 If canMakeOtherLost was true:
5305 - item points to first of sequence of suballocations, which are either FREE,
5306  or point to VmaAllocations that can become lost.
5307 - itemsToMakeLostCount is the number of VmaAllocations that need to be made lost for
5308  the requested allocation to succeed.
5309 */
struct VmaAllocationRequest
{
    VkDeviceSize offset; // Proposed offset of the new allocation within the block.
    VkDeviceSize sumFreeSize; // Sum size of free items that overlap with proposed allocation.
    VkDeviceSize sumItemSize; // Sum size of items to make lost that overlap with proposed allocation.
    VmaSuballocationList::iterator item;
    size_t itemsToMakeLostCount;
    void* customData; // Algorithm-specific payload, opaque to the caller.
    VmaAllocationRequestType type;

    // Heuristic cost of this request: bytes of live allocations that would be
    // sacrificed plus a fixed per-allocation penalty. Lower is better.
    VkDeviceSize CalcCost() const
    {
        return sumItemSize + itemsToMakeLostCount * VMA_LOST_ALLOCATION_COST;
    }
};
5325 
5326 /*
5327 Data structure used for bookkeeping of allocations and unused ranges of memory
5328 in a single VkDeviceMemory block.
5329 */
// Abstract base for the per-block allocation algorithms
// (Generic, Linear, Buddy - see subclasses below).
class VmaBlockMetadata
{
public:
    // hAllocator is used only to obtain the allocation callbacks.
    VmaBlockMetadata(VmaAllocator hAllocator);
    virtual ~VmaBlockMetadata() { }
    virtual void Init(VkDeviceSize size) { m_Size = size; }

    // Validates all data structures inside this object. If not valid, returns false.
    virtual bool Validate() const = 0;
    VkDeviceSize GetSize() const { return m_Size; }
    virtual size_t GetAllocationCount() const = 0;
    virtual VkDeviceSize GetSumFreeSize() const = 0;
    virtual VkDeviceSize GetUnusedRangeSizeMax() const = 0;
    // Returns true if this block is empty - contains only single free suballocation.
    virtual bool IsEmpty() const = 0;

    virtual void CalcAllocationStatInfo(VmaStatInfo& outInfo) const = 0;
    // Shouldn't modify blockCount.
    virtual void AddPoolStats(VmaPoolStats& inoutStats) const = 0;

#if VMA_STATS_STRING_ENABLED
    virtual void PrintDetailedMap(class VmaJsonWriter& json) const = 0;
#endif

    // Tries to find a place for suballocation with given parameters inside this block.
    // If succeeded, fills pAllocationRequest and returns true.
    // If failed, returns false.
    virtual bool CreateAllocationRequest(
        uint32_t currentFrameIndex,
        uint32_t frameInUseCount,
        VkDeviceSize bufferImageGranularity,
        VkDeviceSize allocSize,
        VkDeviceSize allocAlignment,
        bool upperAddress,
        VmaSuballocationType allocType,
        bool canMakeOtherLost,
        // Always one of VMA_ALLOCATION_CREATE_STRATEGY_* or VMA_ALLOCATION_INTERNAL_STRATEGY_* flags.
        uint32_t strategy,
        VmaAllocationRequest* pAllocationRequest) = 0;

    // Marks lost the allocations listed in a previously created request.
    virtual bool MakeRequestedAllocationsLost(
        uint32_t currentFrameIndex,
        uint32_t frameInUseCount,
        VmaAllocationRequest* pAllocationRequest) = 0;

    // Marks lost every eligible allocation; returns how many were lost.
    virtual uint32_t MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount) = 0;

    virtual VkResult CheckCorruption(const void* pBlockData) = 0;

    // Makes actual allocation based on request. Request must already be checked and valid.
    virtual void Alloc(
        const VmaAllocationRequest& request,
        VmaSuballocationType type,
        VkDeviceSize allocSize,
        VmaAllocation hAllocation) = 0;

    // Frees suballocation assigned to given memory region.
    virtual void Free(const VmaAllocation allocation) = 0;
    virtual void FreeAtOffset(VkDeviceSize offset) = 0;

protected:
    const VkAllocationCallbacks* GetAllocationCallbacks() const { return m_pAllocationCallbacks; }

#if VMA_STATS_STRING_ENABLED
    // Helpers for subclasses' PrintDetailedMap implementations.
    void PrintDetailedMap_Begin(class VmaJsonWriter& json,
        VkDeviceSize unusedBytes,
        size_t allocationCount,
        size_t unusedRangeCount) const;
    void PrintDetailedMap_Allocation(class VmaJsonWriter& json,
        VkDeviceSize offset,
        VmaAllocation hAllocation) const;
    void PrintDetailedMap_UnusedRange(class VmaJsonWriter& json,
        VkDeviceSize offset,
        VkDeviceSize size) const;
    void PrintDetailedMap_End(class VmaJsonWriter& json) const;
#endif

private:
    VkDeviceSize m_Size;
    const VkAllocationCallbacks* m_pAllocationCallbacks;
};
5411 
// Asserts and returns false from the enclosing function when `cond` is false.
// Used by the Validate() implementations below.
#define VMA_VALIDATE(cond) do { if(!(cond)) { \
        VMA_ASSERT(0 && "Validation failed: " #cond); \
        return false; \
    } } while(false)
5416 
/*
Default block metadata algorithm: keeps all regions (used and free) in an
offset-ordered suballocation list, plus a size-sorted vector of the larger
free regions for fast best-fit search.
*/
class VmaBlockMetadata_Generic : public VmaBlockMetadata
{
    VMA_CLASS_NO_COPY(VmaBlockMetadata_Generic)
public:
    VmaBlockMetadata_Generic(VmaAllocator hAllocator);
    virtual ~VmaBlockMetadata_Generic();
    virtual void Init(VkDeviceSize size);

    virtual bool Validate() const;
    virtual size_t GetAllocationCount() const { return m_Suballocations.size() - m_FreeCount; }
    virtual VkDeviceSize GetSumFreeSize() const { return m_SumFreeSize; }
    virtual VkDeviceSize GetUnusedRangeSizeMax() const;
    virtual bool IsEmpty() const;

    virtual void CalcAllocationStatInfo(VmaStatInfo& outInfo) const;
    virtual void AddPoolStats(VmaPoolStats& inoutStats) const;

#if VMA_STATS_STRING_ENABLED
    virtual void PrintDetailedMap(class VmaJsonWriter& json) const;
#endif

    virtual bool CreateAllocationRequest(
        uint32_t currentFrameIndex,
        uint32_t frameInUseCount,
        VkDeviceSize bufferImageGranularity,
        VkDeviceSize allocSize,
        VkDeviceSize allocAlignment,
        bool upperAddress,
        VmaSuballocationType allocType,
        bool canMakeOtherLost,
        uint32_t strategy,
        VmaAllocationRequest* pAllocationRequest);

    virtual bool MakeRequestedAllocationsLost(
        uint32_t currentFrameIndex,
        uint32_t frameInUseCount,
        VmaAllocationRequest* pAllocationRequest);

    virtual uint32_t MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount);

    virtual VkResult CheckCorruption(const void* pBlockData);

    virtual void Alloc(
        const VmaAllocationRequest& request,
        VmaSuballocationType type,
        VkDeviceSize allocSize,
        VmaAllocation hAllocation);

    virtual void Free(const VmaAllocation allocation);
    virtual void FreeAtOffset(VkDeviceSize offset);

    // For defragmentation

    bool IsBufferImageGranularityConflictPossible(
        VkDeviceSize bufferImageGranularity,
        VmaSuballocationType& inOutPrevSuballocType) const;

private:
    friend class VmaDefragmentationAlgorithm_Generic;
    friend class VmaDefragmentationAlgorithm_Fast;

    // Number of suballocations in m_Suballocations that are free.
    uint32_t m_FreeCount;
    // Total bytes covered by free suballocations.
    VkDeviceSize m_SumFreeSize;
    VmaSuballocationList m_Suballocations;
    // Suballocations that are free and have size greater than certain threshold.
    // Sorted by size, ascending.
    VmaVector< VmaSuballocationList::iterator, VmaStlAllocator< VmaSuballocationList::iterator > > m_FreeSuballocationsBySize;

    bool ValidateFreeSuballocationList() const;

    // Checks if requested suballocation with given parameters can be placed in given pFreeSuballocItem.
    // If yes, fills pOffset and returns true. If no, returns false.
    bool CheckAllocation(
        uint32_t currentFrameIndex,
        uint32_t frameInUseCount,
        VkDeviceSize bufferImageGranularity,
        VkDeviceSize allocSize,
        VkDeviceSize allocAlignment,
        VmaSuballocationType allocType,
        VmaSuballocationList::const_iterator suballocItem,
        bool canMakeOtherLost,
        VkDeviceSize* pOffset,
        size_t* itemsToMakeLostCount,
        VkDeviceSize* pSumFreeSize,
        VkDeviceSize* pSumItemSize) const;
    // Given free suballocation, it merges it with following one, which must also be free.
    void MergeFreeWithNext(VmaSuballocationList::iterator item);
    // Releases given suballocation, making it free.
    // Merges it with adjacent free suballocations if applicable.
    // Returns iterator to new free suballocation at this place.
    VmaSuballocationList::iterator FreeSuballocation(VmaSuballocationList::iterator suballocItem);
    // Given free suballocation, it inserts it into sorted list of
    // m_FreeSuballocationsBySize if it's suitable.
    void RegisterFreeSuballocation(VmaSuballocationList::iterator item);
    // Given free suballocation, it removes it from sorted list of
    // m_FreeSuballocationsBySize if it's suitable.
    void UnregisterFreeSuballocation(VmaSuballocationList::iterator item);
};
5516 
5517 /*
5518 Allocations and their references in internal data structure look like this:
5519 
5520 if(m_2ndVectorMode == SECOND_VECTOR_EMPTY):
5521 
5522  0 +-------+
5523  | |
5524  | |
5525  | |
5526  +-------+
5527  | Alloc | 1st[m_1stNullItemsBeginCount]
5528  +-------+
5529  | Alloc | 1st[m_1stNullItemsBeginCount + 1]
5530  +-------+
5531  | ... |
5532  +-------+
5533  | Alloc | 1st[1st.size() - 1]
5534  +-------+
5535  | |
5536  | |
5537  | |
5538 GetSize() +-------+
5539 
5540 if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER):
5541 
5542  0 +-------+
5543  | Alloc | 2nd[0]
5544  +-------+
5545  | Alloc | 2nd[1]
5546  +-------+
5547  | ... |
5548  +-------+
5549  | Alloc | 2nd[2nd.size() - 1]
5550  +-------+
5551  | |
5552  | |
5553  | |
5554  +-------+
5555  | Alloc | 1st[m_1stNullItemsBeginCount]
5556  +-------+
5557  | Alloc | 1st[m_1stNullItemsBeginCount + 1]
5558  +-------+
5559  | ... |
5560  +-------+
5561  | Alloc | 1st[1st.size() - 1]
5562  +-------+
5563  | |
5564 GetSize() +-------+
5565 
5566 if(m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK):
5567 
5568  0 +-------+
5569  | |
5570  | |
5571  | |
5572  +-------+
5573  | Alloc | 1st[m_1stNullItemsBeginCount]
5574  +-------+
5575  | Alloc | 1st[m_1stNullItemsBeginCount + 1]
5576  +-------+
5577  | ... |
5578  +-------+
5579  | Alloc | 1st[1st.size() - 1]
5580  +-------+
5581  | |
5582  | |
5583  | |
5584  +-------+
5585  | Alloc | 2nd[2nd.size() - 1]
5586  +-------+
5587  | ... |
5588  +-------+
5589  | Alloc | 2nd[1]
5590  +-------+
5591  | Alloc | 2nd[0]
5592 GetSize() +-------+
5593 
5594 */
// Linear (ring-buffer / stack / double-stack) allocation algorithm.
// See the large comment with diagrams directly above for the memory layout
// in each SECOND_VECTOR_MODE.
class VmaBlockMetadata_Linear : public VmaBlockMetadata
{
    VMA_CLASS_NO_COPY(VmaBlockMetadata_Linear)
public:
    VmaBlockMetadata_Linear(VmaAllocator hAllocator);
    virtual ~VmaBlockMetadata_Linear();
    virtual void Init(VkDeviceSize size);

    virtual bool Validate() const;
    virtual size_t GetAllocationCount() const;
    virtual VkDeviceSize GetSumFreeSize() const { return m_SumFreeSize; }
    virtual VkDeviceSize GetUnusedRangeSizeMax() const;
    virtual bool IsEmpty() const { return GetAllocationCount() == 0; }

    virtual void CalcAllocationStatInfo(VmaStatInfo& outInfo) const;
    virtual void AddPoolStats(VmaPoolStats& inoutStats) const;

#if VMA_STATS_STRING_ENABLED
    virtual void PrintDetailedMap(class VmaJsonWriter& json) const;
#endif

    virtual bool CreateAllocationRequest(
        uint32_t currentFrameIndex,
        uint32_t frameInUseCount,
        VkDeviceSize bufferImageGranularity,
        VkDeviceSize allocSize,
        VkDeviceSize allocAlignment,
        bool upperAddress,
        VmaSuballocationType allocType,
        bool canMakeOtherLost,
        uint32_t strategy,
        VmaAllocationRequest* pAllocationRequest);

    virtual bool MakeRequestedAllocationsLost(
        uint32_t currentFrameIndex,
        uint32_t frameInUseCount,
        VmaAllocationRequest* pAllocationRequest);

    virtual uint32_t MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount);

    virtual VkResult CheckCorruption(const void* pBlockData);

    virtual void Alloc(
        const VmaAllocationRequest& request,
        VmaSuballocationType type,
        VkDeviceSize allocSize,
        VmaAllocation hAllocation);

    virtual void Free(const VmaAllocation allocation);
    virtual void FreeAtOffset(VkDeviceSize offset);

private:
    /*
    There are two suballocation vectors, used in ping-pong way.
    The one with index m_1stVectorIndex is called 1st.
    The one with index (m_1stVectorIndex ^ 1) is called 2nd.
    2nd can be non-empty only when 1st is not empty.
    When 2nd is not empty, m_2ndVectorMode indicates its mode of operation.
    */
    typedef VmaVector< VmaSuballocation, VmaStlAllocator<VmaSuballocation> > SuballocationVectorType;

    enum SECOND_VECTOR_MODE
    {
        SECOND_VECTOR_EMPTY,
        /*
        Suballocations in 2nd vector are created later than the ones in 1st, but they
        all have smaller offset.
        */
        SECOND_VECTOR_RING_BUFFER,
        /*
        Suballocations in 2nd vector are upper side of double stack.
        They all have offsets higher than those in 1st vector.
        Top of this stack means smaller offsets, but higher indices in this vector.
        */
        SECOND_VECTOR_DOUBLE_STACK,
    };

    VkDeviceSize m_SumFreeSize;
    SuballocationVectorType m_Suballocations0, m_Suballocations1;
    uint32_t m_1stVectorIndex;
    SECOND_VECTOR_MODE m_2ndVectorMode;

    // Accessors hiding the ping-pong between the two vectors.
    SuballocationVectorType& AccessSuballocations1st() { return m_1stVectorIndex ? m_Suballocations1 : m_Suballocations0; }
    SuballocationVectorType& AccessSuballocations2nd() { return m_1stVectorIndex ? m_Suballocations0 : m_Suballocations1; }
    const SuballocationVectorType& AccessSuballocations1st() const { return m_1stVectorIndex ? m_Suballocations1 : m_Suballocations0; }
    const SuballocationVectorType& AccessSuballocations2nd() const { return m_1stVectorIndex ? m_Suballocations0 : m_Suballocations1; }

    // Number of items in 1st vector with hAllocation = null at the beginning.
    size_t m_1stNullItemsBeginCount;
    // Number of other items in 1st vector with hAllocation = null somewhere in the middle.
    size_t m_1stNullItemsMiddleCount;
    // Number of items in 2nd vector with hAllocation = null.
    size_t m_2ndNullItemsCount;

    bool ShouldCompact1st() const;
    void CleanupAfterFree();

    bool CreateAllocationRequest_LowerAddress(
        uint32_t currentFrameIndex,
        uint32_t frameInUseCount,
        VkDeviceSize bufferImageGranularity,
        VkDeviceSize allocSize,
        VkDeviceSize allocAlignment,
        VmaSuballocationType allocType,
        bool canMakeOtherLost,
        uint32_t strategy,
        VmaAllocationRequest* pAllocationRequest);
    bool CreateAllocationRequest_UpperAddress(
        uint32_t currentFrameIndex,
        uint32_t frameInUseCount,
        VkDeviceSize bufferImageGranularity,
        VkDeviceSize allocSize,
        VkDeviceSize allocAlignment,
        VmaSuballocationType allocType,
        bool canMakeOtherLost,
        uint32_t strategy,
        VmaAllocationRequest* pAllocationRequest);
};
5713 
5714 /*
5715 - GetSize() is the original size of allocated memory block.
5716 - m_UsableSize is this size aligned down to a power of two.
5717  All allocations and calculations happen relative to m_UsableSize.
5718 - GetUnusableSize() is the difference between them.
 It is reported as separate, unused range, not available for allocations.
5720 
5721 Node at level 0 has size = m_UsableSize.
5722 Each next level contains nodes with size 2 times smaller than current level.
5723 m_LevelCount is the maximum number of levels to use in the current object.
5724 */
// Buddy allocator over the block: a binary tree of power-of-two nodes.
// See the comment directly above for m_UsableSize / level semantics.
class VmaBlockMetadata_Buddy : public VmaBlockMetadata
{
    VMA_CLASS_NO_COPY(VmaBlockMetadata_Buddy)
public:
    VmaBlockMetadata_Buddy(VmaAllocator hAllocator);
    virtual ~VmaBlockMetadata_Buddy();
    virtual void Init(VkDeviceSize size);

    virtual bool Validate() const;
    virtual size_t GetAllocationCount() const { return m_AllocationCount; }
    virtual VkDeviceSize GetSumFreeSize() const { return m_SumFreeSize + GetUnusableSize(); }
    virtual VkDeviceSize GetUnusedRangeSizeMax() const;
    virtual bool IsEmpty() const { return m_Root->type == Node::TYPE_FREE; }

    virtual void CalcAllocationStatInfo(VmaStatInfo& outInfo) const;
    virtual void AddPoolStats(VmaPoolStats& inoutStats) const;

#if VMA_STATS_STRING_ENABLED
    virtual void PrintDetailedMap(class VmaJsonWriter& json) const;
#endif

    virtual bool CreateAllocationRequest(
        uint32_t currentFrameIndex,
        uint32_t frameInUseCount,
        VkDeviceSize bufferImageGranularity,
        VkDeviceSize allocSize,
        VkDeviceSize allocAlignment,
        bool upperAddress,
        VmaSuballocationType allocType,
        bool canMakeOtherLost,
        uint32_t strategy,
        VmaAllocationRequest* pAllocationRequest);

    virtual bool MakeRequestedAllocationsLost(
        uint32_t currentFrameIndex,
        uint32_t frameInUseCount,
        VmaAllocationRequest* pAllocationRequest);

    virtual uint32_t MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount);

    // Corruption detection is not supported by this algorithm.
    virtual VkResult CheckCorruption(const void* pBlockData) { return VK_ERROR_FEATURE_NOT_PRESENT; }

    virtual void Alloc(
        const VmaAllocationRequest& request,
        VmaSuballocationType type,
        VkDeviceSize allocSize,
        VmaAllocation hAllocation);

    virtual void Free(const VmaAllocation allocation) { FreeAtOffset(allocation, allocation->GetOffset()); }
    virtual void FreeAtOffset(VkDeviceSize offset) { FreeAtOffset(VMA_NULL, offset); }

private:
    static const VkDeviceSize MIN_NODE_SIZE = 32;
    static const size_t MAX_LEVELS = 30;

    // Counters accumulated while walking the tree in Validate().
    struct ValidationContext
    {
        size_t calculatedAllocationCount;
        size_t calculatedFreeCount;
        VkDeviceSize calculatedSumFreeSize;

        ValidationContext() :
            calculatedAllocationCount(0),
            calculatedFreeCount(0),
            calculatedSumFreeSize(0) { }
    };

    // One node of the buddy tree; the union payload depends on `type`.
    struct Node
    {
        VkDeviceSize offset;
        enum TYPE
        {
            TYPE_FREE,
            TYPE_ALLOCATION,
            TYPE_SPLIT,
            TYPE_COUNT
        } type;
        Node* parent;
        Node* buddy;

        union
        {
            struct
            {
                // Intrusive links within the per-level free list.
                Node* prev;
                Node* next;
            } free;
            struct
            {
                VmaAllocation alloc;
            } allocation;
            struct
            {
                // Right child is leftChild->buddy.
                Node* leftChild;
            } split;
        };
    };

    // Size of the memory block aligned down to a power of two.
    VkDeviceSize m_UsableSize;
    uint32_t m_LevelCount;

    Node* m_Root;
    // Per-level doubly-linked lists of free nodes.
    struct {
        Node* front;
        Node* back;
    } m_FreeList[MAX_LEVELS];
    // Number of nodes in the tree with type == TYPE_ALLOCATION.
    size_t m_AllocationCount;
    // Number of nodes in the tree with type == TYPE_FREE.
    size_t m_FreeCount;
    // This includes space wasted due to internal fragmentation. Doesn't include unusable size.
    VkDeviceSize m_SumFreeSize;

    VkDeviceSize GetUnusableSize() const { return GetSize() - m_UsableSize; }
    void DeleteNode(Node* node);
    bool ValidateNode(ValidationContext& ctx, const Node* parent, const Node* curr, uint32_t level, VkDeviceSize levelNodeSize) const;
    uint32_t AllocSizeToLevel(VkDeviceSize allocSize) const;
    inline VkDeviceSize LevelToNodeSize(uint32_t level) const { return m_UsableSize >> level; }
    // Alloc passed just for validation. Can be null.
    void FreeAtOffset(VmaAllocation alloc, VkDeviceSize offset);
    void CalcAllocationStatInfoNode(VmaStatInfo& outInfo, const Node* node, VkDeviceSize levelNodeSize) const;
    // Adds node to the front of FreeList at given level.
    // node->type must be FREE.
    // node->free.prev, next can be undefined.
    void AddToFreeListFront(uint32_t level, Node* node);
    // Removes node from FreeList at given level.
    // node->type must be FREE.
    // node->free.prev, next stay untouched.
    void RemoveFromFreeList(uint32_t level, Node* node);

#if VMA_STATS_STRING_ENABLED
    void PrintDetailedMapNode(class VmaJsonWriter& json, const Node* node, VkDeviceSize levelNodeSize) const;
#endif
};
5860 
5861 /*
5862 Represents a single block of device memory (`VkDeviceMemory`) with all the
5863 data about its regions (aka suballocations, #VmaAllocation), assigned and free.
5864 
5865 Thread-safety: This class must be externally synchronized.
5866 */
class VmaDeviceMemoryBlock
{
    VMA_CLASS_NO_COPY(VmaDeviceMemoryBlock)
public:
    // Allocation bookkeeping for this block; concrete type depends on `algorithm`
    // passed to Init(). Owned by this object.
    VmaBlockMetadata* m_pMetadata;

    VmaDeviceMemoryBlock(VmaAllocator hAllocator);

    // Destroy() must have been called first: memory released and fully unmapped.
    ~VmaDeviceMemoryBlock()
    {
        VMA_ASSERT(m_MapCount == 0 && "VkDeviceMemory block is being destroyed while it is still mapped.");
        VMA_ASSERT(m_hMemory == VK_NULL_HANDLE);
    }

    // Always call after construction.
    void Init(
        VmaAllocator hAllocator,
        VmaPool hParentPool,
        uint32_t newMemoryTypeIndex,
        VkDeviceMemory newMemory,
        VkDeviceSize newSize,
        uint32_t id,
        uint32_t algorithm);
    // Always call before destruction.
    void Destroy(VmaAllocator allocator);

    VmaPool GetParentPool() const { return m_hParentPool; }
    VkDeviceMemory GetDeviceMemory() const { return m_hMemory; }
    uint32_t GetMemoryTypeIndex() const { return m_MemoryTypeIndex; }
    uint32_t GetId() const { return m_Id; }
    void* GetMappedData() const { return m_pMappedData; }

    // Validates all data structures inside this object. If not valid, returns false.
    bool Validate() const;

    VkResult CheckCorruption(VmaAllocator hAllocator);

    // Reference-counted mapping: `count` is added to m_MapCount.
    // ppData can be null.
    VkResult Map(VmaAllocator hAllocator, uint32_t count, void** ppData);
    void Unmap(VmaAllocator hAllocator, uint32_t count);

    // Corruption-detection helpers: write/check guard values around an allocation.
    VkResult WriteMagicValueAroundAllocation(VmaAllocator hAllocator, VkDeviceSize allocOffset, VkDeviceSize allocSize);
    VkResult ValidateMagicValueAroundAllocation(VmaAllocator hAllocator, VkDeviceSize allocOffset, VkDeviceSize allocSize);

    VkResult BindBufferMemory(
        const VmaAllocator hAllocator,
        const VmaAllocation hAllocation,
        VkDeviceSize allocationLocalOffset,
        VkBuffer hBuffer,
        const void* pNext);
    VkResult BindImageMemory(
        const VmaAllocator hAllocator,
        const VmaAllocation hAllocation,
        VkDeviceSize allocationLocalOffset,
        VkImage hImage,
        const void* pNext);

private:
    VmaPool m_hParentPool; // VK_NULL_HANDLE if not belongs to custom pool.
    uint32_t m_MemoryTypeIndex;
    uint32_t m_Id;
    VkDeviceMemory m_hMemory;

    /*
    Protects access to m_hMemory so it's not used by multiple threads simultaneously, e.g. vkMapMemory, vkBindBufferMemory.
    Also protects m_MapCount, m_pMappedData.
    Allocations, deallocations, any change in m_pMetadata is protected by parent's VmaBlockVector::m_Mutex.
    */
    VMA_MUTEX m_Mutex;
    uint32_t m_MapCount;
    void* m_pMappedData;
};
5939 
// Strict-weak ordering of raw pointers by address.
// NOTE(review): relational comparison of pointers into unrelated objects is
// unspecified in standard C++ (std::less would guarantee a total order);
// original code relies on the flat address space of supported platforms.
struct VmaPointerLess
{
    bool operator()(const void* pLeft, const void* pRight) const
    {
        const bool leftIsBefore = pLeft < pRight;
        return leftIsBefore;
    }
};
5947 
// One move planned by a defragmentation algorithm: copy `size` bytes from
// offset `srcOffset` in block `srcBlockIndex` to offset `dstOffset` in block
// `dstBlockIndex`. Plain data; the actual copy is executed elsewhere
// (ApplyDefragmentationMovesCpu/Gpu in VmaBlockVector).
struct VmaDefragmentationMove
{
    size_t srcBlockIndex;   // Index of source block in the block vector.
    size_t dstBlockIndex;   // Index of destination block in the block vector.
    VkDeviceSize srcOffset; // Byte offset of the data inside the source block.
    VkDeviceSize dstOffset; // Byte offset of the data inside the destination block.
    VkDeviceSize size;      // Number of bytes to move.
};
5956 
5957 class VmaDefragmentationAlgorithm;
5958 
/*
Sequence of VmaDeviceMemoryBlock. Represents memory blocks allocated for a specific
Vulkan memory type.

Synchronized internally with a mutex.
*/
struct VmaBlockVector
{
    VMA_CLASS_NO_COPY(VmaBlockVector)
public:
    VmaBlockVector(
        VmaAllocator hAllocator,
        VmaPool hParentPool,
        uint32_t memoryTypeIndex,
        VkDeviceSize preferredBlockSize,
        size_t minBlockCount,
        size_t maxBlockCount,
        VkDeviceSize bufferImageGranularity,
        uint32_t frameInUseCount,
        bool isCustomPool,
        bool explicitBlockSize,
        uint32_t algorithm);
    ~VmaBlockVector();

    // Ensures at least m_MinBlockCount blocks exist (used at pool creation).
    VkResult CreateMinBlocks();

    // Simple accessors for the immutable configuration of this vector.
    VmaPool GetParentPool() const { return m_hParentPool; }
    uint32_t GetMemoryTypeIndex() const { return m_MemoryTypeIndex; }
    VkDeviceSize GetPreferredBlockSize() const { return m_PreferredBlockSize; }
    VkDeviceSize GetBufferImageGranularity() const { return m_BufferImageGranularity; }
    uint32_t GetFrameInUseCount() const { return m_FrameInUseCount; }
    uint32_t GetAlgorithm() const { return m_Algorithm; }

    void GetPoolStats(VmaPoolStats* pStats);

    bool IsEmpty() const { return m_Blocks.empty(); }
    bool IsCorruptionDetectionEnabled() const;

    // Allocates `allocationCount` allocations; results written to pAllocations.
    VkResult Allocate(
        uint32_t currentFrameIndex,
        VkDeviceSize size,
        VkDeviceSize alignment,
        const VmaAllocationCreateInfo& createInfo,
        VmaSuballocationType suballocType,
        size_t allocationCount,
        VmaAllocation* pAllocations);

    void Free(
        VmaAllocation hAllocation);

    // Adds statistics of this BlockVector to pStats.
    void AddStats(VmaStats* pStats);

#if VMA_STATS_STRING_ENABLED
    void PrintDetailedMap(class VmaJsonWriter& json);
#endif

    void MakePoolAllocationsLost(
        uint32_t currentFrameIndex,
        size_t* pLostAllocationCount);
    VkResult CheckCorruption();

    // Saves results in pCtx->res.
    void Defragment(
        class VmaBlockVectorDefragmentationContext* pCtx,
        VmaDefragmentationStats* pStats,
        VkDeviceSize& maxCpuBytesToMove, uint32_t& maxCpuAllocationsToMove,
        VkDeviceSize& maxGpuBytesToMove, uint32_t& maxGpuAllocationsToMove,
        VkCommandBuffer commandBuffer);
    void DefragmentationEnd(
        class VmaBlockVectorDefragmentationContext* pCtx,
        VmaDefragmentationStats* pStats);

    // To be used only while the m_Mutex is locked. Used during defragmentation.

    size_t GetBlockCount() const { return m_Blocks.size(); }
    VmaDeviceMemoryBlock* GetBlock(size_t index) const { return m_Blocks[index]; }
    size_t CalcAllocationCount() const;
    bool IsBufferImageGranularityConflictPossible() const;

private:
    friend class VmaDefragmentationAlgorithm_Generic;

    const VmaAllocator m_hAllocator;
    const VmaPool m_hParentPool;
    const uint32_t m_MemoryTypeIndex;
    const VkDeviceSize m_PreferredBlockSize;
    const size_t m_MinBlockCount;
    const size_t m_MaxBlockCount;
    const VkDeviceSize m_BufferImageGranularity;
    const uint32_t m_FrameInUseCount;
    const bool m_IsCustomPool;
    const bool m_ExplicitBlockSize;
    const uint32_t m_Algorithm;
    /* There can be at most one allocation that is completely empty - a
    hysteresis to avoid pessimistic case of alternating creation and destruction
    of a VkDeviceMemory. */
    bool m_HasEmptyBlock;
    // Guards m_Blocks and related mutable state below.
    VMA_RW_MUTEX m_Mutex;
    // Incrementally sorted by sumFreeSize, ascending.
    VmaVector< VmaDeviceMemoryBlock*, VmaStlAllocator<VmaDeviceMemoryBlock*> > m_Blocks;
    uint32_t m_NextBlockId;

    VkDeviceSize CalcMaxBlockSize() const;

    // Finds and removes given block from vector.
    void Remove(VmaDeviceMemoryBlock* pBlock);

    // Performs single step in sorting m_Blocks. They may not be fully sorted
    // after this call.
    void IncrementallySortBlocks();

    // Allocates a single allocation (one "page" of the multi-allocation Allocate()).
    VkResult AllocatePage(
        uint32_t currentFrameIndex,
        VkDeviceSize size,
        VkDeviceSize alignment,
        const VmaAllocationCreateInfo& createInfo,
        VmaSuballocationType suballocType,
        VmaAllocation* pAllocation);

    // To be used only without CAN_MAKE_OTHER_LOST flag.
    VkResult AllocateFromBlock(
        VmaDeviceMemoryBlock* pBlock,
        uint32_t currentFrameIndex,
        VkDeviceSize size,
        VkDeviceSize alignment,
        VmaAllocationCreateFlags allocFlags,
        void* pUserData,
        VmaSuballocationType suballocType,
        uint32_t strategy,
        VmaAllocation* pAllocation);

    VkResult CreateBlock(VkDeviceSize blockSize, size_t* pNewBlockIndex);

    // Saves result to pCtx->res.
    void ApplyDefragmentationMovesCpu(
        class VmaBlockVectorDefragmentationContext* pDefragCtx,
        const VmaVector< VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> >& moves);
    // Saves result to pCtx->res.
    void ApplyDefragmentationMovesGpu(
        class VmaBlockVectorDefragmentationContext* pDefragCtx,
        const VmaVector< VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> >& moves,
        VkCommandBuffer commandBuffer);

    /*
    Used during defragmentation. pDefragmentationStats is optional. It's in/out
    - updated with new data.
    */
    void FreeEmptyBlocks(VmaDefragmentationStats* pDefragmentationStats);
};
6110 
// Internal implementation of a custom memory pool (the VmaPool handle type).
struct VmaPool_T
{
    VMA_CLASS_NO_COPY(VmaPool_T)
public:
    // The pool's sequence of memory blocks; manages all allocations of this pool.
    VmaBlockVector m_BlockVector;

    VmaPool_T(
        VmaAllocator hAllocator,
        const VmaPoolCreateInfo& createInfo,
        VkDeviceSize preferredBlockSize);
    ~VmaPool_T();

    uint32_t GetId() const { return m_Id; }
    // The id may be assigned only once; asserts it is still the initial 0.
    void SetId(uint32_t id) { VMA_ASSERT(m_Id == 0); m_Id = id; }

#if VMA_STATS_STRING_ENABLED
    //void PrintDetailedMap(class VmaStringBuilder& sb);
#endif

private:
    uint32_t m_Id;
};
6133 
/*
Performs defragmentation:

- Updates `pBlockVector->m_pMetadata`.
- Updates allocations by calling ChangeBlockAllocation() or ChangeOffset().
- Does not move actual data, only returns requested moves as `moves`.
*/
class VmaDefragmentationAlgorithm
{
    VMA_CLASS_NO_COPY(VmaDefragmentationAlgorithm)
public:
    VmaDefragmentationAlgorithm(
        VmaAllocator hAllocator,
        VmaBlockVector* pBlockVector,
        uint32_t currentFrameIndex) :
        m_hAllocator(hAllocator),
        m_pBlockVector(pBlockVector),
        m_CurrentFrameIndex(currentFrameIndex)
    {
    }
    virtual ~VmaDefragmentationAlgorithm()
    {
    }

    // Registers one allocation as a candidate for moving.
    // NOTE(review): pChanged appears to be an optional out-flag reporting whether
    // the allocation was moved — confirm against the implementations below.
    virtual void AddAllocation(VmaAllocation hAlloc, VkBool32* pChanged) = 0;
    // Marks every allocation in the block vector as a candidate for moving.
    virtual void AddAll() = 0;

    // Plans moves up to the given byte/count limits and appends them to `moves`.
    // Does not touch actual memory contents.
    virtual VkResult Defragment(
        VmaVector< VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> >& moves,
        VkDeviceSize maxBytesToMove,
        uint32_t maxAllocationsToMove) = 0;

    virtual VkDeviceSize GetBytesMoved() const = 0;
    virtual uint32_t GetAllocationsMoved() const = 0;

protected:
    VmaAllocator const m_hAllocator;
    VmaBlockVector* const m_pBlockVector;
    const uint32_t m_CurrentFrameIndex;

    // Pairs an allocation with the caller-provided "changed" output flag.
    struct AllocationInfo
    {
        VmaAllocation m_hAllocation;
        VkBool32* m_pChanged;

        AllocationInfo() :
            m_hAllocation(VK_NULL_HANDLE),
            m_pChanged(VMA_NULL)
        {
        }
        AllocationInfo(VmaAllocation hAlloc, VkBool32* pChanged) :
            m_hAllocation(hAlloc),
            m_pChanged(pChanged)
        {
        }
    };
};
6191 
// General-purpose defragmentation algorithm: considers registered allocations
// across all blocks of the vector and plans moves between blocks.
class VmaDefragmentationAlgorithm_Generic : public VmaDefragmentationAlgorithm
{
    VMA_CLASS_NO_COPY(VmaDefragmentationAlgorithm_Generic)
public:
    VmaDefragmentationAlgorithm_Generic(
        VmaAllocator hAllocator,
        VmaBlockVector* pBlockVector,
        uint32_t currentFrameIndex,
        bool overlappingMoveSupported);
    virtual ~VmaDefragmentationAlgorithm_Generic();

    virtual void AddAllocation(VmaAllocation hAlloc, VkBool32* pChanged);
    virtual void AddAll() { m_AllAllocations = true; }

    virtual VkResult Defragment(
        VmaVector< VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> >& moves,
        VkDeviceSize maxBytesToMove,
        uint32_t maxAllocationsToMove);

    virtual VkDeviceSize GetBytesMoved() const { return m_BytesMoved; }
    virtual uint32_t GetAllocationsMoved() const { return m_AllocationsMoved; }

private:
    uint32_t m_AllocationCount;
    bool m_AllAllocations;

    // Running totals reported via GetBytesMoved()/GetAllocationsMoved().
    VkDeviceSize m_BytesMoved;
    uint32_t m_AllocationsMoved;

    // Orders AllocationInfo by allocation size, descending.
    struct AllocationInfoSizeGreater
    {
        bool operator()(const AllocationInfo& lhs, const AllocationInfo& rhs) const
        {
            return lhs.m_hAllocation->GetSize() > rhs.m_hAllocation->GetSize();
        }
    };

    // Orders AllocationInfo by allocation offset, descending.
    struct AllocationInfoOffsetGreater
    {
        bool operator()(const AllocationInfo& lhs, const AllocationInfo& rhs) const
        {
            return lhs.m_hAllocation->GetOffset() > rhs.m_hAllocation->GetOffset();
        }
    };

    // Per-block bookkeeping for this algorithm.
    struct BlockInfo
    {
        size_t m_OriginalBlockIndex;
        VmaDeviceMemoryBlock* m_pBlock;
        bool m_HasNonMovableAllocations;
        VmaVector< AllocationInfo, VmaStlAllocator<AllocationInfo> > m_Allocations;

        BlockInfo(const VkAllocationCallbacks* pAllocationCallbacks) :
            m_OriginalBlockIndex(SIZE_MAX),
            m_pBlock(VMA_NULL),
            m_HasNonMovableAllocations(true),
            m_Allocations(pAllocationCallbacks)
        {
        }

        // The block contains non-movable allocations if not all of its
        // allocations were registered for defragmentation.
        void CalcHasNonMovableAllocations()
        {
            const size_t blockAllocCount = m_pBlock->m_pMetadata->GetAllocationCount();
            const size_t defragmentAllocCount = m_Allocations.size();
            m_HasNonMovableAllocations = blockAllocCount != defragmentAllocCount;
        }

        void SortAllocationsBySizeDescending()
        {
            VMA_SORT(m_Allocations.begin(), m_Allocations.end(), AllocationInfoSizeGreater());
        }

        void SortAllocationsByOffsetDescending()
        {
            VMA_SORT(m_Allocations.begin(), m_Allocations.end(), AllocationInfoOffsetGreater());
        }
    };

    // Compares BlockInfo* by the address of the underlying memory block.
    // The heterogeneous overload allows searching a sorted BlockInfo* range
    // for a plain VmaDeviceMemoryBlock*.
    struct BlockPointerLess
    {
        bool operator()(const BlockInfo* pLhsBlockInfo, const VmaDeviceMemoryBlock* pRhsBlock) const
        {
            return pLhsBlockInfo->m_pBlock < pRhsBlock;
        }
        bool operator()(const BlockInfo* pLhsBlockInfo, const BlockInfo* pRhsBlockInfo) const
        {
            return pLhsBlockInfo->m_pBlock < pRhsBlockInfo->m_pBlock;
        }
    };

    // 1. Blocks with some non-movable allocations go first.
    // 2. Blocks with smaller sumFreeSize go first.
    struct BlockInfoCompareMoveDestination
    {
        bool operator()(const BlockInfo* pLhsBlockInfo, const BlockInfo* pRhsBlockInfo) const
        {
            if(pLhsBlockInfo->m_HasNonMovableAllocations && !pRhsBlockInfo->m_HasNonMovableAllocations)
            {
                return true;
            }
            if(!pLhsBlockInfo->m_HasNonMovableAllocations && pRhsBlockInfo->m_HasNonMovableAllocations)
            {
                return false;
            }
            if(pLhsBlockInfo->m_pBlock->m_pMetadata->GetSumFreeSize() < pRhsBlockInfo->m_pBlock->m_pMetadata->GetSumFreeSize())
            {
                return true;
            }
            return false;
        }
    };

    typedef VmaVector< BlockInfo*, VmaStlAllocator<BlockInfo*> > BlockInfoVector;
    BlockInfoVector m_Blocks;

    // Plans one round of moves; called from Defragment().
    VkResult DefragmentRound(
        VmaVector< VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> >& moves,
        VkDeviceSize maxBytesToMove,
        uint32_t maxAllocationsToMove);

    size_t CalcBlocksWithNonMovableCount() const;

    static bool MoveMakesSense(
        size_t dstBlockIndex, VkDeviceSize dstOffset,
        size_t srcBlockIndex, VkDeviceSize srcOffset);
};
6318 
// Faster defragmentation algorithm. Tracks a small fixed set of free regions
// (FreeSpaceDatabase) instead of analyzing every allocation individually.
class VmaDefragmentationAlgorithm_Fast : public VmaDefragmentationAlgorithm
{
    VMA_CLASS_NO_COPY(VmaDefragmentationAlgorithm_Fast)
public:
    VmaDefragmentationAlgorithm_Fast(
        VmaAllocator hAllocator,
        VmaBlockVector* pBlockVector,
        uint32_t currentFrameIndex,
        bool overlappingMoveSupported);
    virtual ~VmaDefragmentationAlgorithm_Fast();

    // This algorithm ignores individual registrations and only counts them;
    // the pChanged out-flag is not used here.
    virtual void AddAllocation(VmaAllocation hAlloc, VkBool32* pChanged) { ++m_AllocationCount; }
    virtual void AddAll() { m_AllAllocations = true; }

    virtual VkResult Defragment(
        VmaVector< VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> >& moves,
        VkDeviceSize maxBytesToMove,
        uint32_t maxAllocationsToMove);

    virtual VkDeviceSize GetBytesMoved() const { return m_BytesMoved; }
    virtual uint32_t GetAllocationsMoved() const { return m_AllocationsMoved; }

private:
    struct BlockInfo
    {
        size_t origBlockIndex;
    };

    // Fixed-capacity (MAX_COUNT) registry of the most useful free regions,
    // used to find a destination for an allocation being moved.
    class FreeSpaceDatabase
    {
    public:
        FreeSpaceDatabase()
        {
            // Mark all slots invalid (blockInfoIndex == SIZE_MAX).
            FreeSpace s = {};
            s.blockInfoIndex = SIZE_MAX;
            for(size_t i = 0; i < MAX_COUNT; ++i)
            {
                m_FreeSpaces[i] = s;
            }
        }

        // Records a free region, replacing the smallest tracked one if full.
        // Regions below VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER are ignored.
        void Register(size_t blockInfoIndex, VkDeviceSize offset, VkDeviceSize size)
        {
            if(size < VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER)
            {
                return;
            }

            // Find first invalid or the smallest structure.
            size_t bestIndex = SIZE_MAX;
            for(size_t i = 0; i < MAX_COUNT; ++i)
            {
                // Empty structure.
                if(m_FreeSpaces[i].blockInfoIndex == SIZE_MAX)
                {
                    bestIndex = i;
                    break;
                }
                if(m_FreeSpaces[i].size < size &&
                    (bestIndex == SIZE_MAX || m_FreeSpaces[bestIndex].size > m_FreeSpaces[i].size))
                {
                    bestIndex = i;
                }
            }

            if(bestIndex != SIZE_MAX)
            {
                m_FreeSpaces[bestIndex].blockInfoIndex = blockInfoIndex;
                m_FreeSpaces[bestIndex].offset = offset;
                m_FreeSpaces[bestIndex].size = size;
            }
        }

        // Finds a tracked region that fits `size` bytes at `alignment`;
        // on success outputs the destination and shrinks/invalidates the slot.
        bool Fetch(VkDeviceSize alignment, VkDeviceSize size,
            size_t& outBlockInfoIndex, VkDeviceSize& outDstOffset)
        {
            size_t bestIndex = SIZE_MAX;
            VkDeviceSize bestFreeSpaceAfter = 0;
            for(size_t i = 0; i < MAX_COUNT; ++i)
            {
                // Structure is valid.
                if(m_FreeSpaces[i].blockInfoIndex != SIZE_MAX)
                {
                    const VkDeviceSize dstOffset = VmaAlignUp(m_FreeSpaces[i].offset, alignment);
                    // Allocation fits into this structure.
                    if(dstOffset + size <= m_FreeSpaces[i].offset + m_FreeSpaces[i].size)
                    {
                        const VkDeviceSize freeSpaceAfter = (m_FreeSpaces[i].offset + m_FreeSpaces[i].size) -
                            (dstOffset + size);
                        // Prefer the slot leaving the most space after the allocation.
                        if(bestIndex == SIZE_MAX || freeSpaceAfter > bestFreeSpaceAfter)
                        {
                            bestIndex = i;
                            bestFreeSpaceAfter = freeSpaceAfter;
                        }
                    }
                }
            }

            if(bestIndex != SIZE_MAX)
            {
                outBlockInfoIndex = m_FreeSpaces[bestIndex].blockInfoIndex;
                outDstOffset = VmaAlignUp(m_FreeSpaces[bestIndex].offset, alignment);

                if(bestFreeSpaceAfter >= VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER)
                {
                    // Leave this structure for remaining empty space.
                    const VkDeviceSize alignmentPlusSize = (outDstOffset - m_FreeSpaces[bestIndex].offset) + size;
                    m_FreeSpaces[bestIndex].offset += alignmentPlusSize;
                    m_FreeSpaces[bestIndex].size -= alignmentPlusSize;
                }
                else
                {
                    // This structure becomes invalid.
                    m_FreeSpaces[bestIndex].blockInfoIndex = SIZE_MAX;
                }

                return true;
            }

            return false;
        }

    private:
        static const size_t MAX_COUNT = 4;

        struct FreeSpace
        {
            size_t blockInfoIndex; // SIZE_MAX means this structure is invalid.
            VkDeviceSize offset;
            VkDeviceSize size;
        } m_FreeSpaces[MAX_COUNT];
    };

    const bool m_OverlappingMoveSupported;

    uint32_t m_AllocationCount;
    bool m_AllAllocations;

    // Running totals reported via GetBytesMoved()/GetAllocationsMoved().
    VkDeviceSize m_BytesMoved;
    uint32_t m_AllocationsMoved;

    VmaVector< BlockInfo, VmaStlAllocator<BlockInfo> > m_BlockInfos;

    void PreprocessMetadata();
    void PostprocessMetadata();
    void InsertSuballoc(VmaBlockMetadata_Generic* pMetadata, const VmaSuballocation& suballoc);
};
6466 
// Per-block state tracked during defragmentation of one block vector.
struct VmaBlockDefragmentationContext
{
    enum BLOCK_FLAG
    {
        BLOCK_FLAG_USED = 0x00000001,
    };
    uint32_t flags;   // Combination of BLOCK_FLAG bits.
    // NOTE(review): presumably a temporary buffer created over the block for
    // GPU-side copies (see ApplyDefragmentationMovesGpu) — confirm in implementation.
    VkBuffer hBuffer;
};
6476 
// Defragmentation state for a single VmaBlockVector: owns the chosen
// algorithm, collects allocations between construction and Begin(),
// and carries the per-block contexts and final result.
class VmaBlockVectorDefragmentationContext
{
    VMA_CLASS_NO_COPY(VmaBlockVectorDefragmentationContext)
public:
    VkResult res;     // Result of defragmenting this block vector.
    // NOTE(review): name suggests whether this context currently holds the
    // block vector's mutex — confirm in implementation.
    bool mutexLocked;
    VmaVector< VmaBlockDefragmentationContext, VmaStlAllocator<VmaBlockDefragmentationContext> > blockContexts;

    VmaBlockVectorDefragmentationContext(
        VmaAllocator hAllocator,
        VmaPool hCustomPool, // Optional.
        VmaBlockVector* pBlockVector,
        uint32_t currFrameIndex);
    ~VmaBlockVectorDefragmentationContext();

    VmaPool GetCustomPool() const { return m_hCustomPool; }
    VmaBlockVector* GetBlockVector() const { return m_pBlockVector; }
    VmaDefragmentationAlgorithm* GetAlgorithm() const { return m_pAlgorithm; }

    void AddAllocation(VmaAllocation hAlloc, VkBool32* pChanged);
    void AddAll() { m_AllAllocations = true; }

    void Begin(bool overlappingMoveSupported);

private:
    const VmaAllocator m_hAllocator;
    // Null if not from custom pool.
    const VmaPool m_hCustomPool;
    // Redundant, for convenience not to fetch from m_hCustomPool->m_BlockVector or m_hAllocator->m_pBlockVectors.
    VmaBlockVector* const m_pBlockVector;
    const uint32_t m_CurrFrameIndex;
    // Owner of this object.
    VmaDefragmentationAlgorithm* m_pAlgorithm;

    struct AllocInfo
    {
        VmaAllocation hAlloc;
        VkBool32* pChanged;
    };
    // Used between constructor and Begin.
    VmaVector< AllocInfo, VmaStlAllocator<AllocInfo> > m_Allocations;
    bool m_AllAllocations;
};
6520 
// Internal implementation of the VmaDefragmentationContext handle: aggregates
// per-block-vector contexts for default pools and for any custom pools added.
struct VmaDefragmentationContext_T
{
private:
    VMA_CLASS_NO_COPY(VmaDefragmentationContext_T)
public:
    VmaDefragmentationContext_T(
        VmaAllocator hAllocator,
        uint32_t currFrameIndex,
        uint32_t flags,
        VmaDefragmentationStats* pStats);
    ~VmaDefragmentationContext_T();

    void AddPools(uint32_t poolCount, VmaPool* pPools);
    void AddAllocations(
        uint32_t allocationCount,
        VmaAllocation* pAllocations,
        VkBool32* pAllocationsChanged);

    /*
    Returns:
    - `VK_SUCCESS` if succeeded and object can be destroyed immediately.
    - `VK_NOT_READY` if succeeded but the object must remain alive until vmaDefragmentationEnd().
    - Negative value if error occurred and object can be destroyed immediately.
    */
    VkResult Defragment(
        VkDeviceSize maxCpuBytesToMove, uint32_t maxCpuAllocationsToMove,
        VkDeviceSize maxGpuBytesToMove, uint32_t maxGpuAllocationsToMove,
        VkCommandBuffer commandBuffer, VmaDefragmentationStats* pStats);

private:
    const VmaAllocator m_hAllocator;
    const uint32_t m_CurrFrameIndex;
    const uint32_t m_Flags;
    VmaDefragmentationStats* const m_pStats;
    // Owner of these objects.
    VmaBlockVectorDefragmentationContext* m_DefaultPoolContexts[VK_MAX_MEMORY_TYPES];
    // Owner of these objects.
    VmaVector< VmaBlockVectorDefragmentationContext*, VmaStlAllocator<VmaBlockVectorDefragmentationContext*> > m_CustomPoolContexts;
};
6560 
6561 #if VMA_RECORDING_ENABLED
6562 
6563 class VmaRecorder
6564 {
6565 public:
6566  VmaRecorder();
6567  VkResult Init(const VmaRecordSettings& settings, bool useMutex);
6568  void WriteConfiguration(
6569  const VkPhysicalDeviceProperties& devProps,
6570  const VkPhysicalDeviceMemoryProperties& memProps,
6571  bool dedicatedAllocationExtensionEnabled,
6572  bool bindMemory2ExtensionEnabled);
6573  ~VmaRecorder();
6574 
6575  void RecordCreateAllocator(uint32_t frameIndex);
6576  void RecordDestroyAllocator(uint32_t frameIndex);
6577  void RecordCreatePool(uint32_t frameIndex,
6578  const VmaPoolCreateInfo& createInfo,
6579  VmaPool pool);
6580  void RecordDestroyPool(uint32_t frameIndex, VmaPool pool);
6581  void RecordAllocateMemory(uint32_t frameIndex,
6582  const VkMemoryRequirements& vkMemReq,
6583  const VmaAllocationCreateInfo& createInfo,
6584  VmaAllocation allocation);
6585  void RecordAllocateMemoryPages(uint32_t frameIndex,
6586  const VkMemoryRequirements& vkMemReq,
6587  const VmaAllocationCreateInfo& createInfo,
6588  uint64_t allocationCount,
6589  const VmaAllocation* pAllocations);
6590  void RecordAllocateMemoryForBuffer(uint32_t frameIndex,
6591  const VkMemoryRequirements& vkMemReq,
6592  bool requiresDedicatedAllocation,
6593  bool prefersDedicatedAllocation,
6594  const VmaAllocationCreateInfo& createInfo,
6595  VmaAllocation allocation);
6596  void RecordAllocateMemoryForImage(uint32_t frameIndex,
6597  const VkMemoryRequirements& vkMemReq,
6598  bool requiresDedicatedAllocation,
6599  bool prefersDedicatedAllocation,
6600  const VmaAllocationCreateInfo& createInfo,
6601  VmaAllocation allocation);
6602  void RecordFreeMemory(uint32_t frameIndex,
6603  VmaAllocation allocation);
6604  void RecordFreeMemoryPages(uint32_t frameIndex,
6605  uint64_t allocationCount,
6606  const VmaAllocation* pAllocations);
6607  void RecordSetAllocationUserData(uint32_t frameIndex,
6608  VmaAllocation allocation,
6609  const void* pUserData);
6610  void RecordCreateLostAllocation(uint32_t frameIndex,
6611  VmaAllocation allocation);
6612  void RecordMapMemory(uint32_t frameIndex,
6613  VmaAllocation allocation);
6614  void RecordUnmapMemory(uint32_t frameIndex,
6615  VmaAllocation allocation);
6616  void RecordFlushAllocation(uint32_t frameIndex,
6617  VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size);
6618  void RecordInvalidateAllocation(uint32_t frameIndex,
6619  VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size);
6620  void RecordCreateBuffer(uint32_t frameIndex,
6621  const VkBufferCreateInfo& bufCreateInfo,
6622  const VmaAllocationCreateInfo& allocCreateInfo,
6623  VmaAllocation allocation);
6624  void RecordCreateImage(uint32_t frameIndex,
6625  const VkImageCreateInfo& imageCreateInfo,
6626  const VmaAllocationCreateInfo& allocCreateInfo,
6627  VmaAllocation allocation);
6628  void RecordDestroyBuffer(uint32_t frameIndex,
6629  VmaAllocation allocation);
6630  void RecordDestroyImage(uint32_t frameIndex,
6631  VmaAllocation allocation);
6632  void RecordTouchAllocation(uint32_t frameIndex,
6633  VmaAllocation allocation);
6634  void RecordGetAllocationInfo(uint32_t frameIndex,
6635  VmaAllocation allocation);
6636  void RecordMakePoolAllocationsLost(uint32_t frameIndex,
6637  VmaPool pool);
6638  void RecordDefragmentationBegin(uint32_t frameIndex,
6639  const VmaDefragmentationInfo2& info,
6641  void RecordDefragmentationEnd(uint32_t frameIndex,
6643 
6644 private:
6645  struct CallParams
6646  {
6647  uint32_t threadId;
6648  double time;
6649  };
6650 
6651  class UserDataString
6652  {
6653  public:
6654  UserDataString(VmaAllocationCreateFlags allocFlags, const void* pUserData);
6655  const char* GetString() const { return m_Str; }
6656 
6657  private:
6658  char m_PtrStr[17];
6659  const char* m_Str;
6660  };
6661 
6662  bool m_UseMutex;
6663  VmaRecordFlags m_Flags;
6664  FILE* m_File;
6665  VMA_MUTEX m_FileMutex;
6666  int64_t m_Freq;
6667  int64_t m_StartCounter;
6668 
6669  void GetBasicParams(CallParams& outParams);
6670 
6671  // T must be a pointer type, e.g. VmaAllocation, VmaPool.
6672  template<typename T>
6673  void PrintPointerList(uint64_t count, const T* pItems)
6674  {
6675  if(count)
6676  {
6677  fprintf(m_File, "%p", pItems[0]);
6678  for(uint64_t i = 1; i < count; ++i)
6679  {
6680  fprintf(m_File, " %p", pItems[i]);
6681  }
6682  }
6683  }
6684 
6685  void PrintPointerList(uint64_t count, const VmaAllocation* pItems);
6686  void Flush();
6687 };
6688 
6689 #endif // #if VMA_RECORDING_ENABLED
6690 
/*
Thread-safe wrapper over VmaPoolAllocator free list, for allocation of VmaAllocation_T objects.
*/
class VmaAllocationObjectAllocator
{
    VMA_CLASS_NO_COPY(VmaAllocationObjectAllocator)
public:
    VmaAllocationObjectAllocator(const VkAllocationCallbacks* pAllocationCallbacks);

    // Obtains a VmaAllocation_T object from the pool. Thread-safe.
    VmaAllocation Allocate();
    // Returns the object to the pool for reuse. Thread-safe.
    void Free(VmaAllocation hAlloc);

private:
    VMA_MUTEX m_Mutex; // Guards m_Allocator (the wrapper's thread-safety).
    VmaPoolAllocator<VmaAllocation_T> m_Allocator;
};
6707 
6708 // Main allocator object.
6709 struct VmaAllocator_T
6710 {
6711  VMA_CLASS_NO_COPY(VmaAllocator_T)
6712 public:
6713  bool m_UseMutex;
6714  bool m_UseKhrDedicatedAllocation;
6715  bool m_UseKhrBindMemory2;
6716  VkDevice m_hDevice;
6717  bool m_AllocationCallbacksSpecified;
6718  VkAllocationCallbacks m_AllocationCallbacks;
6719  VmaDeviceMemoryCallbacks m_DeviceMemoryCallbacks;
6720  VmaAllocationObjectAllocator m_AllocationObjectAllocator;
6721 
6722  // Number of bytes free out of limit, or VK_WHOLE_SIZE if no limit for that heap.
6723  VkDeviceSize m_HeapSizeLimit[VK_MAX_MEMORY_HEAPS];
6724  VMA_MUTEX m_HeapSizeLimitMutex;
6725 
6726  VkPhysicalDeviceProperties m_PhysicalDeviceProperties;
6727  VkPhysicalDeviceMemoryProperties m_MemProps;
6728 
6729  // Default pools.
6730  VmaBlockVector* m_pBlockVectors[VK_MAX_MEMORY_TYPES];
6731 
6732  // Each vector is sorted by memory (handle value).
6733  typedef VmaVector< VmaAllocation, VmaStlAllocator<VmaAllocation> > AllocationVectorType;
6734  AllocationVectorType* m_pDedicatedAllocations[VK_MAX_MEMORY_TYPES];
6735  VMA_RW_MUTEX m_DedicatedAllocationsMutex[VK_MAX_MEMORY_TYPES];
6736 
6737  VmaAllocator_T(const VmaAllocatorCreateInfo* pCreateInfo);
6738  VkResult Init(const VmaAllocatorCreateInfo* pCreateInfo);
6739  ~VmaAllocator_T();
6740 
6741  const VkAllocationCallbacks* GetAllocationCallbacks() const
6742  {
6743  return m_AllocationCallbacksSpecified ? &m_AllocationCallbacks : 0;
6744  }
6745  const VmaVulkanFunctions& GetVulkanFunctions() const
6746  {
6747  return m_VulkanFunctions;
6748  }
6749 
6750  VkDeviceSize GetBufferImageGranularity() const
6751  {
6752  return VMA_MAX(
6753  static_cast<VkDeviceSize>(VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY),
6754  m_PhysicalDeviceProperties.limits.bufferImageGranularity);
6755  }
6756 
6757  uint32_t GetMemoryHeapCount() const { return m_MemProps.memoryHeapCount; }
6758  uint32_t GetMemoryTypeCount() const { return m_MemProps.memoryTypeCount; }
6759 
6760  uint32_t MemoryTypeIndexToHeapIndex(uint32_t memTypeIndex) const
6761  {
6762  VMA_ASSERT(memTypeIndex < m_MemProps.memoryTypeCount);
6763  return m_MemProps.memoryTypes[memTypeIndex].heapIndex;
6764  }
6765  // True when specific memory type is HOST_VISIBLE but not HOST_COHERENT.
6766  bool IsMemoryTypeNonCoherent(uint32_t memTypeIndex) const
6767  {
6768  return (m_MemProps.memoryTypes[memTypeIndex].propertyFlags & (VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_COHERENT_BIT)) ==
6769  VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT;
6770  }
6771  // Minimum alignment for all allocations in specific memory type.
6772  VkDeviceSize GetMemoryTypeMinAlignment(uint32_t memTypeIndex) const
6773  {
6774  return IsMemoryTypeNonCoherent(memTypeIndex) ?
6775  VMA_MAX((VkDeviceSize)VMA_DEBUG_ALIGNMENT, m_PhysicalDeviceProperties.limits.nonCoherentAtomSize) :
6776  (VkDeviceSize)VMA_DEBUG_ALIGNMENT;
6777  }
6778 
6779  bool IsIntegratedGpu() const
6780  {
6781  return m_PhysicalDeviceProperties.deviceType == VK_PHYSICAL_DEVICE_TYPE_INTEGRATED_GPU;
6782  }
6783 
6784 #if VMA_RECORDING_ENABLED
6785  VmaRecorder* GetRecorder() const { return m_pRecorder; }
6786 #endif
6787 
6788  void GetBufferMemoryRequirements(
6789  VkBuffer hBuffer,
6790  VkMemoryRequirements& memReq,
6791  bool& requiresDedicatedAllocation,
6792  bool& prefersDedicatedAllocation) const;
6793  void GetImageMemoryRequirements(
6794  VkImage hImage,
6795  VkMemoryRequirements& memReq,
6796  bool& requiresDedicatedAllocation,
6797  bool& prefersDedicatedAllocation) const;
6798 
6799  // Main allocation function.
6800  VkResult AllocateMemory(
6801  const VkMemoryRequirements& vkMemReq,
6802  bool requiresDedicatedAllocation,
6803  bool prefersDedicatedAllocation,
6804  VkBuffer dedicatedBuffer,
6805  VkImage dedicatedImage,
6806  const VmaAllocationCreateInfo& createInfo,
6807  VmaSuballocationType suballocType,
6808  size_t allocationCount,
6809  VmaAllocation* pAllocations);
6810 
6811  // Main deallocation function.
6812  void FreeMemory(
6813  size_t allocationCount,
6814  const VmaAllocation* pAllocations);
6815 
6816  VkResult ResizeAllocation(
6817  const VmaAllocation alloc,
6818  VkDeviceSize newSize);
6819 
6820  void CalculateStats(VmaStats* pStats);
6821 
6822 #if VMA_STATS_STRING_ENABLED
6823  void PrintDetailedMap(class VmaJsonWriter& json);
6824 #endif
6825 
6826  VkResult DefragmentationBegin(
6827  const VmaDefragmentationInfo2& info,
6828  VmaDefragmentationStats* pStats,
6829  VmaDefragmentationContext* pContext);
6830  VkResult DefragmentationEnd(
6831  VmaDefragmentationContext context);
6832 
6833  void GetAllocationInfo(VmaAllocation hAllocation, VmaAllocationInfo* pAllocationInfo);
6834  bool TouchAllocation(VmaAllocation hAllocation);
6835 
6836  VkResult CreatePool(const VmaPoolCreateInfo* pCreateInfo, VmaPool* pPool);
6837  void DestroyPool(VmaPool pool);
6838  void GetPoolStats(VmaPool pool, VmaPoolStats* pPoolStats);
6839 
6840  void SetCurrentFrameIndex(uint32_t frameIndex);
6841  uint32_t GetCurrentFrameIndex() const { return m_CurrentFrameIndex.load(); }
6842 
6843  void MakePoolAllocationsLost(
6844  VmaPool hPool,
6845  size_t* pLostAllocationCount);
6846  VkResult CheckPoolCorruption(VmaPool hPool);
6847  VkResult CheckCorruption(uint32_t memoryTypeBits);
6848 
6849  void CreateLostAllocation(VmaAllocation* pAllocation);
6850 
6851  // Call to Vulkan function vkAllocateMemory with accompanying bookkeeping.
6852  VkResult AllocateVulkanMemory(const VkMemoryAllocateInfo* pAllocateInfo, VkDeviceMemory* pMemory);
6853  // Call to Vulkan function vkFreeMemory with accompanying bookkeeping.
6854  void FreeVulkanMemory(uint32_t memoryType, VkDeviceSize size, VkDeviceMemory hMemory);
6855  // Call to Vulkan function vkBindBufferMemory or vkBindBufferMemory2KHR.
6856  VkResult BindVulkanBuffer(
6857  VkDeviceMemory memory,
6858  VkDeviceSize memoryOffset,
6859  VkBuffer buffer,
6860  const void* pNext);
6861  // Call to Vulkan function vkBindImageMemory or vkBindImageMemory2KHR.
6862  VkResult BindVulkanImage(
6863  VkDeviceMemory memory,
6864  VkDeviceSize memoryOffset,
6865  VkImage image,
6866  const void* pNext);
6867 
6868  VkResult Map(VmaAllocation hAllocation, void** ppData);
6869  void Unmap(VmaAllocation hAllocation);
6870 
6871  VkResult BindBufferMemory(
6872  VmaAllocation hAllocation,
6873  VkDeviceSize allocationLocalOffset,
6874  VkBuffer hBuffer,
6875  const void* pNext);
6876  VkResult BindImageMemory(
6877  VmaAllocation hAllocation,
6878  VkDeviceSize allocationLocalOffset,
6879  VkImage hImage,
6880  const void* pNext);
6881 
6882  void FlushOrInvalidateAllocation(
6883  VmaAllocation hAllocation,
6884  VkDeviceSize offset, VkDeviceSize size,
6885  VMA_CACHE_OPERATION op);
6886 
6887  void FillAllocation(const VmaAllocation hAllocation, uint8_t pattern);
6888 
6889  /*
6890  Returns bit mask of memory types that can support defragmentation on GPU as
6891  they support creation of required buffer for copy operations.
6892  */
6893  uint32_t GetGpuDefragmentationMemoryTypeBits();
6894 
6895 private:
6896  VkDeviceSize m_PreferredLargeHeapBlockSize;
6897 
6898  VkPhysicalDevice m_PhysicalDevice;
6899  VMA_ATOMIC_UINT32 m_CurrentFrameIndex;
6900  VMA_ATOMIC_UINT32 m_GpuDefragmentationMemoryTypeBits; // UINT32_MAX means uninitialized.
6901 
6902  VMA_RW_MUTEX m_PoolsMutex;
6903  // Protected by m_PoolsMutex. Sorted by pointer value.
6904  VmaVector<VmaPool, VmaStlAllocator<VmaPool> > m_Pools;
6905  uint32_t m_NextPoolId;
6906 
6907  VmaVulkanFunctions m_VulkanFunctions;
6908 
6909 #if VMA_RECORDING_ENABLED
6910  VmaRecorder* m_pRecorder;
6911 #endif
6912 
6913  void ImportVulkanFunctions(const VmaVulkanFunctions* pVulkanFunctions);
6914 
6915  VkDeviceSize CalcPreferredBlockSize(uint32_t memTypeIndex);
6916 
6917  VkResult AllocateMemoryOfType(
6918  VkDeviceSize size,
6919  VkDeviceSize alignment,
6920  bool dedicatedAllocation,
6921  VkBuffer dedicatedBuffer,
6922  VkImage dedicatedImage,
6923  const VmaAllocationCreateInfo& createInfo,
6924  uint32_t memTypeIndex,
6925  VmaSuballocationType suballocType,
6926  size_t allocationCount,
6927  VmaAllocation* pAllocations);
6928 
6929  // Helper function only to be used inside AllocateDedicatedMemory.
6930  VkResult AllocateDedicatedMemoryPage(
6931  VkDeviceSize size,
6932  VmaSuballocationType suballocType,
6933  uint32_t memTypeIndex,
6934  const VkMemoryAllocateInfo& allocInfo,
6935  bool map,
6936  bool isUserDataString,
6937  void* pUserData,
6938  VmaAllocation* pAllocation);
6939 
6940  // Allocates and registers new VkDeviceMemory specifically for dedicated allocations.
6941  VkResult AllocateDedicatedMemory(
6942  VkDeviceSize size,
6943  VmaSuballocationType suballocType,
6944  uint32_t memTypeIndex,
6945  bool map,
6946  bool isUserDataString,
6947  void* pUserData,
6948  VkBuffer dedicatedBuffer,
6949  VkImage dedicatedImage,
6950  size_t allocationCount,
6951  VmaAllocation* pAllocations);
6952 
6953  void FreeDedicatedMemory(VmaAllocation allocation);
6954 
6955  /*
6956  Calculates and returns bit mask of memory types that can support defragmentation
6957  on GPU as they support creation of required buffer for copy operations.
6958  */
6959  uint32_t CalculateGpuDefragmentationMemoryTypeBits() const;
6960 };
6961 
6963 // Memory allocation #2 after VmaAllocator_T definition
6964 
6965 static void* VmaMalloc(VmaAllocator hAllocator, size_t size, size_t alignment)
6966 {
6967  return VmaMalloc(&hAllocator->m_AllocationCallbacks, size, alignment);
6968 }
6969 
6970 static void VmaFree(VmaAllocator hAllocator, void* ptr)
6971 {
6972  VmaFree(&hAllocator->m_AllocationCallbacks, ptr);
6973 }
6974 
6975 template<typename T>
6976 static T* VmaAllocate(VmaAllocator hAllocator)
6977 {
6978  return (T*)VmaMalloc(hAllocator, sizeof(T), VMA_ALIGN_OF(T));
6979 }
6980 
6981 template<typename T>
6982 static T* VmaAllocateArray(VmaAllocator hAllocator, size_t count)
6983 {
6984  return (T*)VmaMalloc(hAllocator, sizeof(T) * count, VMA_ALIGN_OF(T));
6985 }
6986 
6987 template<typename T>
6988 static void vma_delete(VmaAllocator hAllocator, T* ptr)
6989 {
6990  if(ptr != VMA_NULL)
6991  {
6992  ptr->~T();
6993  VmaFree(hAllocator, ptr);
6994  }
6995 }
6996 
6997 template<typename T>
6998 static void vma_delete_array(VmaAllocator hAllocator, T* ptr, size_t count)
6999 {
7000  if(ptr != VMA_NULL)
7001  {
7002  for(size_t i = count; i--; )
7003  ptr[i].~T();
7004  VmaFree(hAllocator, ptr);
7005  }
7006 }
7007 
7009 // VmaStringBuilder
7010 
7011 #if VMA_STATS_STRING_ENABLED
7012 
// Minimal append-only character buffer used to build the statistics JSON
// string. The stored data is NOT null-terminated — consumers must use
// GetLength() together with GetData().
class VmaStringBuilder
{
public:
 VmaStringBuilder(VmaAllocator alloc) : m_Data(VmaStlAllocator<char>(alloc->GetAllocationCallbacks())) { }
 size_t GetLength() const { return m_Data.size(); }
 const char* GetData() const { return m_Data.data(); }

 void Add(char ch) { m_Data.push_back(ch); }
 void Add(const char* pStr);
 void AddNewLine() { Add('\n'); }
 void AddNumber(uint32_t num);
 void AddNumber(uint64_t num);
 void AddPointer(const void* ptr);

private:
 // Raw character storage, allocated through the allocator's callbacks.
 VmaVector< char, VmaStlAllocator<char> > m_Data;
};
7030 
7031 void VmaStringBuilder::Add(const char* pStr)
7032 {
7033  const size_t strLen = strlen(pStr);
7034  if(strLen > 0)
7035  {
7036  const size_t oldCount = m_Data.size();
7037  m_Data.resize(oldCount + strLen);
7038  memcpy(m_Data.data() + oldCount, pStr, strLen);
7039  }
7040 }
7041 
7042 void VmaStringBuilder::AddNumber(uint32_t num)
7043 {
7044  char buf[11];
7045  VmaUint32ToStr(buf, sizeof(buf), num);
7046  Add(buf);
7047 }
7048 
7049 void VmaStringBuilder::AddNumber(uint64_t num)
7050 {
7051  char buf[21];
7052  VmaUint64ToStr(buf, sizeof(buf), num);
7053  Add(buf);
7054 }
7055 
7056 void VmaStringBuilder::AddPointer(const void* ptr)
7057 {
7058  char buf[21];
7059  VmaPtrToStr(buf, sizeof(buf), ptr);
7060  Add(buf);
7061 }
7062 
7063 #endif // #if VMA_STATS_STRING_ENABLED
7064 
7066 // VmaJsonWriter
7067 
7068 #if VMA_STATS_STRING_ENABLED
7069 
// Streaming JSON writer used to serialize allocator statistics into a
// VmaStringBuilder. Maintains a stack of open collections so it can emit
// separators and indentation automatically. Inside an object, values
// alternate key/value; keys must be strings (asserted in BeginValue).
class VmaJsonWriter
{
 VMA_CLASS_NO_COPY(VmaJsonWriter)
public:
 VmaJsonWriter(const VkAllocationCallbacks* pAllocationCallbacks, VmaStringBuilder& sb);
 ~VmaJsonWriter();

 // Open/close a JSON object; singleLine suppresses newlines/indentation.
 void BeginObject(bool singleLine = false);
 void EndObject();

 // Open/close a JSON array; singleLine suppresses newlines/indentation.
 void BeginArray(bool singleLine = false);
 void EndArray();

 // WriteString emits a complete quoted string; Begin/Continue/EndString
 // allow building one string value from multiple pieces.
 void WriteString(const char* pStr);
 void BeginString(const char* pStr = VMA_NULL);
 void ContinueString(const char* pStr);
 void ContinueString(uint32_t n);
 void ContinueString(uint64_t n);
 void ContinueString_Pointer(const void* ptr);
 void EndString(const char* pStr = VMA_NULL);

 void WriteNumber(uint32_t n);
 void WriteNumber(uint64_t n);
 void WriteBool(bool b);
 void WriteNull();

private:
 // One indentation unit, repeated per nesting level.
 static const char* const INDENT;

 enum COLLECTION_TYPE
 {
 COLLECTION_TYPE_OBJECT,
 COLLECTION_TYPE_ARRAY,
 };
 // State for each currently-open object/array.
 struct StackItem
 {
 COLLECTION_TYPE type;
 uint32_t valueCount; // Values written so far (keys count as values).
 bool singleLineMode;
 };

 VmaStringBuilder& m_SB; // Output sink.
 VmaVector< StackItem, VmaStlAllocator<StackItem> > m_Stack;
 bool m_InsideString; // True between BeginString and EndString.

 void BeginValue(bool isString);
 void WriteIndent(bool oneLess = false);
};
7118 
7119 const char* const VmaJsonWriter::INDENT = " ";
7120 
// Binds the writer to an output string builder; the collection stack uses
// the provided allocation callbacks.
VmaJsonWriter::VmaJsonWriter(const VkAllocationCallbacks* pAllocationCallbacks, VmaStringBuilder& sb) :
 m_SB(sb),
 m_Stack(VmaStlAllocator<StackItem>(pAllocationCallbacks)),
 m_InsideString(false)
{
}
7127 
// Verifies that all strings were ended and every Begin* had a matching End*.
VmaJsonWriter::~VmaJsonWriter()
{
 VMA_ASSERT(!m_InsideString);
 VMA_ASSERT(m_Stack.empty());
}
7133 
7134 void VmaJsonWriter::BeginObject(bool singleLine)
7135 {
7136  VMA_ASSERT(!m_InsideString);
7137 
7138  BeginValue(false);
7139  m_SB.Add('{');
7140 
7141  StackItem item;
7142  item.type = COLLECTION_TYPE_OBJECT;
7143  item.valueCount = 0;
7144  item.singleLineMode = singleLine;
7145  m_Stack.push_back(item);
7146 }
7147 
7148 void VmaJsonWriter::EndObject()
7149 {
7150  VMA_ASSERT(!m_InsideString);
7151 
7152  WriteIndent(true);
7153  m_SB.Add('}');
7154 
7155  VMA_ASSERT(!m_Stack.empty() && m_Stack.back().type == COLLECTION_TYPE_OBJECT);
7156  m_Stack.pop_back();
7157 }
7158 
7159 void VmaJsonWriter::BeginArray(bool singleLine)
7160 {
7161  VMA_ASSERT(!m_InsideString);
7162 
7163  BeginValue(false);
7164  m_SB.Add('[');
7165 
7166  StackItem item;
7167  item.type = COLLECTION_TYPE_ARRAY;
7168  item.valueCount = 0;
7169  item.singleLineMode = singleLine;
7170  m_Stack.push_back(item);
7171 }
7172 
7173 void VmaJsonWriter::EndArray()
7174 {
7175  VMA_ASSERT(!m_InsideString);
7176 
7177  WriteIndent(true);
7178  m_SB.Add(']');
7179 
7180  VMA_ASSERT(!m_Stack.empty() && m_Stack.back().type == COLLECTION_TYPE_ARRAY);
7181  m_Stack.pop_back();
7182 }
7183 
7184 void VmaJsonWriter::WriteString(const char* pStr)
7185 {
7186  BeginString(pStr);
7187  EndString();
7188 }
7189 
7190 void VmaJsonWriter::BeginString(const char* pStr)
7191 {
7192  VMA_ASSERT(!m_InsideString);
7193 
7194  BeginValue(true);
7195  m_SB.Add('"');
7196  m_InsideString = true;
7197  if(pStr != VMA_NULL && pStr[0] != '\0')
7198  {
7199  ContinueString(pStr);
7200  }
7201 }
7202 
7203 void VmaJsonWriter::ContinueString(const char* pStr)
7204 {
7205  VMA_ASSERT(m_InsideString);
7206 
7207  const size_t strLen = strlen(pStr);
7208  for(size_t i = 0; i < strLen; ++i)
7209  {
7210  char ch = pStr[i];
7211  if(ch == '\\')
7212  {
7213  m_SB.Add("\\\\");
7214  }
7215  else if(ch == '"')
7216  {
7217  m_SB.Add("\\\"");
7218  }
7219  else if(ch >= 32)
7220  {
7221  m_SB.Add(ch);
7222  }
7223  else switch(ch)
7224  {
7225  case '\b':
7226  m_SB.Add("\\b");
7227  break;
7228  case '\f':
7229  m_SB.Add("\\f");
7230  break;
7231  case '\n':
7232  m_SB.Add("\\n");
7233  break;
7234  case '\r':
7235  m_SB.Add("\\r");
7236  break;
7237  case '\t':
7238  m_SB.Add("\\t");
7239  break;
7240  default:
7241  VMA_ASSERT(0 && "Character not currently supported.");
7242  break;
7243  }
7244  }
7245 }
7246 
7247 void VmaJsonWriter::ContinueString(uint32_t n)
7248 {
7249  VMA_ASSERT(m_InsideString);
7250  m_SB.AddNumber(n);
7251 }
7252 
7253 void VmaJsonWriter::ContinueString(uint64_t n)
7254 {
7255  VMA_ASSERT(m_InsideString);
7256  m_SB.AddNumber(n);
7257 }
7258 
7259 void VmaJsonWriter::ContinueString_Pointer(const void* ptr)
7260 {
7261  VMA_ASSERT(m_InsideString);
7262  m_SB.AddPointer(ptr);
7263 }
7264 
7265 void VmaJsonWriter::EndString(const char* pStr)
7266 {
7267  VMA_ASSERT(m_InsideString);
7268  if(pStr != VMA_NULL && pStr[0] != '\0')
7269  {
7270  ContinueString(pStr);
7271  }
7272  m_SB.Add('"');
7273  m_InsideString = false;
7274 }
7275 
7276 void VmaJsonWriter::WriteNumber(uint32_t n)
7277 {
7278  VMA_ASSERT(!m_InsideString);
7279  BeginValue(false);
7280  m_SB.AddNumber(n);
7281 }
7282 
7283 void VmaJsonWriter::WriteNumber(uint64_t n)
7284 {
7285  VMA_ASSERT(!m_InsideString);
7286  BeginValue(false);
7287  m_SB.AddNumber(n);
7288 }
7289 
7290 void VmaJsonWriter::WriteBool(bool b)
7291 {
7292  VMA_ASSERT(!m_InsideString);
7293  BeginValue(false);
7294  m_SB.Add(b ? "true" : "false");
7295 }
7296 
7297 void VmaJsonWriter::WriteNull()
7298 {
7299  VMA_ASSERT(!m_InsideString);
7300  BeginValue(false);
7301  m_SB.Add("null");
7302 }
7303 
// Prepares the output for the next value in the current collection:
// emits ": " between a key and its value, ", " plus indentation between
// items, or just indentation before the first item. Inside an object,
// an even valueCount means the next value is a key and must be a string.
void VmaJsonWriter::BeginValue(bool isString)
{
 if(!m_Stack.empty())
 {
 StackItem& currItem = m_Stack.back();
 // Even count inside an object => this value is a key; keys must be strings.
 if(currItem.type == COLLECTION_TYPE_OBJECT &&
 currItem.valueCount % 2 == 0)
 {
 VMA_ASSERT(isString);
 }

 // Odd count inside an object => this is the value following its key.
 if(currItem.type == COLLECTION_TYPE_OBJECT &&
 currItem.valueCount % 2 != 0)
 {
 m_SB.Add(": ");
 }
 else if(currItem.valueCount > 0)
 {
 m_SB.Add(", ");
 WriteIndent();
 }
 else
 {
 WriteIndent();
 }
 ++currItem.valueCount;
 }
}
7332 
7333 void VmaJsonWriter::WriteIndent(bool oneLess)
7334 {
7335  if(!m_Stack.empty() && !m_Stack.back().singleLineMode)
7336  {
7337  m_SB.AddNewLine();
7338 
7339  size_t count = m_Stack.size();
7340  if(count > 0 && oneLess)
7341  {
7342  --count;
7343  }
7344  for(size_t i = 0; i < count; ++i)
7345  {
7346  m_SB.Add(INDENT);
7347  }
7348  }
7349 }
7350 
7351 #endif // #if VMA_STATS_STRING_ENABLED
7352 
7354 
7355 void VmaAllocation_T::SetUserData(VmaAllocator hAllocator, void* pUserData)
7356 {
7357  if(IsUserDataString())
7358  {
7359  VMA_ASSERT(pUserData == VMA_NULL || pUserData != m_pUserData);
7360 
7361  FreeUserDataString(hAllocator);
7362 
7363  if(pUserData != VMA_NULL)
7364  {
7365  const char* const newStrSrc = (char*)pUserData;
7366  const size_t newStrLen = strlen(newStrSrc);
7367  char* const newStrDst = vma_new_array(hAllocator, char, newStrLen + 1);
7368  memcpy(newStrDst, newStrSrc, newStrLen + 1);
7369  m_pUserData = newStrDst;
7370  }
7371  }
7372  else
7373  {
7374  m_pUserData = pUserData;
7375  }
7376 }
7377 
// Rebinds this block-owned allocation to another memory block at the given
// offset (used during defragmentation). If the block actually changes, the
// allocation's share of the map reference count is transferred from the old
// block to the new one so mapping stays balanced.
void VmaAllocation_T::ChangeBlockAllocation(
 VmaAllocator hAllocator,
 VmaDeviceMemoryBlock* block,
 VkDeviceSize offset)
{
 VMA_ASSERT(block != VMA_NULL);
 VMA_ASSERT(m_Type == ALLOCATION_TYPE_BLOCK);

 // Move mapping reference counter from old block to new block.
 if(block != m_BlockAllocation.m_Block)
 {
 // High bit of m_MapCount is the persistent-map flag; strip it to get
 // the plain reference count, then add one if persistently mapped.
 uint32_t mapRefCount = m_MapCount & ~MAP_COUNT_FLAG_PERSISTENT_MAP;
 if(IsPersistentMap())
 ++mapRefCount;
 m_BlockAllocation.m_Block->Unmap(hAllocator, mapRefCount);
 block->Map(hAllocator, mapRefCount, VMA_NULL);
 }

 m_BlockAllocation.m_Block = block;
 m_BlockAllocation.m_Offset = offset;
}
7399 
7400 void VmaAllocation_T::ChangeOffset(VkDeviceSize newOffset)
7401 {
7402  VMA_ASSERT(m_Type == ALLOCATION_TYPE_BLOCK);
7403  m_BlockAllocation.m_Offset = newOffset;
7404 }
7405 
7406 VkDeviceSize VmaAllocation_T::GetOffset() const
7407 {
7408  switch(m_Type)
7409  {
7410  case ALLOCATION_TYPE_BLOCK:
7411  return m_BlockAllocation.m_Offset;
7412  case ALLOCATION_TYPE_DEDICATED:
7413  return 0;
7414  default:
7415  VMA_ASSERT(0);
7416  return 0;
7417  }
7418 }
7419 
7420 VkDeviceMemory VmaAllocation_T::GetMemory() const
7421 {
7422  switch(m_Type)
7423  {
7424  case ALLOCATION_TYPE_BLOCK:
7425  return m_BlockAllocation.m_Block->GetDeviceMemory();
7426  case ALLOCATION_TYPE_DEDICATED:
7427  return m_DedicatedAllocation.m_hMemory;
7428  default:
7429  VMA_ASSERT(0);
7430  return VK_NULL_HANDLE;
7431  }
7432 }
7433 
7434 uint32_t VmaAllocation_T::GetMemoryTypeIndex() const
7435 {
7436  switch(m_Type)
7437  {
7438  case ALLOCATION_TYPE_BLOCK:
7439  return m_BlockAllocation.m_Block->GetMemoryTypeIndex();
7440  case ALLOCATION_TYPE_DEDICATED:
7441  return m_DedicatedAllocation.m_MemoryTypeIndex;
7442  default:
7443  VMA_ASSERT(0);
7444  return UINT32_MAX;
7445  }
7446 }
7447 
7448 void* VmaAllocation_T::GetMappedData() const
7449 {
7450  switch(m_Type)
7451  {
7452  case ALLOCATION_TYPE_BLOCK:
7453  if(m_MapCount != 0)
7454  {
7455  void* pBlockData = m_BlockAllocation.m_Block->GetMappedData();
7456  VMA_ASSERT(pBlockData != VMA_NULL);
7457  return (char*)pBlockData + m_BlockAllocation.m_Offset;
7458  }
7459  else
7460  {
7461  return VMA_NULL;
7462  }
7463  break;
7464  case ALLOCATION_TYPE_DEDICATED:
7465  VMA_ASSERT((m_DedicatedAllocation.m_pMappedData != VMA_NULL) == (m_MapCount != 0));
7466  return m_DedicatedAllocation.m_pMappedData;
7467  default:
7468  VMA_ASSERT(0);
7469  return VMA_NULL;
7470  }
7471 }
7472 
7473 bool VmaAllocation_T::CanBecomeLost() const
7474 {
7475  switch(m_Type)
7476  {
7477  case ALLOCATION_TYPE_BLOCK:
7478  return m_BlockAllocation.m_CanBecomeLost;
7479  case ALLOCATION_TYPE_DEDICATED:
7480  return false;
7481  default:
7482  VMA_ASSERT(0);
7483  return false;
7484  }
7485 }
7486 
// Tries to atomically mark this allocation as lost. Succeeds only when
// its last use is older than currentFrameIndex - frameInUseCount; uses a
// compare-exchange retry loop on the last-use frame index so concurrent
// GetAllocationInfo/TouchAllocation calls stay consistent.
bool VmaAllocation_T::MakeLost(uint32_t currentFrameIndex, uint32_t frameInUseCount)
{
 VMA_ASSERT(CanBecomeLost());

 /*
 Warning: This is a carefully designed algorithm.
 Do not modify unless you really know what you're doing :)
 */
 uint32_t localLastUseFrameIndex = GetLastUseFrameIndex();
 for(;;)
 {
 if(localLastUseFrameIndex == VMA_FRAME_INDEX_LOST)
 {
 // Already lost — callers should not ask twice.
 VMA_ASSERT(0);
 return false;
 }
 else if(localLastUseFrameIndex + frameInUseCount >= currentFrameIndex)
 {
 // Still potentially in use by the GPU: cannot be made lost.
 return false;
 }
 else // Last use time earlier than current time.
 {
 if(CompareExchangeLastUseFrameIndex(localLastUseFrameIndex, VMA_FRAME_INDEX_LOST))
 {
 // Setting hAllocation.LastUseFrameIndex atomic to VMA_FRAME_INDEX_LOST is enough to mark it as LOST.
 // Calling code just needs to unregister this allocation in owning VmaDeviceMemoryBlock.
 return true;
 }
 // CAS failed: localLastUseFrameIndex was refreshed; retry.
 }
 }
}
7518 
7519 #if VMA_STATS_STRING_ENABLED
7520 
7521 // Correspond to values of enum VmaSuballocationType.
// Correspond to values of enum VmaSuballocationType.
// Indexed directly by the enum value — keep order in sync with the enum.
static const char* VMA_SUBALLOCATION_TYPE_NAMES[] = {
 "FREE",
 "UNKNOWN",
 "BUFFER",
 "IMAGE_UNKNOWN",
 "IMAGE_LINEAR",
 "IMAGE_OPTIMAL",
};
7530 
// Serializes this allocation's key/value parameters into an already-open
// JSON object: type, size, optional user data (string or pointer), frame
// indices, and — when known — the buffer/image usage flags.
void VmaAllocation_T::PrintParameters(class VmaJsonWriter& json) const
{
 json.WriteString("Type");
 json.WriteString(VMA_SUBALLOCATION_TYPE_NAMES[m_SuballocationType]);

 json.WriteString("Size");
 json.WriteNumber(m_Size);

 if(m_pUserData != VMA_NULL)
 {
 json.WriteString("UserData");
 if(IsUserDataString())
 {
 // User data is an owned string copy — emit it verbatim.
 json.WriteString((const char*)m_pUserData);
 }
 else
 {
 // Opaque pointer — emit its textual address.
 json.BeginString();
 json.ContinueString_Pointer(m_pUserData);
 json.EndString();
 }
 }

 json.WriteString("CreationFrameIndex");
 json.WriteNumber(m_CreationFrameIndex);

 json.WriteString("LastUseFrameIndex");
 json.WriteNumber(GetLastUseFrameIndex());

 if(m_BufferImageUsage != 0)
 {
 json.WriteString("Usage");
 json.WriteNumber(m_BufferImageUsage);
 }
}
7566 
7567 #endif
7568 
7569 void VmaAllocation_T::FreeUserDataString(VmaAllocator hAllocator)
7570 {
7571  VMA_ASSERT(IsUserDataString());
7572  if(m_pUserData != VMA_NULL)
7573  {
7574  char* const oldStr = (char*)m_pUserData;
7575  const size_t oldStrLen = strlen(oldStr);
7576  vma_delete_array(hAllocator, oldStr, oldStrLen + 1);
7577  m_pUserData = VMA_NULL;
7578  }
7579 }
7580 
7581 void VmaAllocation_T::BlockAllocMap()
7582 {
7583  VMA_ASSERT(GetType() == ALLOCATION_TYPE_BLOCK);
7584 
7585  if((m_MapCount & ~MAP_COUNT_FLAG_PERSISTENT_MAP) < 0x7F)
7586  {
7587  ++m_MapCount;
7588  }
7589  else
7590  {
7591  VMA_ASSERT(0 && "Allocation mapped too many times simultaneously.");
7592  }
7593 }
7594 
7595 void VmaAllocation_T::BlockAllocUnmap()
7596 {
7597  VMA_ASSERT(GetType() == ALLOCATION_TYPE_BLOCK);
7598 
7599  if((m_MapCount & ~MAP_COUNT_FLAG_PERSISTENT_MAP) != 0)
7600  {
7601  --m_MapCount;
7602  }
7603  else
7604  {
7605  VMA_ASSERT(0 && "Unmapping allocation not previously mapped.");
7606  }
7607 }
7608 
// Maps a dedicated allocation's VkDeviceMemory, or — when already mapped —
// reuses the cached pointer and bumps the map reference counter. Returns
// the mapped pointer in *ppData.
VkResult VmaAllocation_T::DedicatedAllocMap(VmaAllocator hAllocator, void** ppData)
{
 VMA_ASSERT(GetType() == ALLOCATION_TYPE_DEDICATED);

 if(m_MapCount != 0)
 {
 // Counter (excluding the persistent-map flag bit) is capped at 0x7F.
 if((m_MapCount & ~MAP_COUNT_FLAG_PERSISTENT_MAP) < 0x7F)
 {
 VMA_ASSERT(m_DedicatedAllocation.m_pMappedData != VMA_NULL);
 *ppData = m_DedicatedAllocation.m_pMappedData;
 ++m_MapCount;
 return VK_SUCCESS;
 }
 else
 {
 VMA_ASSERT(0 && "Dedicated allocation mapped too many times simultaneously.");
 return VK_ERROR_MEMORY_MAP_FAILED;
 }
 }
 else
 {
 // First map: map the whole memory object.
 VkResult result = (*hAllocator->GetVulkanFunctions().vkMapMemory)(
 hAllocator->m_hDevice,
 m_DedicatedAllocation.m_hMemory,
 0, // offset
 VK_WHOLE_SIZE,
 0, // flags
 ppData);
 if(result == VK_SUCCESS)
 {
 // Cache the pointer for subsequent nested Map calls.
 m_DedicatedAllocation.m_pMappedData = *ppData;
 m_MapCount = 1;
 }
 return result;
 }
}
7645 
// Decrements the map reference counter of a dedicated allocation and calls
// vkUnmapMemory when the count (including the persistent-map flag) reaches
// zero. Asserts on unbalanced unmap.
void VmaAllocation_T::DedicatedAllocUnmap(VmaAllocator hAllocator)
{
 VMA_ASSERT(GetType() == ALLOCATION_TYPE_DEDICATED);

 if((m_MapCount & ~MAP_COUNT_FLAG_PERSISTENT_MAP) != 0)
 {
 --m_MapCount;
 if(m_MapCount == 0)
 {
 m_DedicatedAllocation.m_pMappedData = VMA_NULL;
 (*hAllocator->GetVulkanFunctions().vkUnmapMemory)(
 hAllocator->m_hDevice,
 m_DedicatedAllocation.m_hMemory);
 }
 }
 else
 {
 VMA_ASSERT(0 && "Unmapping dedicated allocation not previously mapped.");
 }
}
7666 
7667 #if VMA_STATS_STRING_ENABLED
7668 
// Serializes a VmaStatInfo into the JSON writer as an object with block,
// allocation and byte counters. Min/Avg/Max sub-objects are emitted only
// when there is more than one allocation / unused range, since with one
// element all three equal the single value.
static void VmaPrintStatInfo(VmaJsonWriter& json, const VmaStatInfo& stat)
{
 json.BeginObject();

 json.WriteString("Blocks");
 json.WriteNumber(stat.blockCount);

 json.WriteString("Allocations");
 json.WriteNumber(stat.allocationCount);

 json.WriteString("UnusedRanges");
 json.WriteNumber(stat.unusedRangeCount);

 json.WriteString("UsedBytes");
 json.WriteNumber(stat.usedBytes);

 json.WriteString("UnusedBytes");
 json.WriteNumber(stat.unusedBytes);

 if(stat.allocationCount > 1)
 {
 json.WriteString("AllocationSize");
 json.BeginObject(true);
 json.WriteString("Min");
 json.WriteNumber(stat.allocationSizeMin);
 json.WriteString("Avg");
 json.WriteNumber(stat.allocationSizeAvg);
 json.WriteString("Max");
 json.WriteNumber(stat.allocationSizeMax);
 json.EndObject();
 }

 if(stat.unusedRangeCount > 1)
 {
 json.WriteString("UnusedRangeSize");
 json.BeginObject(true);
 json.WriteString("Min");
 json.WriteNumber(stat.unusedRangeSizeMin);
 json.WriteString("Avg");
 json.WriteNumber(stat.unusedRangeSizeAvg);
 json.WriteString("Max");
 json.WriteNumber(stat.unusedRangeSizeMax);
 json.EndObject();
 }

 json.EndObject();
}
7716 
7717 #endif // #if VMA_STATS_STRING_ENABLED
7718 
// Comparator ordering free-suballocation list iterators by suballocation
// size (ascending). Used for binary search in m_FreeSuballocationsBySize;
// the second overload enables lookup against a plain size value.
struct VmaSuballocationItemSizeLess
{
 bool operator()(
 const VmaSuballocationList::iterator lhs,
 const VmaSuballocationList::iterator rhs) const
 {
 return lhs->size < rhs->size;
 }
 bool operator()(
 const VmaSuballocationList::iterator lhs,
 VkDeviceSize rhsSize) const
 {
 return lhs->size < rhsSize;
 }
};
7734 
7735 
7737 // class VmaBlockMetadata
7738 
// Base metadata starts with zero size; Init(size) sets the real value.
VmaBlockMetadata::VmaBlockMetadata(VmaAllocator hAllocator) :
 m_Size(0),
 m_pAllocationCallbacks(hAllocator->GetAllocationCallbacks())
{
}
7744 
7745 #if VMA_STATS_STRING_ENABLED
7746 
// Opens the per-block JSON object with summary counters and begins the
// "Suballocations" array; paired with PrintDetailedMap_End.
void VmaBlockMetadata::PrintDetailedMap_Begin(class VmaJsonWriter& json,
 VkDeviceSize unusedBytes,
 size_t allocationCount,
 size_t unusedRangeCount) const
{
 json.BeginObject();

 json.WriteString("TotalBytes");
 json.WriteNumber(GetSize());

 json.WriteString("UnusedBytes");
 json.WriteNumber(unusedBytes);

 json.WriteString("Allocations");
 json.WriteNumber((uint64_t)allocationCount);

 json.WriteString("UnusedRanges");
 json.WriteNumber((uint64_t)unusedRangeCount);

 json.WriteString("Suballocations");
 json.BeginArray();
}
7769 
// Emits one used suballocation as a single-line JSON object: its offset
// plus the allocation's own parameters.
void VmaBlockMetadata::PrintDetailedMap_Allocation(class VmaJsonWriter& json,
 VkDeviceSize offset,
 VmaAllocation hAllocation) const
{
 json.BeginObject(true);

 json.WriteString("Offset");
 json.WriteNumber(offset);

 hAllocation->PrintParameters(json);

 json.EndObject();
}
7783 
// Emits one free range as a single-line JSON object with offset, the
// "FREE" type name, and size.
void VmaBlockMetadata::PrintDetailedMap_UnusedRange(class VmaJsonWriter& json,
 VkDeviceSize offset,
 VkDeviceSize size) const
{
 json.BeginObject(true);

 json.WriteString("Offset");
 json.WriteNumber(offset);

 json.WriteString("Type");
 json.WriteString(VMA_SUBALLOCATION_TYPE_NAMES[VMA_SUBALLOCATION_TYPE_FREE]);

 json.WriteString("Size");
 json.WriteNumber(size);

 json.EndObject();
}
7801 
// Closes the "Suballocations" array and the per-block object opened by
// PrintDetailedMap_Begin.
void VmaBlockMetadata::PrintDetailedMap_End(class VmaJsonWriter& json) const
{
 json.EndArray();
 json.EndObject();
}
7807 
7808 #endif // #if VMA_STATS_STRING_ENABLED
7809 
7811 // class VmaBlockMetadata_Generic
7812 
// Generic (list-based) metadata: suballocations live in a linked list and
// free ones are additionally indexed by size in m_FreeSuballocationsBySize.
VmaBlockMetadata_Generic::VmaBlockMetadata_Generic(VmaAllocator hAllocator) :
 VmaBlockMetadata(hAllocator),
 m_FreeCount(0),
 m_SumFreeSize(0),
 m_Suballocations(VmaStlAllocator<VmaSuballocation>(hAllocator->GetAllocationCallbacks())),
 m_FreeSuballocationsBySize(VmaStlAllocator<VmaSuballocationList::iterator>(hAllocator->GetAllocationCallbacks()))
{
}
7821 
// No owned resources beyond the containers, which clean up themselves.
VmaBlockMetadata_Generic::~VmaBlockMetadata_Generic()
{
}
7825 
7826 void VmaBlockMetadata_Generic::Init(VkDeviceSize size)
7827 {
7828  VmaBlockMetadata::Init(size);
7829 
7830  m_FreeCount = 1;
7831  m_SumFreeSize = size;
7832 
7833  VmaSuballocation suballoc = {};
7834  suballoc.offset = 0;
7835  suballoc.size = size;
7836  suballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
7837  suballoc.hAllocation = VK_NULL_HANDLE;
7838 
7839  VMA_ASSERT(size > VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER);
7840  m_Suballocations.push_back(suballoc);
7841  VmaSuballocationList::iterator suballocItem = m_Suballocations.end();
7842  --suballocItem;
7843  m_FreeSuballocationsBySize.push_back(suballocItem);
7844 }
7845 
// Audits every internal invariant of the metadata: suballocations tile the
// block contiguously, no two adjacent free ranges exist, cached counters
// (m_FreeCount, m_SumFreeSize) match the list, and the by-size index holds
// exactly the registerable free ranges sorted ascending. Returns true on
// success; VMA_VALIDATE returns false at the first violation.
bool VmaBlockMetadata_Generic::Validate() const
{
 VMA_VALIDATE(!m_Suballocations.empty());

 // Expected offset of new suballocation as calculated from previous ones.
 VkDeviceSize calculatedOffset = 0;
 // Expected number of free suballocations as calculated from traversing their list.
 uint32_t calculatedFreeCount = 0;
 // Expected sum size of free suballocations as calculated from traversing their list.
 VkDeviceSize calculatedSumFreeSize = 0;
 // Expected number of free suballocations that should be registered in
 // m_FreeSuballocationsBySize calculated from traversing their list.
 size_t freeSuballocationsToRegister = 0;
 // True if previous visited suballocation was free.
 bool prevFree = false;

 for(VmaSuballocationList::const_iterator suballocItem = m_Suballocations.cbegin();
 suballocItem != m_Suballocations.cend();
 ++suballocItem)
 {
 const VmaSuballocation& subAlloc = *suballocItem;

 // Actual offset of this suballocation doesn't match expected one.
 VMA_VALIDATE(subAlloc.offset == calculatedOffset);

 const bool currFree = (subAlloc.type == VMA_SUBALLOCATION_TYPE_FREE);
 // Two adjacent free suballocations are invalid. They should be merged.
 VMA_VALIDATE(!prevFree || !currFree);

 // A free range has no allocation handle; a used one must have it.
 VMA_VALIDATE(currFree == (subAlloc.hAllocation == VK_NULL_HANDLE));

 if(currFree)
 {
 calculatedSumFreeSize += subAlloc.size;
 ++calculatedFreeCount;
 if(subAlloc.size >= VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER)
 {
 ++freeSuballocationsToRegister;
 }

 // Margin required between allocations - every free space must be at least that large.
 VMA_VALIDATE(subAlloc.size >= VMA_DEBUG_MARGIN);
 }
 else
 {
 // The allocation object must agree with the list entry.
 VMA_VALIDATE(subAlloc.hAllocation->GetOffset() == subAlloc.offset);
 VMA_VALIDATE(subAlloc.hAllocation->GetSize() == subAlloc.size);

 // Margin required between allocations - previous allocation must be free.
 VMA_VALIDATE(VMA_DEBUG_MARGIN == 0 || prevFree);
 }

 calculatedOffset += subAlloc.size;
 prevFree = currFree;
 }

 // Number of free suballocations registered in m_FreeSuballocationsBySize doesn't
 // match expected one.
 VMA_VALIDATE(m_FreeSuballocationsBySize.size() == freeSuballocationsToRegister);

 VkDeviceSize lastSize = 0;
 for(size_t i = 0; i < m_FreeSuballocationsBySize.size(); ++i)
 {
 VmaSuballocationList::iterator suballocItem = m_FreeSuballocationsBySize[i];

 // Only free suballocations can be registered in m_FreeSuballocationsBySize.
 VMA_VALIDATE(suballocItem->type == VMA_SUBALLOCATION_TYPE_FREE);
 // They must be sorted by size ascending.
 VMA_VALIDATE(suballocItem->size >= lastSize);

 lastSize = suballocItem->size;
 }

 // Check if totals match calculacted values.
 VMA_VALIDATE(ValidateFreeSuballocationList());
 VMA_VALIDATE(calculatedOffset == GetSize());
 VMA_VALIDATE(calculatedSumFreeSize == m_SumFreeSize);
 VMA_VALIDATE(calculatedFreeCount == m_FreeCount);

 return true;
}
7927 
7928 VkDeviceSize VmaBlockMetadata_Generic::GetUnusedRangeSizeMax() const
7929 {
7930  if(!m_FreeSuballocationsBySize.empty())
7931  {
7932  return m_FreeSuballocationsBySize.back()->size;
7933  }
7934  else
7935  {
7936  return 0;
7937  }
7938 }
7939 
7940 bool VmaBlockMetadata_Generic::IsEmpty() const
7941 {
7942  return (m_Suballocations.size() == 1) && (m_FreeCount == 1);
7943 }
7944 
7945 void VmaBlockMetadata_Generic::CalcAllocationStatInfo(VmaStatInfo& outInfo) const
7946 {
7947  outInfo.blockCount = 1;
7948 
7949  const uint32_t rangeCount = (uint32_t)m_Suballocations.size();
7950  outInfo.allocationCount = rangeCount - m_FreeCount;
7951  outInfo.unusedRangeCount = m_FreeCount;
7952 
7953  outInfo.unusedBytes = m_SumFreeSize;
7954  outInfo.usedBytes = GetSize() - outInfo.unusedBytes;
7955 
7956  outInfo.allocationSizeMin = UINT64_MAX;
7957  outInfo.allocationSizeMax = 0;
7958  outInfo.unusedRangeSizeMin = UINT64_MAX;
7959  outInfo.unusedRangeSizeMax = 0;
7960 
7961  for(VmaSuballocationList::const_iterator suballocItem = m_Suballocations.cbegin();
7962  suballocItem != m_Suballocations.cend();
7963  ++suballocItem)
7964  {
7965  const VmaSuballocation& suballoc = *suballocItem;
7966  if(suballoc.type != VMA_SUBALLOCATION_TYPE_FREE)
7967  {
7968  outInfo.allocationSizeMin = VMA_MIN(outInfo.allocationSizeMin, suballoc.size);
7969  outInfo.allocationSizeMax = VMA_MAX(outInfo.allocationSizeMax, suballoc.size);
7970  }
7971  else
7972  {
7973  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, suballoc.size);
7974  outInfo.unusedRangeSizeMax = VMA_MAX(outInfo.unusedRangeSizeMax, suballoc.size);
7975  }
7976  }
7977 }
7978 
7979 void VmaBlockMetadata_Generic::AddPoolStats(VmaPoolStats& inoutStats) const
7980 {
7981  const uint32_t rangeCount = (uint32_t)m_Suballocations.size();
7982 
7983  inoutStats.size += GetSize();
7984  inoutStats.unusedSize += m_SumFreeSize;
7985  inoutStats.allocationCount += rangeCount - m_FreeCount;
7986  inoutStats.unusedRangeCount += m_FreeCount;
7987  inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, GetUnusedRangeSizeMax());
7988 }
7989 
7990 #if VMA_STATS_STRING_ENABLED
7991 
// Serializes the full suballocation map of this block to JSON: summary
// header followed by one entry per suballocation, in address order.
void VmaBlockMetadata_Generic::PrintDetailedMap(class VmaJsonWriter& json) const
{
 PrintDetailedMap_Begin(json,
 m_SumFreeSize, // unusedBytes
 m_Suballocations.size() - (size_t)m_FreeCount, // allocationCount
 m_FreeCount); // unusedRangeCount

 size_t i = 0;
 for(VmaSuballocationList::const_iterator suballocItem = m_Suballocations.cbegin();
 suballocItem != m_Suballocations.cend();
 ++suballocItem, ++i)
 {
 if(suballocItem->type == VMA_SUBALLOCATION_TYPE_FREE)
 {
 PrintDetailedMap_UnusedRange(json, suballocItem->offset, suballocItem->size);
 }
 else
 {
 PrintDetailedMap_Allocation(json, suballocItem->offset, suballocItem->hAllocation);
 }
 }

 PrintDetailedMap_End(json);
}
8016 
8017 #endif // #if VMA_STATS_STRING_ENABLED
8018 
8019 bool VmaBlockMetadata_Generic::CreateAllocationRequest(
8020  uint32_t currentFrameIndex,
8021  uint32_t frameInUseCount,
8022  VkDeviceSize bufferImageGranularity,
8023  VkDeviceSize allocSize,
8024  VkDeviceSize allocAlignment,
8025  bool upperAddress,
8026  VmaSuballocationType allocType,
8027  bool canMakeOtherLost,
8028  uint32_t strategy,
8029  VmaAllocationRequest* pAllocationRequest)
8030 {
8031  VMA_ASSERT(allocSize > 0);
8032  VMA_ASSERT(!upperAddress);
8033  VMA_ASSERT(allocType != VMA_SUBALLOCATION_TYPE_FREE);
8034  VMA_ASSERT(pAllocationRequest != VMA_NULL);
8035  VMA_HEAVY_ASSERT(Validate());
8036 
8037  pAllocationRequest->type = VmaAllocationRequestType::Normal;
8038 
8039  // There is not enough total free space in this block to fullfill the request: Early return.
8040  if(canMakeOtherLost == false &&
8041  m_SumFreeSize < allocSize + 2 * VMA_DEBUG_MARGIN)
8042  {
8043  return false;
8044  }
8045 
8046  // New algorithm, efficiently searching freeSuballocationsBySize.
8047  const size_t freeSuballocCount = m_FreeSuballocationsBySize.size();
8048  if(freeSuballocCount > 0)
8049  {
8051  {
8052  // Find first free suballocation with size not less than allocSize + 2 * VMA_DEBUG_MARGIN.
8053  VmaSuballocationList::iterator* const it = VmaBinaryFindFirstNotLess(
8054  m_FreeSuballocationsBySize.data(),
8055  m_FreeSuballocationsBySize.data() + freeSuballocCount,
8056  allocSize + 2 * VMA_DEBUG_MARGIN,
8057  VmaSuballocationItemSizeLess());
8058  size_t index = it - m_FreeSuballocationsBySize.data();
8059  for(; index < freeSuballocCount; ++index)
8060  {
8061  if(CheckAllocation(
8062  currentFrameIndex,
8063  frameInUseCount,
8064  bufferImageGranularity,
8065  allocSize,
8066  allocAlignment,
8067  allocType,
8068  m_FreeSuballocationsBySize[index],
8069  false, // canMakeOtherLost
8070  &pAllocationRequest->offset,
8071  &pAllocationRequest->itemsToMakeLostCount,
8072  &pAllocationRequest->sumFreeSize,
8073  &pAllocationRequest->sumItemSize))
8074  {
8075  pAllocationRequest->item = m_FreeSuballocationsBySize[index];
8076  return true;
8077  }
8078  }
8079  }
8080  else if(strategy == VMA_ALLOCATION_INTERNAL_STRATEGY_MIN_OFFSET)
8081  {
8082  for(VmaSuballocationList::iterator it = m_Suballocations.begin();
8083  it != m_Suballocations.end();
8084  ++it)
8085  {
8086  if(it->type == VMA_SUBALLOCATION_TYPE_FREE && CheckAllocation(
8087  currentFrameIndex,
8088  frameInUseCount,
8089  bufferImageGranularity,
8090  allocSize,
8091  allocAlignment,
8092  allocType,
8093  it,
8094  false, // canMakeOtherLost
8095  &pAllocationRequest->offset,
8096  &pAllocationRequest->itemsToMakeLostCount,
8097  &pAllocationRequest->sumFreeSize,
8098  &pAllocationRequest->sumItemSize))
8099  {
8100  pAllocationRequest->item = it;
8101  return true;
8102  }
8103  }
8104  }
8105  else // WORST_FIT, FIRST_FIT
8106  {
8107  // Search staring from biggest suballocations.
8108  for(size_t index = freeSuballocCount; index--; )
8109  {
8110  if(CheckAllocation(
8111  currentFrameIndex,
8112  frameInUseCount,
8113  bufferImageGranularity,
8114  allocSize,
8115  allocAlignment,
8116  allocType,
8117  m_FreeSuballocationsBySize[index],
8118  false, // canMakeOtherLost
8119  &pAllocationRequest->offset,
8120  &pAllocationRequest->itemsToMakeLostCount,
8121  &pAllocationRequest->sumFreeSize,
8122  &pAllocationRequest->sumItemSize))
8123  {
8124  pAllocationRequest->item = m_FreeSuballocationsBySize[index];
8125  return true;
8126  }
8127  }
8128  }
8129  }
8130 
8131  if(canMakeOtherLost)
8132  {
8133  // Brute-force algorithm. TODO: Come up with something better.
8134 
8135  bool found = false;
8136  VmaAllocationRequest tmpAllocRequest = {};
8137  tmpAllocRequest.type = VmaAllocationRequestType::Normal;
8138  for(VmaSuballocationList::iterator suballocIt = m_Suballocations.begin();
8139  suballocIt != m_Suballocations.end();
8140  ++suballocIt)
8141  {
8142  if(suballocIt->type == VMA_SUBALLOCATION_TYPE_FREE ||
8143  suballocIt->hAllocation->CanBecomeLost())
8144  {
8145  if(CheckAllocation(
8146  currentFrameIndex,
8147  frameInUseCount,
8148  bufferImageGranularity,
8149  allocSize,
8150  allocAlignment,
8151  allocType,
8152  suballocIt,
8153  canMakeOtherLost,
8154  &tmpAllocRequest.offset,
8155  &tmpAllocRequest.itemsToMakeLostCount,
8156  &tmpAllocRequest.sumFreeSize,
8157  &tmpAllocRequest.sumItemSize))
8158  {
8160  {
8161  *pAllocationRequest = tmpAllocRequest;
8162  pAllocationRequest->item = suballocIt;
8163  break;
8164  }
8165  if(!found || tmpAllocRequest.CalcCost() < pAllocationRequest->CalcCost())
8166  {
8167  *pAllocationRequest = tmpAllocRequest;
8168  pAllocationRequest->item = suballocIt;
8169  found = true;
8170  }
8171  }
8172  }
8173  }
8174 
8175  return found;
8176  }
8177 
8178  return false;
8179 }
8180 
/*
Makes lost the allocations that pAllocationRequest->itemsToMakeLostCount says
must be sacrificed, starting at pAllocationRequest->item. Intended for a
request produced by CreateAllocationRequest() with canMakeOtherLost == true —
TODO confirm against callers.

Returns true on success; pAllocationRequest->item then points at a FREE
suballocation large enough for the request. Returns false if any allocation
refused MakeLost(), in which case the request must be abandoned.
*/
bool VmaBlockMetadata_Generic::MakeRequestedAllocationsLost(
    uint32_t currentFrameIndex,
    uint32_t frameInUseCount,
    VmaAllocationRequest* pAllocationRequest)
{
    VMA_ASSERT(pAllocationRequest && pAllocationRequest->type == VmaAllocationRequestType::Normal);

    while(pAllocationRequest->itemsToMakeLostCount > 0)
    {
        // Step past the current FREE item to reach the next used suballocation.
        // (Adjacent FREE items are merged by FreeSuballocation, so one step suffices.)
        if(pAllocationRequest->item->type == VMA_SUBALLOCATION_TYPE_FREE)
        {
            ++pAllocationRequest->item;
        }
        VMA_ASSERT(pAllocationRequest->item != m_Suballocations.end());
        VMA_ASSERT(pAllocationRequest->item->hAllocation != VK_NULL_HANDLE);
        VMA_ASSERT(pAllocationRequest->item->hAllocation->CanBecomeLost());
        if(pAllocationRequest->item->hAllocation->MakeLost(currentFrameIndex, frameInUseCount))
        {
            // Freeing may merge with neighboring FREE items; continue from the
            // iterator FreeSuballocation returns, which stays valid.
            pAllocationRequest->item = FreeSuballocation(pAllocationRequest->item);
            --pAllocationRequest->itemsToMakeLostCount;
        }
        else
        {
            return false;
        }
    }

    VMA_HEAVY_ASSERT(Validate());
    VMA_ASSERT(pAllocationRequest->item != m_Suballocations.end());
    VMA_ASSERT(pAllocationRequest->item->type == VMA_SUBALLOCATION_TYPE_FREE);

    return true;
}
8214 
8215 uint32_t VmaBlockMetadata_Generic::MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount)
8216 {
8217  uint32_t lostAllocationCount = 0;
8218  for(VmaSuballocationList::iterator it = m_Suballocations.begin();
8219  it != m_Suballocations.end();
8220  ++it)
8221  {
8222  if(it->type != VMA_SUBALLOCATION_TYPE_FREE &&
8223  it->hAllocation->CanBecomeLost() &&
8224  it->hAllocation->MakeLost(currentFrameIndex, frameInUseCount))
8225  {
8226  it = FreeSuballocation(it);
8227  ++lostAllocationCount;
8228  }
8229  }
8230  return lostAllocationCount;
8231 }
8232 
8233 VkResult VmaBlockMetadata_Generic::CheckCorruption(const void* pBlockData)
8234 {
8235  for(VmaSuballocationList::iterator it = m_Suballocations.begin();
8236  it != m_Suballocations.end();
8237  ++it)
8238  {
8239  if(it->type != VMA_SUBALLOCATION_TYPE_FREE)
8240  {
8241  if(!VmaValidateMagicValue(pBlockData, it->offset - VMA_DEBUG_MARGIN))
8242  {
8243  VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED BEFORE VALIDATED ALLOCATION!");
8244  return VK_ERROR_VALIDATION_FAILED_EXT;
8245  }
8246  if(!VmaValidateMagicValue(pBlockData, it->offset + it->size))
8247  {
8248  VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED AFTER VALIDATED ALLOCATION!");
8249  return VK_ERROR_VALIDATION_FAILED_EXT;
8250  }
8251  }
8252  }
8253 
8254  return VK_SUCCESS;
8255 }
8256 
/*
Commits a previously validated allocation request: converts the FREE
suballocation at request.item into a used suballocation of allocSize bytes
owned by hAllocation. Any leftover space before (alignment/margin padding) or
after the allocation is split off into new FREE suballocations, and
m_FreeCount / m_SumFreeSize are updated accordingly.
*/
void VmaBlockMetadata_Generic::Alloc(
    const VmaAllocationRequest& request,
    VmaSuballocationType type,
    VkDeviceSize allocSize,
    VmaAllocation hAllocation)
{
    VMA_ASSERT(request.type == VmaAllocationRequestType::Normal);
    VMA_ASSERT(request.item != m_Suballocations.end());
    VmaSuballocation& suballoc = *request.item;
    // Given suballocation is a free block.
    VMA_ASSERT(suballoc.type == VMA_SUBALLOCATION_TYPE_FREE);
    // Given offset is inside this suballocation.
    VMA_ASSERT(request.offset >= suballoc.offset);
    const VkDeviceSize paddingBegin = request.offset - suballoc.offset;
    VMA_ASSERT(suballoc.size >= paddingBegin + allocSize);
    const VkDeviceSize paddingEnd = suballoc.size - paddingBegin - allocSize;

    // Unregister this free suballocation from m_FreeSuballocationsBySize and update
    // it to become used. Must happen BEFORE mutating size, because the registry
    // is sorted by size.
    UnregisterFreeSuballocation(request.item);

    suballoc.offset = request.offset;
    suballoc.size = allocSize;
    suballoc.type = type;
    suballoc.hAllocation = hAllocation;

    // If there are any free bytes remaining at the end, insert new free suballocation after current one.
    if(paddingEnd)
    {
        VmaSuballocation paddingSuballoc = {};
        paddingSuballoc.offset = request.offset + allocSize;
        paddingSuballoc.size = paddingEnd;
        paddingSuballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
        VmaSuballocationList::iterator next = request.item;
        ++next;
        const VmaSuballocationList::iterator paddingEndItem =
            m_Suballocations.insert(next, paddingSuballoc);
        RegisterFreeSuballocation(paddingEndItem);
    }

    // If there are any free bytes remaining at the beginning, insert new free suballocation before current one.
    if(paddingBegin)
    {
        VmaSuballocation paddingSuballoc = {};
        paddingSuballoc.offset = request.offset - paddingBegin;
        paddingSuballoc.size = paddingBegin;
        paddingSuballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
        const VmaSuballocationList::iterator paddingBeginItem =
            m_Suballocations.insert(request.item, paddingSuballoc);
        RegisterFreeSuballocation(paddingBeginItem);
    }

    // Update totals: one FREE item consumed, plus one re-added per padding split.
    m_FreeCount = m_FreeCount - 1;
    if(paddingBegin > 0)
    {
        ++m_FreeCount;
    }
    if(paddingEnd > 0)
    {
        ++m_FreeCount;
    }
    // Only allocSize bytes left the free pool; padding bytes remain free.
    m_SumFreeSize -= allocSize;
}
8321 
8322 void VmaBlockMetadata_Generic::Free(const VmaAllocation allocation)
8323 {
8324  for(VmaSuballocationList::iterator suballocItem = m_Suballocations.begin();
8325  suballocItem != m_Suballocations.end();
8326  ++suballocItem)
8327  {
8328  VmaSuballocation& suballoc = *suballocItem;
8329  if(suballoc.hAllocation == allocation)
8330  {
8331  FreeSuballocation(suballocItem);
8332  VMA_HEAVY_ASSERT(Validate());
8333  return;
8334  }
8335  }
8336  VMA_ASSERT(0 && "Not found!");
8337 }
8338 
8339 void VmaBlockMetadata_Generic::FreeAtOffset(VkDeviceSize offset)
8340 {
8341  for(VmaSuballocationList::iterator suballocItem = m_Suballocations.begin();
8342  suballocItem != m_Suballocations.end();
8343  ++suballocItem)
8344  {
8345  VmaSuballocation& suballoc = *suballocItem;
8346  if(suballoc.offset == offset)
8347  {
8348  FreeSuballocation(suballocItem);
8349  return;
8350  }
8351  }
8352  VMA_ASSERT(0 && "Not found!");
8353 }
8354 
8355 bool VmaBlockMetadata_Generic::ValidateFreeSuballocationList() const
8356 {
8357  VkDeviceSize lastSize = 0;
8358  for(size_t i = 0, count = m_FreeSuballocationsBySize.size(); i < count; ++i)
8359  {
8360  const VmaSuballocationList::iterator it = m_FreeSuballocationsBySize[i];
8361 
8362  VMA_VALIDATE(it->type == VMA_SUBALLOCATION_TYPE_FREE);
8363  VMA_VALIDATE(it->size >= VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER);
8364  VMA_VALIDATE(it->size >= lastSize);
8365  lastSize = it->size;
8366  }
8367  return true;
8368 }
8369 
/*
Checks whether an allocation of allocSize / allocAlignment / allocType can be
placed starting at suballocItem, honoring VMA_DEBUG_MARGIN and
bufferImageGranularity. Does not modify any state.

Two modes:
- canMakeOtherLost == false: suballocItem must be FREE and must fit the whole
  request by itself.
- canMakeOtherLost == true: the request may span suballocItem and following
  items, counting used-but-lost-capable allocations as sacrificeable.

Outputs (valid only when returning true):
- *pOffset               final aligned offset for the allocation.
- *itemsToMakeLostCount  how many allocations must be made lost first.
- *pSumFreeSize          free bytes covered by the candidate range.
- *pSumItemSize          bytes of allocations that would be made lost.
*/
bool VmaBlockMetadata_Generic::CheckAllocation(
    uint32_t currentFrameIndex,
    uint32_t frameInUseCount,
    VkDeviceSize bufferImageGranularity,
    VkDeviceSize allocSize,
    VkDeviceSize allocAlignment,
    VmaSuballocationType allocType,
    VmaSuballocationList::const_iterator suballocItem,
    bool canMakeOtherLost,
    VkDeviceSize* pOffset,
    size_t* itemsToMakeLostCount,
    VkDeviceSize* pSumFreeSize,
    VkDeviceSize* pSumItemSize) const
{
    VMA_ASSERT(allocSize > 0);
    VMA_ASSERT(allocType != VMA_SUBALLOCATION_TYPE_FREE);
    VMA_ASSERT(suballocItem != m_Suballocations.cend());
    VMA_ASSERT(pOffset != VMA_NULL);

    *itemsToMakeLostCount = 0;
    *pSumFreeSize = 0;
    *pSumItemSize = 0;

    if(canMakeOtherLost)
    {
        // Starting item contributes either free bytes or one lost-capable allocation.
        if(suballocItem->type == VMA_SUBALLOCATION_TYPE_FREE)
        {
            *pSumFreeSize = suballocItem->size;
        }
        else
        {
            if(suballocItem->hAllocation->CanBecomeLost() &&
                suballocItem->hAllocation->GetLastUseFrameIndex() + frameInUseCount < currentFrameIndex)
            {
                ++*itemsToMakeLostCount;
                *pSumItemSize = suballocItem->size;
            }
            else
            {
                // Starting item is in use and cannot be sacrificed.
                return false;
            }
        }

        // Remaining size is too small for this request: Early return.
        if(GetSize() - suballocItem->offset < allocSize)
        {
            return false;
        }

        // Start from offset equal to beginning of this suballocation.
        *pOffset = suballocItem->offset;

        // Apply VMA_DEBUG_MARGIN at the beginning.
        if(VMA_DEBUG_MARGIN > 0)
        {
            *pOffset += VMA_DEBUG_MARGIN;
        }

        // Apply alignment.
        *pOffset = VmaAlignUp(*pOffset, allocAlignment);

        // Check previous suballocations for BufferImageGranularity conflicts.
        // Make bigger alignment if necessary.
        if(bufferImageGranularity > 1)
        {
            bool bufferImageGranularityConflict = false;
            VmaSuballocationList::const_iterator prevSuballocItem = suballocItem;
            while(prevSuballocItem != m_Suballocations.cbegin())
            {
                --prevSuballocItem;
                const VmaSuballocation& prevSuballoc = *prevSuballocItem;
                if(VmaBlocksOnSamePage(prevSuballoc.offset, prevSuballoc.size, *pOffset, bufferImageGranularity))
                {
                    if(VmaIsBufferImageGranularityConflict(prevSuballoc.type, allocType))
                    {
                        bufferImageGranularityConflict = true;
                        break;
                    }
                }
                else
                    // Already on previous page.
                    break;
            }
            if(bufferImageGranularityConflict)
            {
                // Push the allocation to the next granularity page.
                *pOffset = VmaAlignUp(*pOffset, bufferImageGranularity);
            }
        }

        // Now that we have final *pOffset, check if we are past suballocItem.
        // If yes, return false - this function should be called for another suballocItem as starting point.
        if(*pOffset >= suballocItem->offset + suballocItem->size)
        {
            return false;
        }

        // Calculate padding at the beginning based on current offset.
        const VkDeviceSize paddingBegin = *pOffset - suballocItem->offset;

        // Calculate required margin at the end.
        const VkDeviceSize requiredEndMargin = VMA_DEBUG_MARGIN;

        const VkDeviceSize totalSize = paddingBegin + allocSize + requiredEndMargin;
        // Another early return check.
        if(suballocItem->offset + totalSize > GetSize())
        {
            return false;
        }

        // Advance lastSuballocItem until desired size is reached.
        // Update itemsToMakeLostCount.
        VmaSuballocationList::const_iterator lastSuballocItem = suballocItem;
        if(totalSize > suballocItem->size)
        {
            VkDeviceSize remainingSize = totalSize - suballocItem->size;
            while(remainingSize > 0)
            {
                ++lastSuballocItem;
                if(lastSuballocItem == m_Suballocations.cend())
                {
                    // Ran off the end of the block before covering totalSize.
                    return false;
                }
                if(lastSuballocItem->type == VMA_SUBALLOCATION_TYPE_FREE)
                {
                    *pSumFreeSize += lastSuballocItem->size;
                }
                else
                {
                    VMA_ASSERT(lastSuballocItem->hAllocation != VK_NULL_HANDLE);
                    if(lastSuballocItem->hAllocation->CanBecomeLost() &&
                        lastSuballocItem->hAllocation->GetLastUseFrameIndex() + frameInUseCount < currentFrameIndex)
                    {
                        ++*itemsToMakeLostCount;
                        *pSumItemSize += lastSuballocItem->size;
                    }
                    else
                    {
                        // An unsacrificeable allocation blocks the range.
                        return false;
                    }
                }
                remainingSize = (lastSuballocItem->size < remainingSize) ?
                    remainingSize - lastSuballocItem->size : 0;
            }
        }

        // Check next suballocations for BufferImageGranularity conflicts.
        // If conflict exists, we must mark more allocations lost or fail.
        if(bufferImageGranularity > 1)
        {
            VmaSuballocationList::const_iterator nextSuballocItem = lastSuballocItem;
            ++nextSuballocItem;
            while(nextSuballocItem != m_Suballocations.cend())
            {
                const VmaSuballocation& nextSuballoc = *nextSuballocItem;
                if(VmaBlocksOnSamePage(*pOffset, allocSize, nextSuballoc.offset, bufferImageGranularity))
                {
                    if(VmaIsBufferImageGranularityConflict(allocType, nextSuballoc.type))
                    {
                        VMA_ASSERT(nextSuballoc.hAllocation != VK_NULL_HANDLE);
                        if(nextSuballoc.hAllocation->CanBecomeLost() &&
                            nextSuballoc.hAllocation->GetLastUseFrameIndex() + frameInUseCount < currentFrameIndex)
                        {
                            ++*itemsToMakeLostCount;
                        }
                        else
                        {
                            return false;
                        }
                    }
                }
                else
                {
                    // Already on next page.
                    break;
                }
                ++nextSuballocItem;
            }
        }
    }
    else
    {
        // Simple mode: the request must fit entirely inside this one FREE item.
        const VmaSuballocation& suballoc = *suballocItem;
        VMA_ASSERT(suballoc.type == VMA_SUBALLOCATION_TYPE_FREE);

        *pSumFreeSize = suballoc.size;

        // Size of this suballocation is too small for this request: Early return.
        if(suballoc.size < allocSize)
        {
            return false;
        }

        // Start from offset equal to beginning of this suballocation.
        *pOffset = suballoc.offset;

        // Apply VMA_DEBUG_MARGIN at the beginning.
        if(VMA_DEBUG_MARGIN > 0)
        {
            *pOffset += VMA_DEBUG_MARGIN;
        }

        // Apply alignment.
        *pOffset = VmaAlignUp(*pOffset, allocAlignment);

        // Check previous suballocations for BufferImageGranularity conflicts.
        // Make bigger alignment if necessary.
        if(bufferImageGranularity > 1)
        {
            bool bufferImageGranularityConflict = false;
            VmaSuballocationList::const_iterator prevSuballocItem = suballocItem;
            while(prevSuballocItem != m_Suballocations.cbegin())
            {
                --prevSuballocItem;
                const VmaSuballocation& prevSuballoc = *prevSuballocItem;
                if(VmaBlocksOnSamePage(prevSuballoc.offset, prevSuballoc.size, *pOffset, bufferImageGranularity))
                {
                    if(VmaIsBufferImageGranularityConflict(prevSuballoc.type, allocType))
                    {
                        bufferImageGranularityConflict = true;
                        break;
                    }
                }
                else
                    // Already on previous page.
                    break;
            }
            if(bufferImageGranularityConflict)
            {
                // Push the allocation to the next granularity page.
                *pOffset = VmaAlignUp(*pOffset, bufferImageGranularity);
            }
        }

        // Calculate padding at the beginning based on current offset.
        const VkDeviceSize paddingBegin = *pOffset - suballoc.offset;

        // Calculate required margin at the end.
        const VkDeviceSize requiredEndMargin = VMA_DEBUG_MARGIN;

        // Fail if requested size plus margin before and after is bigger than size of this suballocation.
        if(paddingBegin + allocSize + requiredEndMargin > suballoc.size)
        {
            return false;
        }

        // Check next suballocations for BufferImageGranularity conflicts.
        // If conflict exists, allocation cannot be made here.
        if(bufferImageGranularity > 1)
        {
            VmaSuballocationList::const_iterator nextSuballocItem = suballocItem;
            ++nextSuballocItem;
            while(nextSuballocItem != m_Suballocations.cend())
            {
                const VmaSuballocation& nextSuballoc = *nextSuballocItem;
                if(VmaBlocksOnSamePage(*pOffset, allocSize, nextSuballoc.offset, bufferImageGranularity))
                {
                    if(VmaIsBufferImageGranularityConflict(allocType, nextSuballoc.type))
                    {
                        return false;
                    }
                }
                else
                {
                    // Already on next page.
                    break;
                }
                ++nextSuballocItem;
            }
        }
    }

    // All tests passed: Success. pOffset is already filled.
    return true;
}
8643 
8644 void VmaBlockMetadata_Generic::MergeFreeWithNext(VmaSuballocationList::iterator item)
8645 {
8646  VMA_ASSERT(item != m_Suballocations.end());
8647  VMA_ASSERT(item->type == VMA_SUBALLOCATION_TYPE_FREE);
8648 
8649  VmaSuballocationList::iterator nextItem = item;
8650  ++nextItem;
8651  VMA_ASSERT(nextItem != m_Suballocations.end());
8652  VMA_ASSERT(nextItem->type == VMA_SUBALLOCATION_TYPE_FREE);
8653 
8654  item->size += nextItem->size;
8655  --m_FreeCount;
8656  m_Suballocations.erase(nextItem);
8657 }
8658 
/*
Marks the given suballocation as FREE, merges it with adjacent FREE
suballocations, updates m_FreeCount / m_SumFreeSize, and (re)registers the
resulting FREE range in m_FreeSuballocationsBySize.
Returns an iterator to the final merged FREE suballocation — the caller must
use the return value, since 'suballocItem' may have been erased by merging.
*/
VmaSuballocationList::iterator VmaBlockMetadata_Generic::FreeSuballocation(VmaSuballocationList::iterator suballocItem)
{
    // Change this suballocation to be marked as free.
    VmaSuballocation& suballoc = *suballocItem;
    suballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
    suballoc.hAllocation = VK_NULL_HANDLE;

    // Update totals.
    ++m_FreeCount;
    m_SumFreeSize += suballoc.size;

    // Merge with previous and/or next suballocation if it's also free.
    bool mergeWithNext = false;
    bool mergeWithPrev = false;

    VmaSuballocationList::iterator nextItem = suballocItem;
    ++nextItem;
    if((nextItem != m_Suballocations.end()) && (nextItem->type == VMA_SUBALLOCATION_TYPE_FREE))
    {
        mergeWithNext = true;
    }

    VmaSuballocationList::iterator prevItem = suballocItem;
    if(suballocItem != m_Suballocations.begin())
    {
        --prevItem;
        if(prevItem->type == VMA_SUBALLOCATION_TYPE_FREE)
        {
            mergeWithPrev = true;
        }
    }

    // Neighbors must be unregistered from the size-sorted registry BEFORE
    // merging, because merging changes their size.
    if(mergeWithNext)
    {
        UnregisterFreeSuballocation(nextItem);
        MergeFreeWithNext(suballocItem);
    }

    if(mergeWithPrev)
    {
        UnregisterFreeSuballocation(prevItem);
        MergeFreeWithNext(prevItem);
        RegisterFreeSuballocation(prevItem);
        // suballocItem was erased by the merge; the surviving item is prevItem.
        return prevItem;
    }
    else
    {
        RegisterFreeSuballocation(suballocItem);
        return suballocItem;
    }
}
8710 
8711 void VmaBlockMetadata_Generic::RegisterFreeSuballocation(VmaSuballocationList::iterator item)
8712 {
8713  VMA_ASSERT(item->type == VMA_SUBALLOCATION_TYPE_FREE);
8714  VMA_ASSERT(item->size > 0);
8715 
8716  // You may want to enable this validation at the beginning or at the end of
8717  // this function, depending on what do you want to check.
8718  VMA_HEAVY_ASSERT(ValidateFreeSuballocationList());
8719 
8720  if(item->size >= VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER)
8721  {
8722  if(m_FreeSuballocationsBySize.empty())
8723  {
8724  m_FreeSuballocationsBySize.push_back(item);
8725  }
8726  else
8727  {
8728  VmaVectorInsertSorted<VmaSuballocationItemSizeLess>(m_FreeSuballocationsBySize, item);
8729  }
8730  }
8731 
8732  //VMA_HEAVY_ASSERT(ValidateFreeSuballocationList());
8733 }
8734 
8735 
8736 void VmaBlockMetadata_Generic::UnregisterFreeSuballocation(VmaSuballocationList::iterator item)
8737 {
8738  VMA_ASSERT(item->type == VMA_SUBALLOCATION_TYPE_FREE);
8739  VMA_ASSERT(item->size > 0);
8740 
8741  // You may want to enable this validation at the beginning or at the end of
8742  // this function, depending on what do you want to check.
8743  VMA_HEAVY_ASSERT(ValidateFreeSuballocationList());
8744 
8745  if(item->size >= VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER)
8746  {
8747  VmaSuballocationList::iterator* const it = VmaBinaryFindFirstNotLess(
8748  m_FreeSuballocationsBySize.data(),
8749  m_FreeSuballocationsBySize.data() + m_FreeSuballocationsBySize.size(),
8750  item,
8751  VmaSuballocationItemSizeLess());
8752  for(size_t index = it - m_FreeSuballocationsBySize.data();
8753  index < m_FreeSuballocationsBySize.size();
8754  ++index)
8755  {
8756  if(m_FreeSuballocationsBySize[index] == item)
8757  {
8758  VmaVectorRemove(m_FreeSuballocationsBySize, index);
8759  return;
8760  }
8761  VMA_ASSERT((m_FreeSuballocationsBySize[index]->size == item->size) && "Not found.");
8762  }
8763  VMA_ASSERT(0 && "Not found.");
8764  }
8765 
8766  //VMA_HEAVY_ASSERT(ValidateFreeSuballocationList());
8767 }
8768 
8769 bool VmaBlockMetadata_Generic::IsBufferImageGranularityConflictPossible(
8770  VkDeviceSize bufferImageGranularity,
8771  VmaSuballocationType& inOutPrevSuballocType) const
8772 {
8773  if(bufferImageGranularity == 1 || IsEmpty())
8774  {
8775  return false;
8776  }
8777 
8778  VkDeviceSize minAlignment = VK_WHOLE_SIZE;
8779  bool typeConflictFound = false;
8780  for(VmaSuballocationList::const_iterator it = m_Suballocations.cbegin();
8781  it != m_Suballocations.cend();
8782  ++it)
8783  {
8784  const VmaSuballocationType suballocType = it->type;
8785  if(suballocType != VMA_SUBALLOCATION_TYPE_FREE)
8786  {
8787  minAlignment = VMA_MIN(minAlignment, it->hAllocation->GetAlignment());
8788  if(VmaIsBufferImageGranularityConflict(inOutPrevSuballocType, suballocType))
8789  {
8790  typeConflictFound = true;
8791  }
8792  inOutPrevSuballocType = suballocType;
8793  }
8794  }
8795 
8796  return typeConflictFound || minAlignment >= bufferImageGranularity;
8797 }
8798 
8800 // class VmaBlockMetadata_Linear
8801 
// Constructs empty linear metadata: both suballocation vectors empty,
// vector 0 initially acting as "1st", second vector unused, all null-item
// counters zero. Init() must be called before use to set the block size.
VmaBlockMetadata_Linear::VmaBlockMetadata_Linear(VmaAllocator hAllocator) :
    VmaBlockMetadata(hAllocator),
    m_SumFreeSize(0),
    m_Suballocations0(VmaStlAllocator<VmaSuballocation>(hAllocator->GetAllocationCallbacks())),
    m_Suballocations1(VmaStlAllocator<VmaSuballocation>(hAllocator->GetAllocationCallbacks())),
    m_1stVectorIndex(0),
    m_2ndVectorMode(SECOND_VECTOR_EMPTY),
    m_1stNullItemsBeginCount(0),
    m_1stNullItemsMiddleCount(0),
    m_2ndNullItemsCount(0)
{
}
8814 
// Nothing to release explicitly: the suballocation vectors free their storage
// through their own destructors (using the allocator's callbacks).
VmaBlockMetadata_Linear::~VmaBlockMetadata_Linear()
{
}
8818 
// Initializes metadata for a block of the given size. The entire block
// starts out free.
void VmaBlockMetadata_Linear::Init(VkDeviceSize size)
{
    VmaBlockMetadata::Init(size);
    m_SumFreeSize = size;
}
8824 
/*
Validates all internal invariants of the linear metadata:
- the 2nd vector is empty exactly when mode is SECOND_VECTOR_EMPTY;
- null (freed) items appear only where the counters say they do;
- suballocations are in strictly increasing offset order with at least
  VMA_DEBUG_MARGIN between them, in the layout order implied by the mode
  (2nd vector before 1st for RING_BUFFER; 1st then 2nd top-down for
  DOUBLE_STACK);
- each used suballocation agrees with its VmaAllocation's offset/size;
- m_SumFreeSize equals block size minus the sum of used sizes.
Returns true if all checks pass (VMA_VALIDATE returns false on failure).
*/
bool VmaBlockMetadata_Linear::Validate() const
{
    const SuballocationVectorType& suballocations1st = AccessSuballocations1st();
    const SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();

    VMA_VALIDATE(suballocations2nd.empty() == (m_2ndVectorMode == SECOND_VECTOR_EMPTY));
    VMA_VALIDATE(!suballocations1st.empty() ||
        suballocations2nd.empty() ||
        m_2ndVectorMode != SECOND_VECTOR_RING_BUFFER);

    if(!suballocations1st.empty())
    {
        // Null item at the beginning should be accounted into m_1stNullItemsBeginCount.
        VMA_VALIDATE(suballocations1st[m_1stNullItemsBeginCount].hAllocation != VK_NULL_HANDLE);
        // Null item at the end should be just pop_back().
        VMA_VALIDATE(suballocations1st.back().hAllocation != VK_NULL_HANDLE);
    }
    if(!suballocations2nd.empty())
    {
        // Null item at the end should be just pop_back().
        VMA_VALIDATE(suballocations2nd.back().hAllocation != VK_NULL_HANDLE);
    }

    VMA_VALIDATE(m_1stNullItemsBeginCount + m_1stNullItemsMiddleCount <= suballocations1st.size());
    VMA_VALIDATE(m_2ndNullItemsCount <= suballocations2nd.size());

    VkDeviceSize sumUsedSize = 0;
    const size_t suballoc1stCount = suballocations1st.size();
    // 'offset' tracks the minimum offset the next suballocation may start at.
    VkDeviceSize offset = VMA_DEBUG_MARGIN;

    // In ring-buffer mode the 2nd vector occupies the LOW part of the block,
    // before the 1st vector, so validate it first in offset order.
    if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
    {
        const size_t suballoc2ndCount = suballocations2nd.size();
        size_t nullItem2ndCount = 0;
        for(size_t i = 0; i < suballoc2ndCount; ++i)
        {
            const VmaSuballocation& suballoc = suballocations2nd[i];
            const bool currFree = (suballoc.type == VMA_SUBALLOCATION_TYPE_FREE);

            VMA_VALIDATE(currFree == (suballoc.hAllocation == VK_NULL_HANDLE));
            VMA_VALIDATE(suballoc.offset >= offset);

            if(!currFree)
            {
                VMA_VALIDATE(suballoc.hAllocation->GetOffset() == suballoc.offset);
                VMA_VALIDATE(suballoc.hAllocation->GetSize() == suballoc.size);
                sumUsedSize += suballoc.size;
            }
            else
            {
                ++nullItem2ndCount;
            }

            offset = suballoc.offset + suballoc.size + VMA_DEBUG_MARGIN;
        }

        VMA_VALIDATE(nullItem2ndCount == m_2ndNullItemsCount);
    }

    // The leading m_1stNullItemsBeginCount items of the 1st vector must all be null.
    for(size_t i = 0; i < m_1stNullItemsBeginCount; ++i)
    {
        const VmaSuballocation& suballoc = suballocations1st[i];
        VMA_VALIDATE(suballoc.type == VMA_SUBALLOCATION_TYPE_FREE &&
            suballoc.hAllocation == VK_NULL_HANDLE);
    }

    size_t nullItem1stCount = m_1stNullItemsBeginCount;

    for(size_t i = m_1stNullItemsBeginCount; i < suballoc1stCount; ++i)
    {
        const VmaSuballocation& suballoc = suballocations1st[i];
        const bool currFree = (suballoc.type == VMA_SUBALLOCATION_TYPE_FREE);

        VMA_VALIDATE(currFree == (suballoc.hAllocation == VK_NULL_HANDLE));
        VMA_VALIDATE(suballoc.offset >= offset);
        VMA_VALIDATE(i >= m_1stNullItemsBeginCount || currFree);

        if(!currFree)
        {
            VMA_VALIDATE(suballoc.hAllocation->GetOffset() == suballoc.offset);
            VMA_VALIDATE(suballoc.hAllocation->GetSize() == suballoc.size);
            sumUsedSize += suballoc.size;
        }
        else
        {
            ++nullItem1stCount;
        }

        offset = suballoc.offset + suballoc.size + VMA_DEBUG_MARGIN;
    }
    VMA_VALIDATE(nullItem1stCount == m_1stNullItemsBeginCount + m_1stNullItemsMiddleCount);

    // In double-stack mode the 2nd vector grows DOWN from the end of the block,
    // so iterate it in reverse to visit items in increasing offset order.
    if(m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
    {
        const size_t suballoc2ndCount = suballocations2nd.size();
        size_t nullItem2ndCount = 0;
        for(size_t i = suballoc2ndCount; i--; )
        {
            const VmaSuballocation& suballoc = suballocations2nd[i];
            const bool currFree = (suballoc.type == VMA_SUBALLOCATION_TYPE_FREE);

            VMA_VALIDATE(currFree == (suballoc.hAllocation == VK_NULL_HANDLE));
            VMA_VALIDATE(suballoc.offset >= offset);

            if(!currFree)
            {
                VMA_VALIDATE(suballoc.hAllocation->GetOffset() == suballoc.offset);
                VMA_VALIDATE(suballoc.hAllocation->GetSize() == suballoc.size);
                sumUsedSize += suballoc.size;
            }
            else
            {
                ++nullItem2ndCount;
            }

            offset = suballoc.offset + suballoc.size + VMA_DEBUG_MARGIN;
        }

        VMA_VALIDATE(nullItem2ndCount == m_2ndNullItemsCount);
    }

    VMA_VALIDATE(offset <= GetSize());
    VMA_VALIDATE(m_SumFreeSize == GetSize() - sumUsedSize);

    return true;
}
8951 
8952 size_t VmaBlockMetadata_Linear::GetAllocationCount() const
8953 {
8954  return AccessSuballocations1st().size() - (m_1stNullItemsBeginCount + m_1stNullItemsMiddleCount) +
8955  AccessSuballocations2nd().size() - m_2ndNullItemsCount;
8956 }
8957 
/*
Returns the size of the largest contiguous region currently available for a
NEW allocation. Depends on the mode of the 2nd vector, because each mode
leaves usable space in a different place.
*/
VkDeviceSize VmaBlockMetadata_Linear::GetUnusedRangeSizeMax() const
{
    const VkDeviceSize size = GetSize();

    /*
    We don't consider gaps inside allocation vectors with freed allocations because
    they are not suitable for reuse in linear allocator. We consider only space that
    is available for new allocations.
    */
    if(IsEmpty())
    {
        return size;
    }

    const SuballocationVectorType& suballocations1st = AccessSuballocations1st();

    switch(m_2ndVectorMode)
    {
    case SECOND_VECTOR_EMPTY:
        /*
        Available space is after end of 1st, as well as before beginning of 1st (which
        whould make it a ring buffer).
        */
        {
            const size_t suballocations1stCount = suballocations1st.size();
            VMA_ASSERT(suballocations1stCount > m_1stNullItemsBeginCount);
            // First USED item (leading null items are skipped via the counter).
            const VmaSuballocation& firstSuballoc = suballocations1st[m_1stNullItemsBeginCount];
            const VmaSuballocation& lastSuballoc = suballocations1st[suballocations1stCount - 1];
            return VMA_MAX(
                firstSuballoc.offset,
                size - (lastSuballoc.offset + lastSuballoc.size));
        }
        break;

    case SECOND_VECTOR_RING_BUFFER:
        /*
        Available space is only between end of 2nd and beginning of 1st.
        */
        {
            const SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
            const VmaSuballocation& lastSuballoc2nd = suballocations2nd.back();
            const VmaSuballocation& firstSuballoc1st = suballocations1st[m_1stNullItemsBeginCount];
            return firstSuballoc1st.offset - (lastSuballoc2nd.offset + lastSuballoc2nd.size);
        }
        break;

    case SECOND_VECTOR_DOUBLE_STACK:
        /*
        Available space is only between end of 1st and top of 2nd.
        */
        {
            const SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
            // back() of the 2nd vector is the lowest-offset item of the top-down stack.
            const VmaSuballocation& topSuballoc2nd = suballocations2nd.back();
            const VmaSuballocation& lastSuballoc1st = suballocations1st.back();
            return topSuballoc2nd.offset - (lastSuballoc1st.offset + lastSuballoc1st.size);
        }
        break;

    default:
        VMA_ASSERT(0);
        return 0;
    }
}
9021 
9022 void VmaBlockMetadata_Linear::CalcAllocationStatInfo(VmaStatInfo& outInfo) const
9023 {
9024  const VkDeviceSize size = GetSize();
9025  const SuballocationVectorType& suballocations1st = AccessSuballocations1st();
9026  const SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
9027  const size_t suballoc1stCount = suballocations1st.size();
9028  const size_t suballoc2ndCount = suballocations2nd.size();
9029 
9030  outInfo.blockCount = 1;
9031  outInfo.allocationCount = (uint32_t)GetAllocationCount();
9032  outInfo.unusedRangeCount = 0;
9033  outInfo.usedBytes = 0;
9034  outInfo.allocationSizeMin = UINT64_MAX;
9035  outInfo.allocationSizeMax = 0;
9036  outInfo.unusedRangeSizeMin = UINT64_MAX;
9037  outInfo.unusedRangeSizeMax = 0;
9038 
9039  VkDeviceSize lastOffset = 0;
9040 
9041  if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
9042  {
9043  const VkDeviceSize freeSpace2ndTo1stEnd = suballocations1st[m_1stNullItemsBeginCount].offset;
9044  size_t nextAlloc2ndIndex = 0;
9045  while(lastOffset < freeSpace2ndTo1stEnd)
9046  {
9047  // Find next non-null allocation or move nextAllocIndex to the end.
9048  while(nextAlloc2ndIndex < suballoc2ndCount &&
9049  suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE)
9050  {
9051  ++nextAlloc2ndIndex;
9052  }
9053 
9054  // Found non-null allocation.
9055  if(nextAlloc2ndIndex < suballoc2ndCount)
9056  {
9057  const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];
9058 
9059  // 1. Process free space before this allocation.
9060  if(lastOffset < suballoc.offset)
9061  {
9062  // There is free space from lastOffset to suballoc.offset.
9063  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
9064  ++outInfo.unusedRangeCount;
9065  outInfo.unusedBytes += unusedRangeSize;
9066  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, unusedRangeSize);
9067  outInfo.unusedRangeSizeMax = VMA_MIN(outInfo.unusedRangeSizeMax, unusedRangeSize);
9068  }
9069 
9070  // 2. Process this allocation.
9071  // There is allocation with suballoc.offset, suballoc.size.
9072  outInfo.usedBytes += suballoc.size;
9073  outInfo.allocationSizeMin = VMA_MIN(outInfo.allocationSizeMin, suballoc.size);
9074  outInfo.allocationSizeMax = VMA_MIN(outInfo.allocationSizeMax, suballoc.size);
9075 
9076  // 3. Prepare for next iteration.
9077  lastOffset = suballoc.offset + suballoc.size;
9078  ++nextAlloc2ndIndex;
9079  }
9080  // We are at the end.
9081  else
9082  {
9083  // There is free space from lastOffset to freeSpace2ndTo1stEnd.
9084  if(lastOffset < freeSpace2ndTo1stEnd)
9085  {
9086  const VkDeviceSize unusedRangeSize = freeSpace2ndTo1stEnd - lastOffset;
9087  ++outInfo.unusedRangeCount;
9088  outInfo.unusedBytes += unusedRangeSize;
9089  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, unusedRangeSize);
9090  outInfo.unusedRangeSizeMax = VMA_MIN(outInfo.unusedRangeSizeMax, unusedRangeSize);
9091  }
9092 
9093  // End of loop.
9094  lastOffset = freeSpace2ndTo1stEnd;
9095  }
9096  }
9097  }
9098 
9099  size_t nextAlloc1stIndex = m_1stNullItemsBeginCount;
9100  const VkDeviceSize freeSpace1stTo2ndEnd =
9101  m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK ? suballocations2nd.back().offset : size;
9102  while(lastOffset < freeSpace1stTo2ndEnd)
9103  {
9104  // Find next non-null allocation or move nextAllocIndex to the end.
9105  while(nextAlloc1stIndex < suballoc1stCount &&
9106  suballocations1st[nextAlloc1stIndex].hAllocation == VK_NULL_HANDLE)
9107  {
9108  ++nextAlloc1stIndex;
9109  }
9110 
9111  // Found non-null allocation.
9112  if(nextAlloc1stIndex < suballoc1stCount)
9113  {
9114  const VmaSuballocation& suballoc = suballocations1st[nextAlloc1stIndex];
9115 
9116  // 1. Process free space before this allocation.
9117  if(lastOffset < suballoc.offset)
9118  {
9119  // There is free space from lastOffset to suballoc.offset.
9120  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
9121  ++outInfo.unusedRangeCount;
9122  outInfo.unusedBytes += unusedRangeSize;
9123  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, unusedRangeSize);
9124  outInfo.unusedRangeSizeMax = VMA_MIN(outInfo.unusedRangeSizeMax, unusedRangeSize);
9125  }
9126 
9127  // 2. Process this allocation.
9128  // There is allocation with suballoc.offset, suballoc.size.
9129  outInfo.usedBytes += suballoc.size;
9130  outInfo.allocationSizeMin = VMA_MIN(outInfo.allocationSizeMin, suballoc.size);
9131  outInfo.allocationSizeMax = VMA_MIN(outInfo.allocationSizeMax, suballoc.size);
9132 
9133  // 3. Prepare for next iteration.
9134  lastOffset = suballoc.offset + suballoc.size;
9135  ++nextAlloc1stIndex;
9136  }
9137  // We are at the end.
9138  else
9139  {
9140  // There is free space from lastOffset to freeSpace1stTo2ndEnd.
9141  if(lastOffset < freeSpace1stTo2ndEnd)
9142  {
9143  const VkDeviceSize unusedRangeSize = freeSpace1stTo2ndEnd - lastOffset;
9144  ++outInfo.unusedRangeCount;
9145  outInfo.unusedBytes += unusedRangeSize;
9146  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, unusedRangeSize);
9147  outInfo.unusedRangeSizeMax = VMA_MIN(outInfo.unusedRangeSizeMax, unusedRangeSize);
9148  }
9149 
9150  // End of loop.
9151  lastOffset = freeSpace1stTo2ndEnd;
9152  }
9153  }
9154 
9155  if(m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
9156  {
9157  size_t nextAlloc2ndIndex = suballocations2nd.size() - 1;
9158  while(lastOffset < size)
9159  {
9160  // Find next non-null allocation or move nextAllocIndex to the end.
9161  while(nextAlloc2ndIndex != SIZE_MAX &&
9162  suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE)
9163  {
9164  --nextAlloc2ndIndex;
9165  }
9166 
9167  // Found non-null allocation.
9168  if(nextAlloc2ndIndex != SIZE_MAX)
9169  {
9170  const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];
9171 
9172  // 1. Process free space before this allocation.
9173  if(lastOffset < suballoc.offset)
9174  {
9175  // There is free space from lastOffset to suballoc.offset.
9176  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
9177  ++outInfo.unusedRangeCount;
9178  outInfo.unusedBytes += unusedRangeSize;
9179  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, unusedRangeSize);
9180  outInfo.unusedRangeSizeMax = VMA_MIN(outInfo.unusedRangeSizeMax, unusedRangeSize);
9181  }
9182 
9183  // 2. Process this allocation.
9184  // There is allocation with suballoc.offset, suballoc.size.
9185  outInfo.usedBytes += suballoc.size;
9186  outInfo.allocationSizeMin = VMA_MIN(outInfo.allocationSizeMin, suballoc.size);
9187  outInfo.allocationSizeMax = VMA_MIN(outInfo.allocationSizeMax, suballoc.size);
9188 
9189  // 3. Prepare for next iteration.
9190  lastOffset = suballoc.offset + suballoc.size;
9191  --nextAlloc2ndIndex;
9192  }
9193  // We are at the end.
9194  else
9195  {
9196  // There is free space from lastOffset to size.
9197  if(lastOffset < size)
9198  {
9199  const VkDeviceSize unusedRangeSize = size - lastOffset;
9200  ++outInfo.unusedRangeCount;
9201  outInfo.unusedBytes += unusedRangeSize;
9202  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, unusedRangeSize);
9203  outInfo.unusedRangeSizeMax = VMA_MIN(outInfo.unusedRangeSizeMax, unusedRangeSize);
9204  }
9205 
9206  // End of loop.
9207  lastOffset = size;
9208  }
9209  }
9210  }
9211 
9212  outInfo.unusedBytes = size - outInfo.usedBytes;
9213 }
9214 
9215 void VmaBlockMetadata_Linear::AddPoolStats(VmaPoolStats& inoutStats) const
9216 {
9217  const SuballocationVectorType& suballocations1st = AccessSuballocations1st();
9218  const SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
9219  const VkDeviceSize size = GetSize();
9220  const size_t suballoc1stCount = suballocations1st.size();
9221  const size_t suballoc2ndCount = suballocations2nd.size();
9222 
9223  inoutStats.size += size;
9224 
9225  VkDeviceSize lastOffset = 0;
9226 
9227  if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
9228  {
9229  const VkDeviceSize freeSpace2ndTo1stEnd = suballocations1st[m_1stNullItemsBeginCount].offset;
9230  size_t nextAlloc2ndIndex = m_1stNullItemsBeginCount;
9231  while(lastOffset < freeSpace2ndTo1stEnd)
9232  {
9233  // Find next non-null allocation or move nextAlloc2ndIndex to the end.
9234  while(nextAlloc2ndIndex < suballoc2ndCount &&
9235  suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE)
9236  {
9237  ++nextAlloc2ndIndex;
9238  }
9239 
9240  // Found non-null allocation.
9241  if(nextAlloc2ndIndex < suballoc2ndCount)
9242  {
9243  const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];
9244 
9245  // 1. Process free space before this allocation.
9246  if(lastOffset < suballoc.offset)
9247  {
9248  // There is free space from lastOffset to suballoc.offset.
9249  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
9250  inoutStats.unusedSize += unusedRangeSize;
9251  ++inoutStats.unusedRangeCount;
9252  inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, unusedRangeSize);
9253  }
9254 
9255  // 2. Process this allocation.
9256  // There is allocation with suballoc.offset, suballoc.size.
9257  ++inoutStats.allocationCount;
9258 
9259  // 3. Prepare for next iteration.
9260  lastOffset = suballoc.offset + suballoc.size;
9261  ++nextAlloc2ndIndex;
9262  }
9263  // We are at the end.
9264  else
9265  {
9266  if(lastOffset < freeSpace2ndTo1stEnd)
9267  {
9268  // There is free space from lastOffset to freeSpace2ndTo1stEnd.
9269  const VkDeviceSize unusedRangeSize = freeSpace2ndTo1stEnd - lastOffset;
9270  inoutStats.unusedSize += unusedRangeSize;
9271  ++inoutStats.unusedRangeCount;
9272  inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, unusedRangeSize);
9273  }
9274 
9275  // End of loop.
9276  lastOffset = freeSpace2ndTo1stEnd;
9277  }
9278  }
9279  }
9280 
9281  size_t nextAlloc1stIndex = m_1stNullItemsBeginCount;
9282  const VkDeviceSize freeSpace1stTo2ndEnd =
9283  m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK ? suballocations2nd.back().offset : size;
9284  while(lastOffset < freeSpace1stTo2ndEnd)
9285  {
9286  // Find next non-null allocation or move nextAllocIndex to the end.
9287  while(nextAlloc1stIndex < suballoc1stCount &&
9288  suballocations1st[nextAlloc1stIndex].hAllocation == VK_NULL_HANDLE)
9289  {
9290  ++nextAlloc1stIndex;
9291  }
9292 
9293  // Found non-null allocation.
9294  if(nextAlloc1stIndex < suballoc1stCount)
9295  {
9296  const VmaSuballocation& suballoc = suballocations1st[nextAlloc1stIndex];
9297 
9298  // 1. Process free space before this allocation.
9299  if(lastOffset < suballoc.offset)
9300  {
9301  // There is free space from lastOffset to suballoc.offset.
9302  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
9303  inoutStats.unusedSize += unusedRangeSize;
9304  ++inoutStats.unusedRangeCount;
9305  inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, unusedRangeSize);
9306  }
9307 
9308  // 2. Process this allocation.
9309  // There is allocation with suballoc.offset, suballoc.size.
9310  ++inoutStats.allocationCount;
9311 
9312  // 3. Prepare for next iteration.
9313  lastOffset = suballoc.offset + suballoc.size;
9314  ++nextAlloc1stIndex;
9315  }
9316  // We are at the end.
9317  else
9318  {
9319  if(lastOffset < freeSpace1stTo2ndEnd)
9320  {
9321  // There is free space from lastOffset to freeSpace1stTo2ndEnd.
9322  const VkDeviceSize unusedRangeSize = freeSpace1stTo2ndEnd - lastOffset;
9323  inoutStats.unusedSize += unusedRangeSize;
9324  ++inoutStats.unusedRangeCount;
9325  inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, unusedRangeSize);
9326  }
9327 
9328  // End of loop.
9329  lastOffset = freeSpace1stTo2ndEnd;
9330  }
9331  }
9332 
9333  if(m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
9334  {
9335  size_t nextAlloc2ndIndex = suballocations2nd.size() - 1;
9336  while(lastOffset < size)
9337  {
9338  // Find next non-null allocation or move nextAlloc2ndIndex to the end.
9339  while(nextAlloc2ndIndex != SIZE_MAX &&
9340  suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE)
9341  {
9342  --nextAlloc2ndIndex;
9343  }
9344 
9345  // Found non-null allocation.
9346  if(nextAlloc2ndIndex != SIZE_MAX)
9347  {
9348  const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];
9349 
9350  // 1. Process free space before this allocation.
9351  if(lastOffset < suballoc.offset)
9352  {
9353  // There is free space from lastOffset to suballoc.offset.
9354  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
9355  inoutStats.unusedSize += unusedRangeSize;
9356  ++inoutStats.unusedRangeCount;
9357  inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, unusedRangeSize);
9358  }
9359 
9360  // 2. Process this allocation.
9361  // There is allocation with suballoc.offset, suballoc.size.
9362  ++inoutStats.allocationCount;
9363 
9364  // 3. Prepare for next iteration.
9365  lastOffset = suballoc.offset + suballoc.size;
9366  --nextAlloc2ndIndex;
9367  }
9368  // We are at the end.
9369  else
9370  {
9371  if(lastOffset < size)
9372  {
9373  // There is free space from lastOffset to size.
9374  const VkDeviceSize unusedRangeSize = size - lastOffset;
9375  inoutStats.unusedSize += unusedRangeSize;
9376  ++inoutStats.unusedRangeCount;
9377  inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, unusedRangeSize);
9378  }
9379 
9380  // End of loop.
9381  lastOffset = size;
9382  }
9383  }
9384  }
9385 }
9386 
9387 #if VMA_STATS_STRING_ENABLED
9388 void VmaBlockMetadata_Linear::PrintDetailedMap(class VmaJsonWriter& json) const
9389 {
9390  const VkDeviceSize size = GetSize();
9391  const SuballocationVectorType& suballocations1st = AccessSuballocations1st();
9392  const SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
9393  const size_t suballoc1stCount = suballocations1st.size();
9394  const size_t suballoc2ndCount = suballocations2nd.size();
9395 
9396  // FIRST PASS
9397 
9398  size_t unusedRangeCount = 0;
9399  VkDeviceSize usedBytes = 0;
9400 
9401  VkDeviceSize lastOffset = 0;
9402 
9403  size_t alloc2ndCount = 0;
9404  if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
9405  {
9406  const VkDeviceSize freeSpace2ndTo1stEnd = suballocations1st[m_1stNullItemsBeginCount].offset;
9407  size_t nextAlloc2ndIndex = 0;
9408  while(lastOffset < freeSpace2ndTo1stEnd)
9409  {
9410  // Find next non-null allocation or move nextAlloc2ndIndex to the end.
9411  while(nextAlloc2ndIndex < suballoc2ndCount &&
9412  suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE)
9413  {
9414  ++nextAlloc2ndIndex;
9415  }
9416 
9417  // Found non-null allocation.
9418  if(nextAlloc2ndIndex < suballoc2ndCount)
9419  {
9420  const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];
9421 
9422  // 1. Process free space before this allocation.
9423  if(lastOffset < suballoc.offset)
9424  {
9425  // There is free space from lastOffset to suballoc.offset.
9426  ++unusedRangeCount;
9427  }
9428 
9429  // 2. Process this allocation.
9430  // There is allocation with suballoc.offset, suballoc.size.
9431  ++alloc2ndCount;
9432  usedBytes += suballoc.size;
9433 
9434  // 3. Prepare for next iteration.
9435  lastOffset = suballoc.offset + suballoc.size;
9436  ++nextAlloc2ndIndex;
9437  }
9438  // We are at the end.
9439  else
9440  {
9441  if(lastOffset < freeSpace2ndTo1stEnd)
9442  {
9443  // There is free space from lastOffset to freeSpace2ndTo1stEnd.
9444  ++unusedRangeCount;
9445  }
9446 
9447  // End of loop.
9448  lastOffset = freeSpace2ndTo1stEnd;
9449  }
9450  }
9451  }
9452 
9453  size_t nextAlloc1stIndex = m_1stNullItemsBeginCount;
9454  size_t alloc1stCount = 0;
9455  const VkDeviceSize freeSpace1stTo2ndEnd =
9456  m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK ? suballocations2nd.back().offset : size;
9457  while(lastOffset < freeSpace1stTo2ndEnd)
9458  {
9459  // Find next non-null allocation or move nextAllocIndex to the end.
9460  while(nextAlloc1stIndex < suballoc1stCount &&
9461  suballocations1st[nextAlloc1stIndex].hAllocation == VK_NULL_HANDLE)
9462  {
9463  ++nextAlloc1stIndex;
9464  }
9465 
9466  // Found non-null allocation.
9467  if(nextAlloc1stIndex < suballoc1stCount)
9468  {
9469  const VmaSuballocation& suballoc = suballocations1st[nextAlloc1stIndex];
9470 
9471  // 1. Process free space before this allocation.
9472  if(lastOffset < suballoc.offset)
9473  {
9474  // There is free space from lastOffset to suballoc.offset.
9475  ++unusedRangeCount;
9476  }
9477 
9478  // 2. Process this allocation.
9479  // There is allocation with suballoc.offset, suballoc.size.
9480  ++alloc1stCount;
9481  usedBytes += suballoc.size;
9482 
9483  // 3. Prepare for next iteration.
9484  lastOffset = suballoc.offset + suballoc.size;
9485  ++nextAlloc1stIndex;
9486  }
9487  // We are at the end.
9488  else
9489  {
9490  if(lastOffset < size)
9491  {
9492  // There is free space from lastOffset to freeSpace1stTo2ndEnd.
9493  ++unusedRangeCount;
9494  }
9495 
9496  // End of loop.
9497  lastOffset = freeSpace1stTo2ndEnd;
9498  }
9499  }
9500 
9501  if(m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
9502  {
9503  size_t nextAlloc2ndIndex = suballocations2nd.size() - 1;
9504  while(lastOffset < size)
9505  {
9506  // Find next non-null allocation or move nextAlloc2ndIndex to the end.
9507  while(nextAlloc2ndIndex != SIZE_MAX &&
9508  suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE)
9509  {
9510  --nextAlloc2ndIndex;
9511  }
9512 
9513  // Found non-null allocation.
9514  if(nextAlloc2ndIndex != SIZE_MAX)
9515  {
9516  const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];
9517 
9518  // 1. Process free space before this allocation.
9519  if(lastOffset < suballoc.offset)
9520  {
9521  // There is free space from lastOffset to suballoc.offset.
9522  ++unusedRangeCount;
9523  }
9524 
9525  // 2. Process this allocation.
9526  // There is allocation with suballoc.offset, suballoc.size.
9527  ++alloc2ndCount;
9528  usedBytes += suballoc.size;
9529 
9530  // 3. Prepare for next iteration.
9531  lastOffset = suballoc.offset + suballoc.size;
9532  --nextAlloc2ndIndex;
9533  }
9534  // We are at the end.
9535  else
9536  {
9537  if(lastOffset < size)
9538  {
9539  // There is free space from lastOffset to size.
9540  ++unusedRangeCount;
9541  }
9542 
9543  // End of loop.
9544  lastOffset = size;
9545  }
9546  }
9547  }
9548 
9549  const VkDeviceSize unusedBytes = size - usedBytes;
9550  PrintDetailedMap_Begin(json, unusedBytes, alloc1stCount + alloc2ndCount, unusedRangeCount);
9551 
9552  // SECOND PASS
9553  lastOffset = 0;
9554 
9555  if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
9556  {
9557  const VkDeviceSize freeSpace2ndTo1stEnd = suballocations1st[m_1stNullItemsBeginCount].offset;
9558  size_t nextAlloc2ndIndex = 0;
9559  while(lastOffset < freeSpace2ndTo1stEnd)
9560  {
9561  // Find next non-null allocation or move nextAlloc2ndIndex to the end.
9562  while(nextAlloc2ndIndex < suballoc2ndCount &&
9563  suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE)
9564  {
9565  ++nextAlloc2ndIndex;
9566  }
9567 
9568  // Found non-null allocation.
9569  if(nextAlloc2ndIndex < suballoc2ndCount)
9570  {
9571  const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];
9572 
9573  // 1. Process free space before this allocation.
9574  if(lastOffset < suballoc.offset)
9575  {
9576  // There is free space from lastOffset to suballoc.offset.
9577  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
9578  PrintDetailedMap_UnusedRange(json, lastOffset, unusedRangeSize);
9579  }
9580 
9581  // 2. Process this allocation.
9582  // There is allocation with suballoc.offset, suballoc.size.
9583  PrintDetailedMap_Allocation(json, suballoc.offset, suballoc.hAllocation);
9584 
9585  // 3. Prepare for next iteration.
9586  lastOffset = suballoc.offset + suballoc.size;
9587  ++nextAlloc2ndIndex;
9588  }
9589  // We are at the end.
9590  else
9591  {
9592  if(lastOffset < freeSpace2ndTo1stEnd)
9593  {
9594  // There is free space from lastOffset to freeSpace2ndTo1stEnd.
9595  const VkDeviceSize unusedRangeSize = freeSpace2ndTo1stEnd - lastOffset;
9596  PrintDetailedMap_UnusedRange(json, lastOffset, unusedRangeSize);
9597  }
9598 
9599  // End of loop.
9600  lastOffset = freeSpace2ndTo1stEnd;
9601  }
9602  }
9603  }
9604 
9605  nextAlloc1stIndex = m_1stNullItemsBeginCount;
9606  while(lastOffset < freeSpace1stTo2ndEnd)
9607  {
9608  // Find next non-null allocation or move nextAllocIndex to the end.
9609  while(nextAlloc1stIndex < suballoc1stCount &&
9610  suballocations1st[nextAlloc1stIndex].hAllocation == VK_NULL_HANDLE)
9611  {
9612  ++nextAlloc1stIndex;
9613  }
9614 
9615  // Found non-null allocation.
9616  if(nextAlloc1stIndex < suballoc1stCount)
9617  {
9618  const VmaSuballocation& suballoc = suballocations1st[nextAlloc1stIndex];
9619 
9620  // 1. Process free space before this allocation.
9621  if(lastOffset < suballoc.offset)
9622  {
9623  // There is free space from lastOffset to suballoc.offset.
9624  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
9625  PrintDetailedMap_UnusedRange(json, lastOffset, unusedRangeSize);
9626  }
9627 
9628  // 2. Process this allocation.
9629  // There is allocation with suballoc.offset, suballoc.size.
9630  PrintDetailedMap_Allocation(json, suballoc.offset, suballoc.hAllocation);
9631 
9632  // 3. Prepare for next iteration.
9633  lastOffset = suballoc.offset + suballoc.size;
9634  ++nextAlloc1stIndex;
9635  }
9636  // We are at the end.
9637  else
9638  {
9639  if(lastOffset < freeSpace1stTo2ndEnd)
9640  {
9641  // There is free space from lastOffset to freeSpace1stTo2ndEnd.
9642  const VkDeviceSize unusedRangeSize = freeSpace1stTo2ndEnd - lastOffset;
9643  PrintDetailedMap_UnusedRange(json, lastOffset, unusedRangeSize);
9644  }
9645 
9646  // End of loop.
9647  lastOffset = freeSpace1stTo2ndEnd;
9648  }
9649  }
9650 
9651  if(m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
9652  {
9653  size_t nextAlloc2ndIndex = suballocations2nd.size() - 1;
9654  while(lastOffset < size)
9655  {
9656  // Find next non-null allocation or move nextAlloc2ndIndex to the end.
9657  while(nextAlloc2ndIndex != SIZE_MAX &&
9658  suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE)
9659  {
9660  --nextAlloc2ndIndex;
9661  }
9662 
9663  // Found non-null allocation.
9664  if(nextAlloc2ndIndex != SIZE_MAX)
9665  {
9666  const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];
9667 
9668  // 1. Process free space before this allocation.
9669  if(lastOffset < suballoc.offset)
9670  {
9671  // There is free space from lastOffset to suballoc.offset.
9672  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
9673  PrintDetailedMap_UnusedRange(json, lastOffset, unusedRangeSize);
9674  }
9675 
9676  // 2. Process this allocation.
9677  // There is allocation with suballoc.offset, suballoc.size.
9678  PrintDetailedMap_Allocation(json, suballoc.offset, suballoc.hAllocation);
9679 
9680  // 3. Prepare for next iteration.
9681  lastOffset = suballoc.offset + suballoc.size;
9682  --nextAlloc2ndIndex;
9683  }
9684  // We are at the end.
9685  else
9686  {
9687  if(lastOffset < size)
9688  {
9689  // There is free space from lastOffset to size.
9690  const VkDeviceSize unusedRangeSize = size - lastOffset;
9691  PrintDetailedMap_UnusedRange(json, lastOffset, unusedRangeSize);
9692  }
9693 
9694  // End of loop.
9695  lastOffset = size;
9696  }
9697  }
9698  }
9699 
9700  PrintDetailedMap_End(json);
9701 }
9702 #endif // #if VMA_STATS_STRING_ENABLED
9703 
9704 bool VmaBlockMetadata_Linear::CreateAllocationRequest(
9705  uint32_t currentFrameIndex,
9706  uint32_t frameInUseCount,
9707  VkDeviceSize bufferImageGranularity,
9708  VkDeviceSize allocSize,
9709  VkDeviceSize allocAlignment,
9710  bool upperAddress,
9711  VmaSuballocationType allocType,
9712  bool canMakeOtherLost,
9713  uint32_t strategy,
9714  VmaAllocationRequest* pAllocationRequest)
9715 {
9716  VMA_ASSERT(allocSize > 0);
9717  VMA_ASSERT(allocType != VMA_SUBALLOCATION_TYPE_FREE);
9718  VMA_ASSERT(pAllocationRequest != VMA_NULL);
9719  VMA_HEAVY_ASSERT(Validate());
9720  return upperAddress ?
9721  CreateAllocationRequest_UpperAddress(
9722  currentFrameIndex, frameInUseCount, bufferImageGranularity,
9723  allocSize, allocAlignment, allocType, canMakeOtherLost, strategy, pAllocationRequest) :
9724  CreateAllocationRequest_LowerAddress(
9725  currentFrameIndex, frameInUseCount, bufferImageGranularity,
9726  allocSize, allocAlignment, allocType, canMakeOtherLost, strategy, pAllocationRequest);
9727 }
9728 
bool VmaBlockMetadata_Linear::CreateAllocationRequest_UpperAddress(
    uint32_t currentFrameIndex,
    uint32_t frameInUseCount,
    VkDeviceSize bufferImageGranularity,
    VkDeviceSize allocSize,
    VkDeviceSize allocAlignment,
    VmaSuballocationType allocType,
    bool canMakeOtherLost,
    uint32_t strategy,
    VmaAllocationRequest* pAllocationRequest)
{
    // Tries to place an allocation at the upper-address side of the block
    // (double-stack usage): just below 2nd.back(), or below the end of the
    // block when the 2nd vector is empty. On success fills *pAllocationRequest
    // and returns true; returns false when the request cannot be satisfied.
    // Note: currentFrameIndex, frameInUseCount, canMakeOtherLost and strategy
    // are unused here - this path never makes other allocations lost.
    const VkDeviceSize size = GetSize();
    SuballocationVectorType& suballocations1st = AccessSuballocations1st();
    SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();

    // Upper-address allocation is only valid in double-stack usage; the 2nd
    // vector cannot simultaneously serve as a ring buffer.
    if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
    {
        VMA_ASSERT(0 && "Trying to use pool with linear algorithm as double stack, while it is already being used as ring buffer.");
        return false;
    }

    // Try to allocate before 2nd.back(), or end of block if 2nd.empty().
    if(allocSize > size)
    {
        return false;
    }
    VkDeviceSize resultBaseOffset = size - allocSize;
    if(!suballocations2nd.empty())
    {
        const VmaSuballocation& lastSuballoc = suballocations2nd.back();
        resultBaseOffset = lastSuballoc.offset - allocSize;
        // Not enough room below the current top of the 2nd stack.
        if(allocSize > lastSuballoc.offset)
        {
            return false;
        }
    }

    // Start from offset equal to end of free space.
    VkDeviceSize resultOffset = resultBaseOffset;

    // Apply VMA_DEBUG_MARGIN at the end.
    if(VMA_DEBUG_MARGIN > 0)
    {
        if(resultOffset < VMA_DEBUG_MARGIN)
        {
            return false;
        }
        resultOffset -= VMA_DEBUG_MARGIN;
    }

    // Apply alignment.
    // Allocating downward, so round the offset DOWN to the alignment.
    resultOffset = VmaAlignDown(resultOffset, allocAlignment);

    // Check next suballocations from 2nd for BufferImageGranularity conflicts.
    // Make bigger alignment if necessary.
    if(bufferImageGranularity > 1 && !suballocations2nd.empty())
    {
        bool bufferImageGranularityConflict = false;
        // 2nd vector is stored back-to-front; iterate from the back so we
        // visit the suballocations closest (in address) to the candidate first.
        for(size_t nextSuballocIndex = suballocations2nd.size(); nextSuballocIndex--; )
        {
            const VmaSuballocation& nextSuballoc = suballocations2nd[nextSuballocIndex];
            if(VmaBlocksOnSamePage(resultOffset, allocSize, nextSuballoc.offset, bufferImageGranularity))
            {
                if(VmaIsBufferImageGranularityConflict(nextSuballoc.type, allocType))
                {
                    bufferImageGranularityConflict = true;
                    break;
                }
            }
            else
                // Already on previous page.
                break;
        }
        if(bufferImageGranularityConflict)
        {
            // Push the allocation down to its own granularity page.
            resultOffset = VmaAlignDown(resultOffset, bufferImageGranularity);
        }
    }

    // There is enough free space.
    const VkDeviceSize endOf1st = !suballocations1st.empty() ?
        suballocations1st.back().offset + suballocations1st.back().size :
        0;
    if(endOf1st + VMA_DEBUG_MARGIN <= resultOffset)
    {
        // Check previous suballocations for BufferImageGranularity conflicts.
        // If conflict exists, allocation cannot be made here.
        if(bufferImageGranularity > 1)
        {
            for(size_t prevSuballocIndex = suballocations1st.size(); prevSuballocIndex--; )
            {
                const VmaSuballocation& prevSuballoc = suballocations1st[prevSuballocIndex];
                if(VmaBlocksOnSamePage(prevSuballoc.offset, prevSuballoc.size, resultOffset, bufferImageGranularity))
                {
                    if(VmaIsBufferImageGranularityConflict(allocType, prevSuballoc.type))
                    {
                        return false;
                    }
                }
                else
                {
                    // Already on next page.
                    break;
                }
            }
        }

        // All tests passed: Success.
        pAllocationRequest->offset = resultOffset;
        // Free space spans from the end of 1st up to the (pre-alignment) top
        // of the candidate allocation.
        pAllocationRequest->sumFreeSize = resultBaseOffset + allocSize - endOf1st;
        pAllocationRequest->sumItemSize = 0;
        // pAllocationRequest->item unused.
        pAllocationRequest->itemsToMakeLostCount = 0;
        pAllocationRequest->type = VmaAllocationRequestType::UpperAddress;
        return true;
    }

    return false;
}
9848 
9849 bool VmaBlockMetadata_Linear::CreateAllocationRequest_LowerAddress(
9850  uint32_t currentFrameIndex,
9851  uint32_t frameInUseCount,
9852  VkDeviceSize bufferImageGranularity,
9853  VkDeviceSize allocSize,
9854  VkDeviceSize allocAlignment,
9855  VmaSuballocationType allocType,
9856  bool canMakeOtherLost,
9857  uint32_t strategy,
9858  VmaAllocationRequest* pAllocationRequest)
9859 {
9860  const VkDeviceSize size = GetSize();
9861  SuballocationVectorType& suballocations1st = AccessSuballocations1st();
9862  SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
9863 
9864  if(m_2ndVectorMode == SECOND_VECTOR_EMPTY || m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
9865  {
9866  // Try to allocate at the end of 1st vector.
9867 
9868  VkDeviceSize resultBaseOffset = 0;
9869  if(!suballocations1st.empty())
9870  {
9871  const VmaSuballocation& lastSuballoc = suballocations1st.back();
9872  resultBaseOffset = lastSuballoc.offset + lastSuballoc.size;
9873  }
9874 
9875  // Start from offset equal to beginning of free space.
9876  VkDeviceSize resultOffset = resultBaseOffset;
9877 
9878  // Apply VMA_DEBUG_MARGIN at the beginning.
9879  if(VMA_DEBUG_MARGIN > 0)
9880  {
9881  resultOffset += VMA_DEBUG_MARGIN;
9882  }
9883 
9884  // Apply alignment.
9885  resultOffset = VmaAlignUp(resultOffset, allocAlignment);
9886 
9887  // Check previous suballocations for BufferImageGranularity conflicts.
9888  // Make bigger alignment if necessary.
9889  if(bufferImageGranularity > 1 && !suballocations1st.empty())
9890  {
9891  bool bufferImageGranularityConflict = false;
9892  for(size_t prevSuballocIndex = suballocations1st.size(); prevSuballocIndex--; )
9893  {
9894  const VmaSuballocation& prevSuballoc = suballocations1st[prevSuballocIndex];
9895  if(VmaBlocksOnSamePage(prevSuballoc.offset, prevSuballoc.size, resultOffset, bufferImageGranularity))
9896  {
9897  if(VmaIsBufferImageGranularityConflict(prevSuballoc.type, allocType))
9898  {
9899  bufferImageGranularityConflict = true;
9900  break;
9901  }
9902  }
9903  else
9904  // Already on previous page.
9905  break;
9906  }
9907  if(bufferImageGranularityConflict)
9908  {
9909  resultOffset = VmaAlignUp(resultOffset, bufferImageGranularity);
9910  }
9911  }
9912 
9913  const VkDeviceSize freeSpaceEnd = m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK ?
9914  suballocations2nd.back().offset : size;
9915 
9916  // There is enough free space at the end after alignment.
9917  if(resultOffset + allocSize + VMA_DEBUG_MARGIN <= freeSpaceEnd)
9918  {
9919  // Check next suballocations for BufferImageGranularity conflicts.
9920  // If conflict exists, allocation cannot be made here.
9921  if(bufferImageGranularity > 1 && m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
9922  {
9923  for(size_t nextSuballocIndex = suballocations2nd.size(); nextSuballocIndex--; )
9924  {
9925  const VmaSuballocation& nextSuballoc = suballocations2nd[nextSuballocIndex];
9926  if(VmaBlocksOnSamePage(resultOffset, allocSize, nextSuballoc.offset, bufferImageGranularity))
9927  {
9928  if(VmaIsBufferImageGranularityConflict(allocType, nextSuballoc.type))
9929  {
9930  return false;
9931  }
9932  }
9933  else
9934  {
9935  // Already on previous page.
9936  break;
9937  }
9938  }
9939  }
9940 
9941  // All tests passed: Success.
9942  pAllocationRequest->offset = resultOffset;
9943  pAllocationRequest->sumFreeSize = freeSpaceEnd - resultBaseOffset;
9944  pAllocationRequest->sumItemSize = 0;
9945  // pAllocationRequest->item, customData unused.
9946  pAllocationRequest->type = VmaAllocationRequestType::EndOf1st;
9947  pAllocationRequest->itemsToMakeLostCount = 0;
9948  return true;
9949  }
9950  }
9951 
9952  // Wrap-around to end of 2nd vector. Try to allocate there, watching for the
9953  // beginning of 1st vector as the end of free space.
9954  if(m_2ndVectorMode == SECOND_VECTOR_EMPTY || m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
9955  {
9956  VMA_ASSERT(!suballocations1st.empty());
9957 
9958  VkDeviceSize resultBaseOffset = 0;
9959  if(!suballocations2nd.empty())
9960  {
9961  const VmaSuballocation& lastSuballoc = suballocations2nd.back();
9962  resultBaseOffset = lastSuballoc.offset + lastSuballoc.size;
9963  }
9964 
9965  // Start from offset equal to beginning of free space.
9966  VkDeviceSize resultOffset = resultBaseOffset;
9967 
9968  // Apply VMA_DEBUG_MARGIN at the beginning.
9969  if(VMA_DEBUG_MARGIN > 0)
9970  {
9971  resultOffset += VMA_DEBUG_MARGIN;
9972  }
9973 
9974  // Apply alignment.
9975  resultOffset = VmaAlignUp(resultOffset, allocAlignment);
9976 
9977  // Check previous suballocations for BufferImageGranularity conflicts.
9978  // Make bigger alignment if necessary.
9979  if(bufferImageGranularity > 1 && !suballocations2nd.empty())
9980  {
9981  bool bufferImageGranularityConflict = false;
9982  for(size_t prevSuballocIndex = suballocations2nd.size(); prevSuballocIndex--; )
9983  {
9984  const VmaSuballocation& prevSuballoc = suballocations2nd[prevSuballocIndex];
9985  if(VmaBlocksOnSamePage(prevSuballoc.offset, prevSuballoc.size, resultOffset, bufferImageGranularity))
9986  {
9987  if(VmaIsBufferImageGranularityConflict(prevSuballoc.type, allocType))
9988  {
9989  bufferImageGranularityConflict = true;
9990  break;
9991  }
9992  }
9993  else
9994  // Already on previous page.
9995  break;
9996  }
9997  if(bufferImageGranularityConflict)
9998  {
9999  resultOffset = VmaAlignUp(resultOffset, bufferImageGranularity);
10000  }
10001  }
10002 
10003  pAllocationRequest->itemsToMakeLostCount = 0;
10004  pAllocationRequest->sumItemSize = 0;
10005  size_t index1st = m_1stNullItemsBeginCount;
10006 
10007  if(canMakeOtherLost)
10008  {
10009  while(index1st < suballocations1st.size() &&
10010  resultOffset + allocSize + VMA_DEBUG_MARGIN > suballocations1st[index1st].offset)
10011  {
10012  // Next colliding allocation at the beginning of 1st vector found. Try to make it lost.
10013  const VmaSuballocation& suballoc = suballocations1st[index1st];
10014  if(suballoc.type == VMA_SUBALLOCATION_TYPE_FREE)
10015  {
10016  // No problem.
10017  }
10018  else
10019  {
10020  VMA_ASSERT(suballoc.hAllocation != VK_NULL_HANDLE);
10021  if(suballoc.hAllocation->CanBecomeLost() &&
10022  suballoc.hAllocation->GetLastUseFrameIndex() + frameInUseCount < currentFrameIndex)
10023  {
10024  ++pAllocationRequest->itemsToMakeLostCount;
10025  pAllocationRequest->sumItemSize += suballoc.size;
10026  }
10027  else
10028  {
10029  return false;
10030  }
10031  }
10032  ++index1st;
10033  }
10034 
10035  // Check next suballocations for BufferImageGranularity conflicts.
10036  // If conflict exists, we must mark more allocations lost or fail.
10037  if(bufferImageGranularity > 1)
10038  {
10039  while(index1st < suballocations1st.size())
10040  {
10041  const VmaSuballocation& suballoc = suballocations1st[index1st];
10042  if(VmaBlocksOnSamePage(resultOffset, allocSize, suballoc.offset, bufferImageGranularity))
10043  {
10044  if(suballoc.hAllocation != VK_NULL_HANDLE)
10045  {
10046  // Not checking actual VmaIsBufferImageGranularityConflict(allocType, suballoc.type).
10047  if(suballoc.hAllocation->CanBecomeLost() &&
10048  suballoc.hAllocation->GetLastUseFrameIndex() + frameInUseCount < currentFrameIndex)
10049  {
10050  ++pAllocationRequest->itemsToMakeLostCount;
10051  pAllocationRequest->sumItemSize += suballoc.size;
10052  }
10053  else
10054  {
10055  return false;
10056  }
10057  }
10058  }
10059  else
10060  {
10061  // Already on next page.
10062  break;
10063  }
10064  ++index1st;
10065  }
10066  }
10067 
10068  // Special case: There is not enough room at the end for this allocation, even after making all from the 1st lost.
10069  if(index1st == suballocations1st.size() &&
10070  resultOffset + allocSize + VMA_DEBUG_MARGIN > size)
10071  {
10072  // TODO: This is a known bug that it's not yet implemented and the allocation is failing.
10073  VMA_DEBUG_LOG("Unsupported special case in custom pool with linear allocation algorithm used as ring buffer with allocations that can be lost.");
10074  }
10075  }
10076 
10077  // There is enough free space at the end after alignment.
10078  if((index1st == suballocations1st.size() && resultOffset + allocSize + VMA_DEBUG_MARGIN <= size) ||
10079  (index1st < suballocations1st.size() && resultOffset + allocSize + VMA_DEBUG_MARGIN <= suballocations1st[index1st].offset))
10080  {
10081  // Check next suballocations for BufferImageGranularity conflicts.
10082  // If conflict exists, allocation cannot be made here.
10083  if(bufferImageGranularity > 1)
10084  {
10085  for(size_t nextSuballocIndex = index1st;
10086  nextSuballocIndex < suballocations1st.size();
10087  nextSuballocIndex++)
10088  {
10089  const VmaSuballocation& nextSuballoc = suballocations1st[nextSuballocIndex];
10090  if(VmaBlocksOnSamePage(resultOffset, allocSize, nextSuballoc.offset, bufferImageGranularity))
10091  {
10092  if(VmaIsBufferImageGranularityConflict(allocType, nextSuballoc.type))
10093  {
10094  return false;
10095  }
10096  }
10097  else
10098  {
10099  // Already on next page.
10100  break;
10101  }
10102  }
10103  }
10104 
10105  // All tests passed: Success.
10106  pAllocationRequest->offset = resultOffset;
10107  pAllocationRequest->sumFreeSize =
10108  (index1st < suballocations1st.size() ? suballocations1st[index1st].offset : size)
10109  - resultBaseOffset
10110  - pAllocationRequest->sumItemSize;
10111  pAllocationRequest->type = VmaAllocationRequestType::EndOf2nd;
10112  // pAllocationRequest->item, customData unused.
10113  return true;
10114  }
10115  }
10116 
10117  return false;
10118 }
10119 
bool VmaBlockMetadata_Linear::MakeRequestedAllocationsLost(
    uint32_t currentFrameIndex,
    uint32_t frameInUseCount,
    VmaAllocationRequest* pAllocationRequest)
{
    // Marks as lost exactly pAllocationRequest->itemsToMakeLostCount allocations,
    // scanning from the beginning of the 1st vector and wrapping into the 2nd
    // vector when the metadata operates as a 2-part ring buffer.
    // Returns true on success, false if some allocation refused to become lost
    // (MakeLost failed, e.g. it was used too recently w.r.t. frameInUseCount).
    if(pAllocationRequest->itemsToMakeLostCount == 0)
    {
        return true;
    }

    // Making allocations lost is only meaningful for the ring-buffer usage
    // (or before the 2nd vector is used at all) — never for the double stack.
    VMA_ASSERT(m_2ndVectorMode == SECOND_VECTOR_EMPTY || m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER);

    // We always start from 1st.
    SuballocationVectorType* suballocations = &AccessSuballocations1st();
    size_t index = m_1stNullItemsBeginCount;
    size_t madeLostCount = 0;
    while(madeLostCount < pAllocationRequest->itemsToMakeLostCount)
    {
        if(index == suballocations->size())
        {
            index = 0;
            // If we get to the end of 1st, we wrap around to the beginning of the 2nd vector.
            if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
            {
                suballocations = &AccessSuballocations2nd();
            }
            // else: m_2ndVectorMode == SECOND_VECTOR_EMPTY:
            // suballocations continues pointing at AccessSuballocations1st().
            VMA_ASSERT(!suballocations->empty());
        }
        VmaSuballocation& suballoc = (*suballocations)[index];
        if(suballoc.type != VMA_SUBALLOCATION_TYPE_FREE)
        {
            VMA_ASSERT(suballoc.hAllocation != VK_NULL_HANDLE);
            VMA_ASSERT(suballoc.hAllocation->CanBecomeLost());
            if(suballoc.hAllocation->MakeLost(currentFrameIndex, frameInUseCount))
            {
                // Turn the suballocation into a null (free) item and update the
                // per-vector null-item counters plus the free-size total.
                suballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
                suballoc.hAllocation = VK_NULL_HANDLE;
                m_SumFreeSize += suballoc.size;
                if(suballocations == &AccessSuballocations1st())
                {
                    ++m_1stNullItemsMiddleCount;
                }
                else
                {
                    ++m_2ndNullItemsCount;
                }
                ++madeLostCount;
            }
            else
            {
                return false;
            }
        }
        ++index;
    }

    CleanupAfterFree();
    //VMA_HEAVY_ASSERT(Validate()); // Already called by CleanupAfterFree().

    return true;
}
10183 
10184 uint32_t VmaBlockMetadata_Linear::MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount)
10185 {
10186  uint32_t lostAllocationCount = 0;
10187 
10188  SuballocationVectorType& suballocations1st = AccessSuballocations1st();
10189  for(size_t i = m_1stNullItemsBeginCount, count = suballocations1st.size(); i < count; ++i)
10190  {
10191  VmaSuballocation& suballoc = suballocations1st[i];
10192  if(suballoc.type != VMA_SUBALLOCATION_TYPE_FREE &&
10193  suballoc.hAllocation->CanBecomeLost() &&
10194  suballoc.hAllocation->MakeLost(currentFrameIndex, frameInUseCount))
10195  {
10196  suballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
10197  suballoc.hAllocation = VK_NULL_HANDLE;
10198  ++m_1stNullItemsMiddleCount;
10199  m_SumFreeSize += suballoc.size;
10200  ++lostAllocationCount;
10201  }
10202  }
10203 
10204  SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
10205  for(size_t i = 0, count = suballocations2nd.size(); i < count; ++i)
10206  {
10207  VmaSuballocation& suballoc = suballocations2nd[i];
10208  if(suballoc.type != VMA_SUBALLOCATION_TYPE_FREE &&
10209  suballoc.hAllocation->CanBecomeLost() &&
10210  suballoc.hAllocation->MakeLost(currentFrameIndex, frameInUseCount))
10211  {
10212  suballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
10213  suballoc.hAllocation = VK_NULL_HANDLE;
10214  ++m_2ndNullItemsCount;
10215  m_SumFreeSize += suballoc.size;
10216  ++lostAllocationCount;
10217  }
10218  }
10219 
10220  if(lostAllocationCount)
10221  {
10222  CleanupAfterFree();
10223  }
10224 
10225  return lostAllocationCount;
10226 }
10227 
10228 VkResult VmaBlockMetadata_Linear::CheckCorruption(const void* pBlockData)
10229 {
10230  SuballocationVectorType& suballocations1st = AccessSuballocations1st();
10231  for(size_t i = m_1stNullItemsBeginCount, count = suballocations1st.size(); i < count; ++i)
10232  {
10233  const VmaSuballocation& suballoc = suballocations1st[i];
10234  if(suballoc.type != VMA_SUBALLOCATION_TYPE_FREE)
10235  {
10236  if(!VmaValidateMagicValue(pBlockData, suballoc.offset - VMA_DEBUG_MARGIN))
10237  {
10238  VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED BEFORE VALIDATED ALLOCATION!");
10239  return VK_ERROR_VALIDATION_FAILED_EXT;
10240  }
10241  if(!VmaValidateMagicValue(pBlockData, suballoc.offset + suballoc.size))
10242  {
10243  VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED AFTER VALIDATED ALLOCATION!");
10244  return VK_ERROR_VALIDATION_FAILED_EXT;
10245  }
10246  }
10247  }
10248 
10249  SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
10250  for(size_t i = 0, count = suballocations2nd.size(); i < count; ++i)
10251  {
10252  const VmaSuballocation& suballoc = suballocations2nd[i];
10253  if(suballoc.type != VMA_SUBALLOCATION_TYPE_FREE)
10254  {
10255  if(!VmaValidateMagicValue(pBlockData, suballoc.offset - VMA_DEBUG_MARGIN))
10256  {
10257  VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED BEFORE VALIDATED ALLOCATION!");
10258  return VK_ERROR_VALIDATION_FAILED_EXT;
10259  }
10260  if(!VmaValidateMagicValue(pBlockData, suballoc.offset + suballoc.size))
10261  {
10262  VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED AFTER VALIDATED ALLOCATION!");
10263  return VK_ERROR_VALIDATION_FAILED_EXT;
10264  }
10265  }
10266  }
10267 
10268  return VK_SUCCESS;
10269 }
10270 
void VmaBlockMetadata_Linear::Alloc(
    const VmaAllocationRequest& request,
    VmaSuballocationType type,
    VkDeviceSize allocSize,
    VmaAllocation hAllocation)
{
    // Commits a previously created allocation request: appends the new
    // suballocation to the vector selected by request.type and updates the
    // second-vector mode and the free-size bookkeeping.
    const VmaSuballocation newSuballoc = { request.offset, allocSize, hAllocation, type };

    switch(request.type)
    {
    case VmaAllocationRequestType::UpperAddress:
        {
            // Allocation from the top of the block: the 2nd vector acts as the
            // upper stack of the double-stack configuration.
            VMA_ASSERT(m_2ndVectorMode != SECOND_VECTOR_RING_BUFFER &&
                "CRITICAL ERROR: Trying to use linear allocator as double stack while it was already used as ring buffer.");
            SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
            suballocations2nd.push_back(newSuballoc);
            m_2ndVectorMode = SECOND_VECTOR_DOUBLE_STACK;
        }
        break;
    case VmaAllocationRequestType::EndOf1st:
        {
            SuballocationVectorType& suballocations1st = AccessSuballocations1st();

            // Must come after every existing allocation of the 1st vector.
            VMA_ASSERT(suballocations1st.empty() ||
                request.offset >= suballocations1st.back().offset + suballocations1st.back().size);
            // Check if it fits before the end of the block.
            VMA_ASSERT(request.offset + allocSize <= GetSize());

            suballocations1st.push_back(newSuballoc);
        }
        break;
    case VmaAllocationRequestType::EndOf2nd:
        {
            SuballocationVectorType& suballocations1st = AccessSuballocations1st();
            // New allocation at the end of 2-part ring buffer, so before first allocation from 1st vector.
            VMA_ASSERT(!suballocations1st.empty() &&
                request.offset + allocSize <= suballocations1st[m_1stNullItemsBeginCount].offset);
            SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();

            // Validate and advance the second-vector mode state machine.
            switch(m_2ndVectorMode)
            {
            case SECOND_VECTOR_EMPTY:
                // First allocation from second part ring buffer.
                VMA_ASSERT(suballocations2nd.empty());
                m_2ndVectorMode = SECOND_VECTOR_RING_BUFFER;
                break;
            case SECOND_VECTOR_RING_BUFFER:
                // 2-part ring buffer is already started.
                VMA_ASSERT(!suballocations2nd.empty());
                break;
            case SECOND_VECTOR_DOUBLE_STACK:
                VMA_ASSERT(0 && "CRITICAL ERROR: Trying to use linear allocator as ring buffer while it was already used as double stack.");
                break;
            default:
                VMA_ASSERT(0);
            }

            suballocations2nd.push_back(newSuballoc);
        }
        break;
    default:
        VMA_ASSERT(0 && "CRITICAL INTERNAL ERROR.");
    }

    m_SumFreeSize -= newSuballoc.size;
}
10337 
// Frees the given allocation by delegating to FreeAtOffset() with the
// allocation's block-relative offset.
void VmaBlockMetadata_Linear::Free(const VmaAllocation allocation)
{
    FreeAtOffset(allocation->GetOffset());
}
10342 
void VmaBlockMetadata_Linear::FreeAtOffset(VkDeviceSize offset)
{
    // Frees the suballocation that starts at the given offset. Cheap cases are
    // checked first (first live item of the 1st vector, last item of either
    // vector), then binary search is used for items in the middle of each
    // vector. Asserts if no suballocation with this offset exists.
    SuballocationVectorType& suballocations1st = AccessSuballocations1st();
    SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();

    if(!suballocations1st.empty())
    {
        // First allocation: Mark it as next empty at the beginning.
        VmaSuballocation& firstSuballoc = suballocations1st[m_1stNullItemsBeginCount];
        if(firstSuballoc.offset == offset)
        {
            firstSuballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
            firstSuballoc.hAllocation = VK_NULL_HANDLE;
            m_SumFreeSize += firstSuballoc.size;
            ++m_1stNullItemsBeginCount;
            CleanupAfterFree();
            return;
        }
    }

    // Last allocation in 2-part ring buffer or top of upper stack (same logic).
    if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER ||
        m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
    {
        VmaSuballocation& lastSuballoc = suballocations2nd.back();
        if(lastSuballoc.offset == offset)
        {
            m_SumFreeSize += lastSuballoc.size;
            suballocations2nd.pop_back();
            CleanupAfterFree();
            return;
        }
    }
    // Last allocation in 1st vector.
    else if(m_2ndVectorMode == SECOND_VECTOR_EMPTY)
    {
        VmaSuballocation& lastSuballoc = suballocations1st.back();
        if(lastSuballoc.offset == offset)
        {
            m_SumFreeSize += lastSuballoc.size;
            suballocations1st.pop_back();
            CleanupAfterFree();
            return;
        }
    }

    // Item from the middle of 1st vector.
    {
        VmaSuballocation refSuballoc;
        refSuballoc.offset = offset;
        // Rest of members stays uninitialized intentionally for better performance.
        SuballocationVectorType::iterator it = VmaBinaryFindSorted(
            suballocations1st.begin() + m_1stNullItemsBeginCount,
            suballocations1st.end(),
            refSuballoc,
            VmaSuballocationOffsetLess());
        if(it != suballocations1st.end())
        {
            // NOTE(review): presumably VmaBinaryFindSorted returns end() when no
            // element with exactly this offset exists — verify against its
            // definition; otherwise a miss could null out the wrong item.
            it->type = VMA_SUBALLOCATION_TYPE_FREE;
            it->hAllocation = VK_NULL_HANDLE;
            ++m_1stNullItemsMiddleCount;
            m_SumFreeSize += it->size;
            CleanupAfterFree();
            return;
        }
    }

    if(m_2ndVectorMode != SECOND_VECTOR_EMPTY)
    {
        // Item from the middle of 2nd vector.
        VmaSuballocation refSuballoc;
        refSuballoc.offset = offset;
        // Rest of members stays uninitialized intentionally for better performance.
        // The 2nd vector is sorted ascending by offset in ring-buffer mode and
        // descending in double-stack mode, hence the two comparators.
        SuballocationVectorType::iterator it = m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER ?
            VmaBinaryFindSorted(suballocations2nd.begin(), suballocations2nd.end(), refSuballoc, VmaSuballocationOffsetLess()) :
            VmaBinaryFindSorted(suballocations2nd.begin(), suballocations2nd.end(), refSuballoc, VmaSuballocationOffsetGreater());
        if(it != suballocations2nd.end())
        {
            it->type = VMA_SUBALLOCATION_TYPE_FREE;
            it->hAllocation = VK_NULL_HANDLE;
            ++m_2ndNullItemsCount;
            m_SumFreeSize += it->size;
            CleanupAfterFree();
            return;
        }
    }

    VMA_ASSERT(0 && "Allocation to free not found in linear allocator!");
}
10432 
10433 bool VmaBlockMetadata_Linear::ShouldCompact1st() const
10434 {
10435  const size_t nullItemCount = m_1stNullItemsBeginCount + m_1stNullItemsMiddleCount;
10436  const size_t suballocCount = AccessSuballocations1st().size();
10437  return suballocCount > 32 && nullItemCount * 2 >= (suballocCount - nullItemCount) * 3;
10438 }
10439 
void VmaBlockMetadata_Linear::CleanupAfterFree()
{
    // Restores class invariants after any free: trims null items from the edges
    // of both vectors, optionally compacts the 1st vector, and handles the
    // transitions "2nd vector became empty" and "1st vector became empty" (the
    // latter swaps the roles of the two vectors in ring-buffer mode).
    SuballocationVectorType& suballocations1st = AccessSuballocations1st();
    SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();

    if(IsEmpty())
    {
        // Whole block is free: reset everything to the initial state.
        suballocations1st.clear();
        suballocations2nd.clear();
        m_1stNullItemsBeginCount = 0;
        m_1stNullItemsMiddleCount = 0;
        m_2ndNullItemsCount = 0;
        m_2ndVectorMode = SECOND_VECTOR_EMPTY;
    }
    else
    {
        const size_t suballoc1stCount = suballocations1st.size();
        const size_t nullItem1stCount = m_1stNullItemsBeginCount + m_1stNullItemsMiddleCount;
        VMA_ASSERT(nullItem1stCount <= suballoc1stCount);

        // Find more null items at the beginning of 1st vector.
        // Items moving from "middle" to "begin" keep the total null count intact.
        while(m_1stNullItemsBeginCount < suballoc1stCount &&
            suballocations1st[m_1stNullItemsBeginCount].hAllocation == VK_NULL_HANDLE)
        {
            ++m_1stNullItemsBeginCount;
            --m_1stNullItemsMiddleCount;
        }

        // Find more null items at the end of 1st vector.
        while(m_1stNullItemsMiddleCount > 0 &&
            suballocations1st.back().hAllocation == VK_NULL_HANDLE)
        {
            --m_1stNullItemsMiddleCount;
            suballocations1st.pop_back();
        }

        // Find more null items at the end of 2nd vector.
        while(m_2ndNullItemsCount > 0 &&
            suballocations2nd.back().hAllocation == VK_NULL_HANDLE)
        {
            --m_2ndNullItemsCount;
            suballocations2nd.pop_back();
        }

        // Find more null items at the beginning of 2nd vector.
        while(m_2ndNullItemsCount > 0 &&
            suballocations2nd[0].hAllocation == VK_NULL_HANDLE)
        {
            --m_2ndNullItemsCount;
            VmaVectorRemove(suballocations2nd, 0);
        }

        if(ShouldCompact1st())
        {
            // Compaction: shift all non-null items of the 1st vector to its
            // front, preserving order, then shrink the vector.
            const size_t nonNullItemCount = suballoc1stCount - nullItem1stCount;
            size_t srcIndex = m_1stNullItemsBeginCount;
            for(size_t dstIndex = 0; dstIndex < nonNullItemCount; ++dstIndex)
            {
                while(suballocations1st[srcIndex].hAllocation == VK_NULL_HANDLE)
                {
                    ++srcIndex;
                }
                if(dstIndex != srcIndex)
                {
                    suballocations1st[dstIndex] = suballocations1st[srcIndex];
                }
                ++srcIndex;
            }
            suballocations1st.resize(nonNullItemCount);
            m_1stNullItemsBeginCount = 0;
            m_1stNullItemsMiddleCount = 0;
        }

        // 2nd vector became empty.
        if(suballocations2nd.empty())
        {
            m_2ndVectorMode = SECOND_VECTOR_EMPTY;
        }

        // 1st vector became empty.
        if(suballocations1st.size() - m_1stNullItemsBeginCount == 0)
        {
            suballocations1st.clear();
            m_1stNullItemsBeginCount = 0;

            if(!suballocations2nd.empty() && m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
            {
                // Swap 1st with 2nd. Now 2nd is empty.
                m_2ndVectorMode = SECOND_VECTOR_EMPTY;
                m_1stNullItemsMiddleCount = m_2ndNullItemsCount;
                // Leading null items of the former 2nd vector become the new
                // null prefix of the 1st vector.
                while(m_1stNullItemsBeginCount < suballocations2nd.size() &&
                    suballocations2nd[m_1stNullItemsBeginCount].hAllocation == VK_NULL_HANDLE)
                {
                    ++m_1stNullItemsBeginCount;
                    --m_1stNullItemsMiddleCount;
                }
                m_2ndNullItemsCount = 0;
                m_1stVectorIndex ^= 1; // The actual swap: flip which vector is "1st".
            }
        }
    }

    VMA_HEAVY_ASSERT(Validate());
}
10544 
10545 
10547 // class VmaBlockMetadata_Buddy
10548 
// Constructs empty buddy-allocator metadata. The tree itself (root node,
// usable size, level count) is built later in Init(); until then m_Root is
// VMA_NULL and all per-level free lists are empty.
VmaBlockMetadata_Buddy::VmaBlockMetadata_Buddy(VmaAllocator hAllocator) :
    VmaBlockMetadata(hAllocator),
    m_Root(VMA_NULL),
    m_AllocationCount(0),
    m_FreeCount(1),
    m_SumFreeSize(0)
{
    // Zero all free-list front/back pointers.
    memset(m_FreeList, 0, sizeof(m_FreeList));
}
10558 
10559 VmaBlockMetadata_Buddy::~VmaBlockMetadata_Buddy()
10560 {
10561  DeleteNode(m_Root);
10562 }
10563 
10564 void VmaBlockMetadata_Buddy::Init(VkDeviceSize size)
10565 {
10566  VmaBlockMetadata::Init(size);
10567 
10568  m_UsableSize = VmaPrevPow2(size);
10569  m_SumFreeSize = m_UsableSize;
10570 
10571  // Calculate m_LevelCount.
10572  m_LevelCount = 1;
10573  while(m_LevelCount < MAX_LEVELS &&
10574  LevelToNodeSize(m_LevelCount) >= MIN_NODE_SIZE)
10575  {
10576  ++m_LevelCount;
10577  }
10578 
10579  Node* rootNode = vma_new(GetAllocationCallbacks(), Node)();
10580  rootNode->offset = 0;
10581  rootNode->type = Node::TYPE_FREE;
10582  rootNode->parent = VMA_NULL;
10583  rootNode->buddy = VMA_NULL;
10584 
10585  m_Root = rootNode;
10586  AddToFreeListFront(0, rootNode);
10587 }
10588 
bool VmaBlockMetadata_Buddy::Validate() const
{
    // Checks internal consistency of the buddy tree and the per-level free
    // lists. Returns false (via VMA_VALIDATE) on the first inconsistency.

    // Validate tree.
    ValidationContext ctx;
    if(!ValidateNode(ctx, VMA_NULL, m_Root, 0, LevelToNodeSize(0)))
    {
        VMA_VALIDATE(false && "ValidateNode failed.");
    }
    VMA_VALIDATE(m_AllocationCount == ctx.calculatedAllocationCount);
    VMA_VALIDATE(m_SumFreeSize == ctx.calculatedSumFreeSize);
    // NOTE(review): ctx.calculatedFreeCount is accumulated by ValidateNode but
    // not compared against m_FreeCount here — confirm whether that check was
    // omitted intentionally.

    // Validate free node lists: each must be a well-formed doubly-linked list
    // of TYPE_FREE nodes with consistent front/back pointers.
    for(uint32_t level = 0; level < m_LevelCount; ++level)
    {
        VMA_VALIDATE(m_FreeList[level].front == VMA_NULL ||
            m_FreeList[level].front->free.prev == VMA_NULL);

        for(Node* node = m_FreeList[level].front;
            node != VMA_NULL;
            node = node->free.next)
        {
            VMA_VALIDATE(node->type == Node::TYPE_FREE);

            if(node->free.next == VMA_NULL)
            {
                VMA_VALIDATE(m_FreeList[level].back == node);
            }
            else
            {
                VMA_VALIDATE(node->free.next->free.prev == node);
            }
        }
    }

    // Validate that free lists at higher levels are empty.
    for(uint32_t level = m_LevelCount; level < MAX_LEVELS; ++level)
    {
        VMA_VALIDATE(m_FreeList[level].front == VMA_NULL && m_FreeList[level].back == VMA_NULL);
    }

    return true;
}
10631 
10632 VkDeviceSize VmaBlockMetadata_Buddy::GetUnusedRangeSizeMax() const
10633 {
10634  for(uint32_t level = 0; level < m_LevelCount; ++level)
10635  {
10636  if(m_FreeList[level].front != VMA_NULL)
10637  {
10638  return LevelToNodeSize(level);
10639  }
10640  }
10641  return 0;
10642 }
10643 
10644 void VmaBlockMetadata_Buddy::CalcAllocationStatInfo(VmaStatInfo& outInfo) const
10645 {
10646  const VkDeviceSize unusableSize = GetUnusableSize();
10647 
10648  outInfo.blockCount = 1;
10649 
10650  outInfo.allocationCount = outInfo.unusedRangeCount = 0;
10651  outInfo.usedBytes = outInfo.unusedBytes = 0;
10652 
10653  outInfo.allocationSizeMax = outInfo.unusedRangeSizeMax = 0;
10654  outInfo.allocationSizeMin = outInfo.unusedRangeSizeMin = UINT64_MAX;
10655  outInfo.allocationSizeAvg = outInfo.unusedRangeSizeAvg = 0; // Unused.
10656 
10657  CalcAllocationStatInfoNode(outInfo, m_Root, LevelToNodeSize(0));
10658 
10659  if(unusableSize > 0)
10660  {
10661  ++outInfo.unusedRangeCount;
10662  outInfo.unusedBytes += unusableSize;
10663  outInfo.unusedRangeSizeMax = VMA_MAX(outInfo.unusedRangeSizeMax, unusableSize);
10664  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, unusableSize);
10665  }
10666 }
10667 
10668 void VmaBlockMetadata_Buddy::AddPoolStats(VmaPoolStats& inoutStats) const
10669 {
10670  const VkDeviceSize unusableSize = GetUnusableSize();
10671 
10672  inoutStats.size += GetSize();
10673  inoutStats.unusedSize += m_SumFreeSize + unusableSize;
10674  inoutStats.allocationCount += m_AllocationCount;
10675  inoutStats.unusedRangeCount += m_FreeCount;
10676  inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, GetUnusedRangeSizeMax());
10677 
10678  if(unusableSize > 0)
10679  {
10680  ++inoutStats.unusedRangeCount;
10681  // Not updating inoutStats.unusedRangeSizeMax with unusableSize because this space is not available for allocations.
10682  }
10683 }
10684 
10685 #if VMA_STATS_STRING_ENABLED
10686 
10687 void VmaBlockMetadata_Buddy::PrintDetailedMap(class VmaJsonWriter& json) const
10688 {
10689  // TODO optimize
10690  VmaStatInfo stat;
10691  CalcAllocationStatInfo(stat);
10692 
10693  PrintDetailedMap_Begin(
10694  json,
10695  stat.unusedBytes,
10696  stat.allocationCount,
10697  stat.unusedRangeCount);
10698 
10699  PrintDetailedMapNode(json, m_Root, LevelToNodeSize(0));
10700 
10701  const VkDeviceSize unusableSize = GetUnusableSize();
10702  if(unusableSize > 0)
10703  {
10704  PrintDetailedMap_UnusedRange(json,
10705  m_UsableSize, // offset
10706  unusableSize); // size
10707  }
10708 
10709  PrintDetailedMap_End(json);
10710 }
10711 
10712 #endif // #if VMA_STATS_STRING_ENABLED
10713 
10714 bool VmaBlockMetadata_Buddy::CreateAllocationRequest(
10715  uint32_t currentFrameIndex,
10716  uint32_t frameInUseCount,
10717  VkDeviceSize bufferImageGranularity,
10718  VkDeviceSize allocSize,
10719  VkDeviceSize allocAlignment,
10720  bool upperAddress,
10721  VmaSuballocationType allocType,
10722  bool canMakeOtherLost,
10723  uint32_t strategy,
10724  VmaAllocationRequest* pAllocationRequest)
10725 {
10726  VMA_ASSERT(!upperAddress && "VMA_ALLOCATION_CREATE_UPPER_ADDRESS_BIT can be used only with linear algorithm.");
10727 
10728  // Simple way to respect bufferImageGranularity. May be optimized some day.
10729  // Whenever it might be an OPTIMAL image...
10730  if(allocType == VMA_SUBALLOCATION_TYPE_UNKNOWN ||
10731  allocType == VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN ||
10732  allocType == VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL)
10733  {
10734  allocAlignment = VMA_MAX(allocAlignment, bufferImageGranularity);
10735  allocSize = VMA_MAX(allocSize, bufferImageGranularity);
10736  }
10737 
10738  if(allocSize > m_UsableSize)
10739  {
10740  return false;
10741  }
10742 
10743  const uint32_t targetLevel = AllocSizeToLevel(allocSize);
10744  for(uint32_t level = targetLevel + 1; level--; )
10745  {
10746  for(Node* freeNode = m_FreeList[level].front;
10747  freeNode != VMA_NULL;
10748  freeNode = freeNode->free.next)
10749  {
10750  if(freeNode->offset % allocAlignment == 0)
10751  {
10752  pAllocationRequest->type = VmaAllocationRequestType::Normal;
10753  pAllocationRequest->offset = freeNode->offset;
10754  pAllocationRequest->sumFreeSize = LevelToNodeSize(level);
10755  pAllocationRequest->sumItemSize = 0;
10756  pAllocationRequest->itemsToMakeLostCount = 0;
10757  pAllocationRequest->customData = (void*)(uintptr_t)level;
10758  return true;
10759  }
10760  }
10761  }
10762 
10763  return false;
10764 }
10765 
10766 bool VmaBlockMetadata_Buddy::MakeRequestedAllocationsLost(
10767  uint32_t currentFrameIndex,
10768  uint32_t frameInUseCount,
10769  VmaAllocationRequest* pAllocationRequest)
10770 {
10771  /*
10772  Lost allocations are not supported in buddy allocator at the moment.
10773  Support might be added in the future.
10774  */
10775  return pAllocationRequest->itemsToMakeLostCount == 0;
10776 }
10777 
uint32_t VmaBlockMetadata_Buddy::MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount)
{
    /*
    Lost allocations are not supported in buddy allocator at the moment.
    Support might be added in the future.
    */
    // Always reports zero allocations made lost.
    return 0;
}
10786 
void VmaBlockMetadata_Buddy::Alloc(
    const VmaAllocationRequest& request,
    VmaSuballocationType type,
    VkDeviceSize allocSize,
    VmaAllocation hAllocation)
{
    // Commits an allocation request produced by CreateAllocationRequest():
    // finds the chosen free node (its level comes via request.customData and it
    // is identified by request.offset), splits it down to the target level if
    // it is larger than needed, and converts the final node to an allocation.
    VMA_ASSERT(request.type == VmaAllocationRequestType::Normal);

    const uint32_t targetLevel = AllocSizeToLevel(allocSize);
    uint32_t currLevel = (uint32_t)(uintptr_t)request.customData;

    // Locate the requested node in the free list of its level by offset.
    Node* currNode = m_FreeList[currLevel].front;
    VMA_ASSERT(currNode != VMA_NULL && currNode->type == Node::TYPE_FREE);
    while(currNode->offset != request.offset)
    {
        currNode = currNode->free.next;
        VMA_ASSERT(currNode != VMA_NULL && currNode->type == Node::TYPE_FREE);
    }

    // Go down, splitting free nodes.
    while(currLevel < targetLevel)
    {
        // currNode is already first free node at currLevel.
        // Remove it from list of free nodes at this currLevel.
        RemoveFromFreeList(currLevel, currNode);

        const uint32_t childrenLevel = currLevel + 1;

        // Create two free sub-nodes.
        Node* leftChild = vma_new(GetAllocationCallbacks(), Node)();
        Node* rightChild = vma_new(GetAllocationCallbacks(), Node)();

        leftChild->offset = currNode->offset;
        leftChild->type = Node::TYPE_FREE;
        leftChild->parent = currNode;
        leftChild->buddy = rightChild;

        rightChild->offset = currNode->offset + LevelToNodeSize(childrenLevel);
        rightChild->type = Node::TYPE_FREE;
        rightChild->parent = currNode;
        rightChild->buddy = leftChild;

        // Convert current currNode to split type.
        currNode->type = Node::TYPE_SPLIT;
        currNode->split.leftChild = leftChild;

        // Add child nodes to free list. Order is important!
        // (leftChild must end up at the front so the loop below can pick it up.)
        AddToFreeListFront(childrenLevel, rightChild);
        AddToFreeListFront(childrenLevel, leftChild);

        ++m_FreeCount;
        //m_SumFreeSize -= LevelToNodeSize(currLevel) % 2; // Useful only when level node sizes can be non power of 2.
        ++currLevel;
        currNode = m_FreeList[currLevel].front;

        /*
        We can be sure that currNode, as left child of node previously split,
        also fulfills the alignment requirement.
        */
    }

    // Remove from free list.
    VMA_ASSERT(currLevel == targetLevel &&
        currNode != VMA_NULL &&
        currNode->type == Node::TYPE_FREE);
    RemoveFromFreeList(currLevel, currNode);

    // Convert to allocation node.
    currNode->type = Node::TYPE_ALLOCATION;
    currNode->allocation.alloc = hAllocation;

    ++m_AllocationCount;
    --m_FreeCount;
    m_SumFreeSize -= allocSize;
}
10862 
10863 void VmaBlockMetadata_Buddy::DeleteNode(Node* node)
10864 {
10865  if(node->type == Node::TYPE_SPLIT)
10866  {
10867  DeleteNode(node->split.leftChild->buddy);
10868  DeleteNode(node->split.leftChild);
10869  }
10870 
10871  vma_delete(GetAllocationCallbacks(), node);
10872 }
10873 
// Recursively validates the buddy-tree invariants for the subtree rooted at
// `curr`, accumulating free/allocation counts and free size into `ctx` so the
// caller can cross-check them against the cached totals.
// VMA_VALIDATE presumably fails this function (returns false) when its
// condition does not hold - defined elsewhere in the file.
10874 bool VmaBlockMetadata_Buddy::ValidateNode(ValidationContext& ctx, const Node* parent, const Node* curr, uint32_t level, VkDeviceSize levelNodeSize) const
10875 {
10876  VMA_VALIDATE(level < m_LevelCount);
10877  VMA_VALIDATE(curr->parent == parent);
 // Only the root has neither buddy nor parent; buddies must point at each other.
10878  VMA_VALIDATE((curr->buddy == VMA_NULL) == (parent == VMA_NULL));
10879  VMA_VALIDATE(curr->buddy == VMA_NULL || curr->buddy->buddy == curr);
10880  switch(curr->type)
10881  {
10882  case Node::TYPE_FREE:
10883  // curr->free.prev, next are validated separately.
10884  ctx.calculatedSumFreeSize += levelNodeSize;
10885  ++ctx.calculatedFreeCount;
10886  break;
10887  case Node::TYPE_ALLOCATION:
10888  ++ctx.calculatedAllocationCount;
 // The unused tail of the node (internal fragmentation) counts as free space.
10889  ctx.calculatedSumFreeSize += levelNodeSize - curr->allocation.alloc->GetSize();
10890  VMA_VALIDATE(curr->allocation.alloc != VK_NULL_HANDLE);
10891  break;
10892  case Node::TYPE_SPLIT:
10893  {
10894  const uint32_t childrenLevel = level + 1;
10895  const VkDeviceSize childrenLevelNodeSize = levelNodeSize / 2;
10896  const Node* const leftChild = curr->split.leftChild;
10897  VMA_VALIDATE(leftChild != VMA_NULL);
 // Left child shares the parent's offset; right child starts half a node later.
10898  VMA_VALIDATE(leftChild->offset == curr->offset);
10899  if(!ValidateNode(ctx, curr, leftChild, childrenLevel, childrenLevelNodeSize))
10900  {
10901  VMA_VALIDATE(false && "ValidateNode for left child failed.");
10902  }
10903  const Node* const rightChild = leftChild->buddy;
10904  VMA_VALIDATE(rightChild->offset == curr->offset + childrenLevelNodeSize);
10905  if(!ValidateNode(ctx, curr, rightChild, childrenLevel, childrenLevelNodeSize))
10906  {
10907  VMA_VALIDATE(false && "ValidateNode for right child failed.");
10908  }
10909  }
10910  break;
10911  default:
 // Unknown node type: corrupt tree.
10912  return false;
10913  }
10914 
10915  return true;
10916 }
10917 
10918 uint32_t VmaBlockMetadata_Buddy::AllocSizeToLevel(VkDeviceSize allocSize) const
10919 {
10920  // I know this could be optimized somehow e.g. by using std::log2p1 from C++20.
10921  uint32_t level = 0;
10922  VkDeviceSize currLevelNodeSize = m_UsableSize;
10923  VkDeviceSize nextLevelNodeSize = currLevelNodeSize >> 1;
10924  while(allocSize <= nextLevelNodeSize && level + 1 < m_LevelCount)
10925  {
10926  ++level;
10927  currLevelNodeSize = nextLevelNodeSize;
10928  nextLevelNodeSize = currLevelNodeSize >> 1;
10929  }
10930  return level;
10931 }
10932 
10933 void VmaBlockMetadata_Buddy::FreeAtOffset(VmaAllocation alloc, VkDeviceSize offset)
10934 {
10935  // Find node and level.
10936  Node* node = m_Root;
10937  VkDeviceSize nodeOffset = 0;
10938  uint32_t level = 0;
10939  VkDeviceSize levelNodeSize = LevelToNodeSize(0);
10940  while(node->type == Node::TYPE_SPLIT)
10941  {
10942  const VkDeviceSize nextLevelSize = levelNodeSize >> 1;
10943  if(offset < nodeOffset + nextLevelSize)
10944  {
10945  node = node->split.leftChild;
10946  }
10947  else
10948  {
10949  node = node->split.leftChild->buddy;
10950  nodeOffset += nextLevelSize;
10951  }
10952  ++level;
10953  levelNodeSize = nextLevelSize;
10954  }
10955 
10956  VMA_ASSERT(node != VMA_NULL && node->type == Node::TYPE_ALLOCATION);
10957  VMA_ASSERT(alloc == VK_NULL_HANDLE || node->allocation.alloc == alloc);
10958 
10959  ++m_FreeCount;
10960  --m_AllocationCount;
10961  m_SumFreeSize += alloc->GetSize();
10962 
10963  node->type = Node::TYPE_FREE;
10964 
10965  // Join free nodes if possible.
10966  while(level > 0 && node->buddy->type == Node::TYPE_FREE)
10967  {
10968  RemoveFromFreeList(level, node->buddy);
10969  Node* const parent = node->parent;
10970 
10971  vma_delete(GetAllocationCallbacks(), node->buddy);
10972  vma_delete(GetAllocationCallbacks(), node);
10973  parent->type = Node::TYPE_FREE;
10974 
10975  node = parent;
10976  --level;
10977  //m_SumFreeSize += LevelToNodeSize(level) % 2; // Useful only when level node sizes can be non power of 2.
10978  --m_FreeCount;
10979  }
10980 
10981  AddToFreeListFront(level, node);
10982 }
10983 
10984 void VmaBlockMetadata_Buddy::CalcAllocationStatInfoNode(VmaStatInfo& outInfo, const Node* node, VkDeviceSize levelNodeSize) const
10985 {
10986  switch(node->type)
10987  {
10988  case Node::TYPE_FREE:
10989  ++outInfo.unusedRangeCount;
10990  outInfo.unusedBytes += levelNodeSize;
10991  outInfo.unusedRangeSizeMax = VMA_MAX(outInfo.unusedRangeSizeMax, levelNodeSize);
10992  outInfo.unusedRangeSizeMin = VMA_MAX(outInfo.unusedRangeSizeMin, levelNodeSize);
10993  break;
10994  case Node::TYPE_ALLOCATION:
10995  {
10996  const VkDeviceSize allocSize = node->allocation.alloc->GetSize();
10997  ++outInfo.allocationCount;
10998  outInfo.usedBytes += allocSize;
10999  outInfo.allocationSizeMax = VMA_MAX(outInfo.allocationSizeMax, allocSize);
11000  outInfo.allocationSizeMin = VMA_MAX(outInfo.allocationSizeMin, allocSize);
11001 
11002  const VkDeviceSize unusedRangeSize = levelNodeSize - allocSize;
11003  if(unusedRangeSize > 0)
11004  {
11005  ++outInfo.unusedRangeCount;
11006  outInfo.unusedBytes += unusedRangeSize;
11007  outInfo.unusedRangeSizeMax = VMA_MAX(outInfo.unusedRangeSizeMax, unusedRangeSize);
11008  outInfo.unusedRangeSizeMin = VMA_MAX(outInfo.unusedRangeSizeMin, unusedRangeSize);
11009  }
11010  }
11011  break;
11012  case Node::TYPE_SPLIT:
11013  {
11014  const VkDeviceSize childrenNodeSize = levelNodeSize / 2;
11015  const Node* const leftChild = node->split.leftChild;
11016  CalcAllocationStatInfoNode(outInfo, leftChild, childrenNodeSize);
11017  const Node* const rightChild = leftChild->buddy;
11018  CalcAllocationStatInfoNode(outInfo, rightChild, childrenNodeSize);
11019  }
11020  break;
11021  default:
11022  VMA_ASSERT(0);
11023  }
11024 }
11025 
11026 void VmaBlockMetadata_Buddy::AddToFreeListFront(uint32_t level, Node* node)
11027 {
11028  VMA_ASSERT(node->type == Node::TYPE_FREE);
11029 
11030  // List is empty.
11031  Node* const frontNode = m_FreeList[level].front;
11032  if(frontNode == VMA_NULL)
11033  {
11034  VMA_ASSERT(m_FreeList[level].back == VMA_NULL);
11035  node->free.prev = node->free.next = VMA_NULL;
11036  m_FreeList[level].front = m_FreeList[level].back = node;
11037  }
11038  else
11039  {
11040  VMA_ASSERT(frontNode->free.prev == VMA_NULL);
11041  node->free.prev = VMA_NULL;
11042  node->free.next = frontNode;
11043  frontNode->free.prev = node;
11044  m_FreeList[level].front = node;
11045  }
11046 }
11047 
11048 void VmaBlockMetadata_Buddy::RemoveFromFreeList(uint32_t level, Node* node)
11049 {
11050  VMA_ASSERT(m_FreeList[level].front != VMA_NULL);
11051 
11052  // It is at the front.
11053  if(node->free.prev == VMA_NULL)
11054  {
11055  VMA_ASSERT(m_FreeList[level].front == node);
11056  m_FreeList[level].front = node->free.next;
11057  }
11058  else
11059  {
11060  Node* const prevFreeNode = node->free.prev;
11061  VMA_ASSERT(prevFreeNode->free.next == node);
11062  prevFreeNode->free.next = node->free.next;
11063  }
11064 
11065  // It is at the back.
11066  if(node->free.next == VMA_NULL)
11067  {
11068  VMA_ASSERT(m_FreeList[level].back == node);
11069  m_FreeList[level].back = node->free.prev;
11070  }
11071  else
11072  {
11073  Node* const nextFreeNode = node->free.next;
11074  VMA_ASSERT(nextFreeNode->free.prev == node);
11075  nextFreeNode->free.prev = node->free.prev;
11076  }
11077 }
11078 
11079 #if VMA_STATS_STRING_ENABLED
11080 void VmaBlockMetadata_Buddy::PrintDetailedMapNode(class VmaJsonWriter& json, const Node* node, VkDeviceSize levelNodeSize) const
11081 {
11082  switch(node->type)
11083  {
11084  case Node::TYPE_FREE:
11085  PrintDetailedMap_UnusedRange(json, node->offset, levelNodeSize);
11086  break;
11087  case Node::TYPE_ALLOCATION:
11088  {
11089  PrintDetailedMap_Allocation(json, node->offset, node->allocation.alloc);
11090  const VkDeviceSize allocSize = node->allocation.alloc->GetSize();
11091  if(allocSize < levelNodeSize)
11092  {
11093  PrintDetailedMap_UnusedRange(json, node->offset + allocSize, levelNodeSize - allocSize);
11094  }
11095  }
11096  break;
11097  case Node::TYPE_SPLIT:
11098  {
11099  const VkDeviceSize childrenNodeSize = levelNodeSize / 2;
11100  const Node* const leftChild = node->split.leftChild;
11101  PrintDetailedMapNode(json, leftChild, childrenNodeSize);
11102  const Node* const rightChild = leftChild->buddy;
11103  PrintDetailedMapNode(json, rightChild, childrenNodeSize);
11104  }
11105  break;
11106  default:
11107  VMA_ASSERT(0);
11108  }
11109 }
11110 #endif // #if VMA_STATS_STRING_ENABLED
11111 
11112 
11114 // class VmaDeviceMemoryBlock
11115 
// Constructs the block in an uninitialized state (no VkDeviceMemory, no
// metadata); Init() must be called before the block is used.
// The hAllocator parameter is accepted but not stored here.
11116 VmaDeviceMemoryBlock::VmaDeviceMemoryBlock(VmaAllocator hAllocator) :
11117  m_pMetadata(VMA_NULL),
11118  m_MemoryTypeIndex(UINT32_MAX),
11119  m_Id(0),
11120  m_hMemory(VK_NULL_HANDLE),
11121  m_MapCount(0),
11122  m_pMappedData(VMA_NULL)
11123 {
11124 }
11125 
11126 void VmaDeviceMemoryBlock::Init(
11127  VmaAllocator hAllocator,
11128  VmaPool hParentPool,
11129  uint32_t newMemoryTypeIndex,
11130  VkDeviceMemory newMemory,
11131  VkDeviceSize newSize,
11132  uint32_t id,
11133  uint32_t algorithm)
11134 {
11135  VMA_ASSERT(m_hMemory == VK_NULL_HANDLE);
11136 
11137  m_hParentPool = hParentPool;
11138  m_MemoryTypeIndex = newMemoryTypeIndex;
11139  m_Id = id;
11140  m_hMemory = newMemory;
11141 
11142  switch(algorithm)
11143  {
11145  m_pMetadata = vma_new(hAllocator, VmaBlockMetadata_Linear)(hAllocator);
11146  break;
11148  m_pMetadata = vma_new(hAllocator, VmaBlockMetadata_Buddy)(hAllocator);
11149  break;
11150  default:
11151  VMA_ASSERT(0);
11152  // Fall-through.
11153  case 0:
11154  m_pMetadata = vma_new(hAllocator, VmaBlockMetadata_Generic)(hAllocator);
11155  }
11156  m_pMetadata->Init(newSize);
11157 }
11158 
// Releases the block's VkDeviceMemory back to the allocator and destroys the
// metadata. The block must be empty (all suballocations freed) first.
11159 void VmaDeviceMemoryBlock::Destroy(VmaAllocator allocator)
11160 {
11161  // This is the most important assert in the entire library.
11162  // Hitting it means you have some memory leak - unreleased VmaAllocation objects.
11163  VMA_ASSERT(m_pMetadata->IsEmpty() && "Some allocations were not freed before destruction of this memory block!");
11164 
11165  VMA_ASSERT(m_hMemory != VK_NULL_HANDLE);
 // Nulling the handle after the free makes a second Destroy() trip the
 // assert above instead of double-freeing device memory.
11166  allocator->FreeVulkanMemory(m_MemoryTypeIndex, m_pMetadata->GetSize(), m_hMemory);
11167  m_hMemory = VK_NULL_HANDLE;
11168 
11169  vma_delete(allocator, m_pMetadata);
11170  m_pMetadata = VMA_NULL;
11171 }
11172 
// Sanity-checks the block: it must own device memory of nonzero size, and
// its metadata must be internally consistent.
11173 bool VmaDeviceMemoryBlock::Validate() const
11174 {
11175  VMA_VALIDATE((m_hMemory != VK_NULL_HANDLE) &&
11176  (m_pMetadata->GetSize() != 0));
11177 
11178  return m_pMetadata->Validate();
11179 }
11180 
11181 VkResult VmaDeviceMemoryBlock::CheckCorruption(VmaAllocator hAllocator)
11182 {
11183  void* pData = nullptr;
11184  VkResult res = Map(hAllocator, 1, &pData);
11185  if(res != VK_SUCCESS)
11186  {
11187  return res;
11188  }
11189 
11190  res = m_pMetadata->CheckCorruption(pData);
11191 
11192  Unmap(hAllocator, 1);
11193 
11194  return res;
11195 }
11196 
11197 VkResult VmaDeviceMemoryBlock::Map(VmaAllocator hAllocator, uint32_t count, void** ppData)
11198 {
11199  if(count == 0)
11200  {
11201  return VK_SUCCESS;
11202  }
11203 
11204  VmaMutexLock lock(m_Mutex, hAllocator->m_UseMutex);
11205  if(m_MapCount != 0)
11206  {
11207  m_MapCount += count;
11208  VMA_ASSERT(m_pMappedData != VMA_NULL);
11209  if(ppData != VMA_NULL)
11210  {
11211  *ppData = m_pMappedData;
11212  }
11213  return VK_SUCCESS;
11214  }
11215  else
11216  {
11217  VkResult result = (*hAllocator->GetVulkanFunctions().vkMapMemory)(
11218  hAllocator->m_hDevice,
11219  m_hMemory,
11220  0, // offset
11221  VK_WHOLE_SIZE,
11222  0, // flags
11223  &m_pMappedData);
11224  if(result == VK_SUCCESS)
11225  {
11226  if(ppData != VMA_NULL)
11227  {
11228  *ppData = m_pMappedData;
11229  }
11230  m_MapCount = count;
11231  }
11232  return result;
11233  }
11234 }
11235 
11236 void VmaDeviceMemoryBlock::Unmap(VmaAllocator hAllocator, uint32_t count)
11237 {
11238  if(count == 0)
11239  {
11240  return;
11241  }
11242 
11243  VmaMutexLock lock(m_Mutex, hAllocator->m_UseMutex);
11244  if(m_MapCount >= count)
11245  {
11246  m_MapCount -= count;
11247  if(m_MapCount == 0)
11248  {
11249  m_pMappedData = VMA_NULL;
11250  (*hAllocator->GetVulkanFunctions().vkUnmapMemory)(hAllocator->m_hDevice, m_hMemory);
11251  }
11252  }
11253  else
11254  {
11255  VMA_ASSERT(0 && "VkDeviceMemory block is being unmapped while it was not previously mapped.");
11256  }
11257 }
11258 
11259 VkResult VmaDeviceMemoryBlock::WriteMagicValueAroundAllocation(VmaAllocator hAllocator, VkDeviceSize allocOffset, VkDeviceSize allocSize)
11260 {
11261  VMA_ASSERT(VMA_DEBUG_MARGIN > 0 && VMA_DEBUG_MARGIN % 4 == 0 && VMA_DEBUG_DETECT_CORRUPTION);
11262  VMA_ASSERT(allocOffset >= VMA_DEBUG_MARGIN);
11263 
11264  void* pData;
11265  VkResult res = Map(hAllocator, 1, &pData);
11266  if(res != VK_SUCCESS)
11267  {
11268  return res;
11269  }
11270 
11271  VmaWriteMagicValue(pData, allocOffset - VMA_DEBUG_MARGIN);
11272  VmaWriteMagicValue(pData, allocOffset + allocSize);
11273 
11274  Unmap(hAllocator, 1);
11275 
11276  return VK_SUCCESS;
11277 }
11278 
11279 VkResult VmaDeviceMemoryBlock::ValidateMagicValueAroundAllocation(VmaAllocator hAllocator, VkDeviceSize allocOffset, VkDeviceSize allocSize)
11280 {
11281  VMA_ASSERT(VMA_DEBUG_MARGIN > 0 && VMA_DEBUG_MARGIN % 4 == 0 && VMA_DEBUG_DETECT_CORRUPTION);
11282  VMA_ASSERT(allocOffset >= VMA_DEBUG_MARGIN);
11283 
11284  void* pData;
11285  VkResult res = Map(hAllocator, 1, &pData);
11286  if(res != VK_SUCCESS)
11287  {
11288  return res;
11289  }
11290 
11291  if(!VmaValidateMagicValue(pData, allocOffset - VMA_DEBUG_MARGIN))
11292  {
11293  VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED BEFORE FREED ALLOCATION!");
11294  }
11295  else if(!VmaValidateMagicValue(pData, allocOffset + allocSize))
11296  {
11297  VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED AFTER FREED ALLOCATION!");
11298  }
11299 
11300  Unmap(hAllocator, 1);
11301 
11302  return VK_SUCCESS;
11303 }
11304 
11305 VkResult VmaDeviceMemoryBlock::BindBufferMemory(
11306  const VmaAllocator hAllocator,
11307  const VmaAllocation hAllocation,
11308  VkDeviceSize allocationLocalOffset,
11309  VkBuffer hBuffer,
11310  const void* pNext)
11311 {
11312  VMA_ASSERT(hAllocation->GetType() == VmaAllocation_T::ALLOCATION_TYPE_BLOCK &&
11313  hAllocation->GetBlock() == this);
11314  VMA_ASSERT(allocationLocalOffset < hAllocation->GetSize() &&
11315  "Invalid allocationLocalOffset. Did you forget that this offset is relative to the beginning of the allocation, not the whole memory block?");
11316  const VkDeviceSize memoryOffset = hAllocation->GetOffset() + allocationLocalOffset;
11317  // This lock is important so that we don't call vkBind... and/or vkMap... simultaneously on the same VkDeviceMemory from multiple threads.
11318  VmaMutexLock lock(m_Mutex, hAllocator->m_UseMutex);
11319  return hAllocator->BindVulkanBuffer(m_hMemory, memoryOffset, hBuffer, pNext);
11320 }
11321 
11322 VkResult VmaDeviceMemoryBlock::BindImageMemory(
11323  const VmaAllocator hAllocator,
11324  const VmaAllocation hAllocation,
11325  VkDeviceSize allocationLocalOffset,
11326  VkImage hImage,
11327  const void* pNext)
11328 {
11329  VMA_ASSERT(hAllocation->GetType() == VmaAllocation_T::ALLOCATION_TYPE_BLOCK &&
11330  hAllocation->GetBlock() == this);
11331  VMA_ASSERT(allocationLocalOffset < hAllocation->GetSize() &&
11332  "Invalid allocationLocalOffset. Did you forget that this offset is relative to the beginning of the allocation, not the whole memory block?");
11333  const VkDeviceSize memoryOffset = hAllocation->GetOffset() + allocationLocalOffset;
11334  // This lock is important so that we don't call vkBind... and/or vkMap... simultaneously on the same VkDeviceMemory from multiple threads.
11335  VmaMutexLock lock(m_Mutex, hAllocator->m_UseMutex);
11336  return hAllocator->BindVulkanImage(m_hMemory, memoryOffset, hImage, pNext);
11337 }
11338 
// Resets outInfo to the neutral element for VmaAddStatInfo accumulation:
// all counters/sums zeroed, minima set to UINT64_MAX so any real sample
// replaces them.
11339 static void InitStatInfo(VmaStatInfo& outInfo)
11340 {
11341  memset(&outInfo, 0, sizeof(outInfo));
11342  outInfo.allocationSizeMin = UINT64_MAX;
11343  outInfo.unusedRangeSizeMin = UINT64_MAX;
11344 }
11345 
11346 // Adds statistics srcInfo into inoutInfo, like: inoutInfo += srcInfo.
 // Counts and byte totals add up; min/max fields take the tighter bound.
 // Average fields are not touched - see VmaPostprocessCalcStatInfo.
11347 static void VmaAddStatInfo(VmaStatInfo& inoutInfo, const VmaStatInfo& srcInfo)
11348 {
11349  inoutInfo.blockCount += srcInfo.blockCount;
11350  inoutInfo.allocationCount += srcInfo.allocationCount;
11351  inoutInfo.unusedRangeCount += srcInfo.unusedRangeCount;
11352  inoutInfo.usedBytes += srcInfo.usedBytes;
11353  inoutInfo.unusedBytes += srcInfo.unusedBytes;
11354  inoutInfo.allocationSizeMin = VMA_MIN(inoutInfo.allocationSizeMin, srcInfo.allocationSizeMin);
11355  inoutInfo.allocationSizeMax = VMA_MAX(inoutInfo.allocationSizeMax, srcInfo.allocationSizeMax);
11356  inoutInfo.unusedRangeSizeMin = VMA_MIN(inoutInfo.unusedRangeSizeMin, srcInfo.unusedRangeSizeMin);
11357  inoutInfo.unusedRangeSizeMax = VMA_MAX(inoutInfo.unusedRangeSizeMax, srcInfo.unusedRangeSizeMax);
11358 }
11359 
11360 static void VmaPostprocessCalcStatInfo(VmaStatInfo& inoutInfo)
11361 {
11362  inoutInfo.allocationSizeAvg = (inoutInfo.allocationCount > 0) ?
11363  VmaRoundDiv<VkDeviceSize>(inoutInfo.usedBytes, inoutInfo.allocationCount) : 0;
11364  inoutInfo.unusedRangeSizeAvg = (inoutInfo.unusedRangeCount > 0) ?
11365  VmaRoundDiv<VkDeviceSize>(inoutInfo.unusedBytes, inoutInfo.unusedRangeCount) : 0;
11366 }
11367 
// Constructs a custom pool; all real state lives in the embedded
// m_BlockVector. createInfo.blockSize == 0 means "use the allocator's
// preferred block size" and marks the block size as non-explicit.
11368 VmaPool_T::VmaPool_T(
11369  VmaAllocator hAllocator,
11370  const VmaPoolCreateInfo& createInfo,
11371  VkDeviceSize preferredBlockSize) :
11372  m_BlockVector(
11373  hAllocator,
11374  this, // hParentPool
11375  createInfo.memoryTypeIndex,
11376  createInfo.blockSize != 0 ? createInfo.blockSize : preferredBlockSize,
11377  createInfo.minBlockCount,
11378  createInfo.maxBlockCount,
 // Granularity of 1 effectively disables buffer/image granularity handling.
11379  (createInfo.flags & VMA_POOL_CREATE_IGNORE_BUFFER_IMAGE_GRANULARITY_BIT) != 0 ? 1 : hAllocator->GetBufferImageGranularity(),
11380  createInfo.frameInUseCount,
11381  true, // isCustomPool
11382  createInfo.blockSize != 0, // explicitBlockSize
11383  createInfo.flags & VMA_POOL_CREATE_ALGORITHM_MASK), // algorithm
11384  m_Id(0)
11385 {
11386 }
11387 
// Trivial: the embedded m_BlockVector destroys its blocks in its own destructor.
11388 VmaPool_T::~VmaPool_T()
11389 {
11390 }
11391 
11392 #if VMA_STATS_STRING_ENABLED
11393 
11394 #endif // #if VMA_STATS_STRING_ENABLED
11395 
// Stores the configuration for a sequence of VkDeviceMemory blocks of one
// memory type; no blocks are created here (see CreateMinBlocks / AllocatePage).
11396 VmaBlockVector::VmaBlockVector(
11397  VmaAllocator hAllocator,
11398  VmaPool hParentPool,
11399  uint32_t memoryTypeIndex,
11400  VkDeviceSize preferredBlockSize,
11401  size_t minBlockCount,
11402  size_t maxBlockCount,
11403  VkDeviceSize bufferImageGranularity,
11404  uint32_t frameInUseCount,
11405  bool isCustomPool,
11406  bool explicitBlockSize,
11407  uint32_t algorithm) :
11408  m_hAllocator(hAllocator),
11409  m_hParentPool(hParentPool),
11410  m_MemoryTypeIndex(memoryTypeIndex),
11411  m_PreferredBlockSize(preferredBlockSize),
11412  m_MinBlockCount(minBlockCount),
11413  m_MaxBlockCount(maxBlockCount),
11414  m_BufferImageGranularity(bufferImageGranularity),
11415  m_FrameInUseCount(frameInUseCount),
11416  m_IsCustomPool(isCustomPool),
11417  m_ExplicitBlockSize(explicitBlockSize),
11418  m_Algorithm(algorithm),
11419  m_HasEmptyBlock(false),
11420  m_Blocks(VmaStlAllocator<VmaDeviceMemoryBlock*>(hAllocator->GetAllocationCallbacks())),
11421  m_NextBlockId(0)
11422 {
11423 }
11424 
11425 VmaBlockVector::~VmaBlockVector()
11426 {
11427  for(size_t i = m_Blocks.size(); i--; )
11428  {
11429  m_Blocks[i]->Destroy(m_hAllocator);
11430  vma_delete(m_hAllocator, m_Blocks[i]);
11431  }
11432 }
11433 
11434 VkResult VmaBlockVector::CreateMinBlocks()
11435 {
11436  for(size_t i = 0; i < m_MinBlockCount; ++i)
11437  {
11438  VkResult res = CreateBlock(m_PreferredBlockSize, VMA_NULL);
11439  if(res != VK_SUCCESS)
11440  {
11441  return res;
11442  }
11443  }
11444  return VK_SUCCESS;
11445 }
11446 
11447 void VmaBlockVector::GetPoolStats(VmaPoolStats* pStats)
11448 {
11449  VmaMutexLockRead lock(m_Mutex, m_hAllocator->m_UseMutex);
11450 
11451  const size_t blockCount = m_Blocks.size();
11452 
11453  pStats->size = 0;
11454  pStats->unusedSize = 0;
11455  pStats->allocationCount = 0;
11456  pStats->unusedRangeCount = 0;
11457  pStats->unusedRangeSizeMax = 0;
11458  pStats->blockCount = blockCount;
11459 
11460  for(uint32_t blockIndex = 0; blockIndex < blockCount; ++blockIndex)
11461  {
11462  const VmaDeviceMemoryBlock* const pBlock = m_Blocks[blockIndex];
11463  VMA_ASSERT(pBlock);
11464  VMA_HEAVY_ASSERT(pBlock->Validate());
11465  pBlock->m_pMetadata->AddPoolStats(*pStats);
11466  }
11467 }
11468 
11469 bool VmaBlockVector::IsCorruptionDetectionEnabled() const
11470 {
11471  const uint32_t requiredMemFlags = VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_COHERENT_BIT;
11472  return (VMA_DEBUG_DETECT_CORRUPTION != 0) &&
11473  (VMA_DEBUG_MARGIN > 0) &&
11474  (m_Algorithm == 0 || m_Algorithm == VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT) &&
11475  (m_hAllocator->m_MemProps.memoryTypes[m_MemoryTypeIndex].propertyFlags & requiredMemFlags) == requiredMemFlags;
11476 }
11477 
// Upper bound on retries in VmaBlockVector::AllocatePage when allocating with
// canMakeOtherLost (concurrent threads may keep touching allocations).
11478 static const uint32_t VMA_ALLOCATION_TRY_COUNT = 32;
11479 
/*
Allocates `allocationCount` allocations of identical size/alignment under a
single write lock. On any page failure, frees the pages created so far,
zeroes the whole pAllocations array, and returns the failing VkResult.
*/
11480 VkResult VmaBlockVector::Allocate(
11481  uint32_t currentFrameIndex,
11482  VkDeviceSize size,
11483  VkDeviceSize alignment,
11484  const VmaAllocationCreateInfo& createInfo,
11485  VmaSuballocationType suballocType,
11486  size_t allocationCount,
11487  VmaAllocation* pAllocations)
11488 {
11489  size_t allocIndex;
11490  VkResult res = VK_SUCCESS;
11491 
 // Round size/alignment up, presumably so the corruption-detection magic
 // values can be written at offsets aligned to their own size - TODO confirm.
11492  if(IsCorruptionDetectionEnabled())
11493  {
11494  size = VmaAlignUp<VkDeviceSize>(size, sizeof(VMA_CORRUPTION_DETECTION_MAGIC_VALUE));
11495  alignment = VmaAlignUp<VkDeviceSize>(alignment, sizeof(VMA_CORRUPTION_DETECTION_MAGIC_VALUE));
11496  }
11497 
 // Scope limits the write lock to the allocation loop; rollback below runs unlocked.
11498  {
11499  VmaMutexLockWrite lock(m_Mutex, m_hAllocator->m_UseMutex);
11500  for(allocIndex = 0; allocIndex < allocationCount; ++allocIndex)
11501  {
11502  res = AllocatePage(
11503  currentFrameIndex,
11504  size,
11505  alignment,
11506  createInfo,
11507  suballocType,
11508  pAllocations + allocIndex);
11509  if(res != VK_SUCCESS)
11510  {
11511  break;
11512  }
11513  }
11514  }
11515 
11516  if(res != VK_SUCCESS)
11517  {
11518  // Free all already created allocations.
 // allocIndex is the index of the failed page; the countdown frees 0..allocIndex-1.
11519  while(allocIndex--)
11520  {
11521  Free(pAllocations[allocIndex]);
11522  }
11523  memset(pAllocations, 0, sizeof(VmaAllocation) * allocationCount);
11524  }
11525 
11526  return res;
11527 }
11528 
/*
Allocates a single suballocation from this block vector. Strategy, as the
code below shows:
 1. Try existing blocks without making other allocations lost (order depends
    on the allocation strategy and algorithm).
 2. If allowed, create a new block (shrinking from the preferred size when
    no explicit block size was configured) and allocate from it.
 3. With canMakeOtherLost: retry up to VMA_ALLOCATION_TRY_COUNT times,
    evicting lost-able allocations to make room.
NOTE(review): this function came through a doxygen HTML dump that dropped
hyperlinked source lines (enum `case` labels and parts of `if` conditions).
Each gap is marked below; the surviving code is kept byte-identical.
*/
11529 VkResult VmaBlockVector::AllocatePage(
11530  uint32_t currentFrameIndex,
11531  VkDeviceSize size,
11532  VkDeviceSize alignment,
11533  const VmaAllocationCreateInfo& createInfo,
11534  VmaSuballocationType suballocType,
11535  VmaAllocation* pAllocation)
11536 {
11537  const bool isUpperAddress = (createInfo.flags & VMA_ALLOCATION_CREATE_UPPER_ADDRESS_BIT) != 0;
11538  bool canMakeOtherLost = (createInfo.flags & VMA_ALLOCATION_CREATE_CAN_MAKE_OTHER_LOST_BIT) != 0;
11539  const bool mapped = (createInfo.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0;
11540  const bool isUserDataString = (createInfo.flags & VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT) != 0;
11541  const bool canCreateNewBlock =
11542  ((createInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) == 0) &&
11543  (m_Blocks.size() < m_MaxBlockCount);
11544  uint32_t strategy = createInfo.flags & VMA_ALLOCATION_CREATE_STRATEGY_MASK;
11545 
11546  // If linearAlgorithm is used, canMakeOtherLost is available only when used as ring buffer.
11547  // Which in turn is available only when maxBlockCount = 1.
11548  if(m_Algorithm == VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT && m_MaxBlockCount > 1)
11549  {
11550  canMakeOtherLost = false;
11551  }
11552 
11553  // Upper address can only be used with linear allocator and within single memory block.
11554  if(isUpperAddress &&
11555  (m_Algorithm != VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT || m_MaxBlockCount > 1))
11556  {
11557  return VK_ERROR_FEATURE_NOT_PRESENT;
11558  }
11559 
11560  // Validate strategy.
 // NOTE(review): the extraction dropped lines 11564 and 11566-11568 here -
 // presumably the hyperlinked `case VMA_ALLOCATION_CREATE_STRATEGY_*_BIT:`
 // labels. Verify against the original vk_mem_alloc.h.
11561  switch(strategy)
11562  {
11563  case 0:
11565  break;
11569  break;
11570  default:
11571  return VK_ERROR_FEATURE_NOT_PRESENT;
11572  }
11573 
11574  // Early reject: requested allocation size is larger that maximum block size for this block vector.
11575  if(size + 2 * VMA_DEBUG_MARGIN > m_PreferredBlockSize)
11576  {
11577  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
11578  }
11579 
11580  /*
11581  Under certain condition, this whole section can be skipped for optimization, so
11582  we move on directly to trying to allocate with canMakeOtherLost. That's the case
11583  e.g. for custom pools with linear algorithm.
11584  */
11585  if(!canMakeOtherLost || canCreateNewBlock)
11586  {
11587  // 1. Search existing allocations. Try to allocate without making other allocations lost.
11588  VmaAllocationCreateFlags allocFlagsCopy = createInfo.flags;
 // NOTE(review): dropped line 11589 - presumably clears
 // VMA_ALLOCATION_CREATE_CAN_MAKE_OTHER_LOST_BIT from allocFlagsCopy.
11590 
11591  if(m_Algorithm == VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT)
11592  {
11593  // Use only last block.
11594  if(!m_Blocks.empty())
11595  {
11596  VmaDeviceMemoryBlock* const pCurrBlock = m_Blocks.back();
11597  VMA_ASSERT(pCurrBlock);
11598  VkResult res = AllocateFromBlock(
11599  pCurrBlock,
11600  currentFrameIndex,
11601  size,
11602  alignment,
11603  allocFlagsCopy,
11604  createInfo.pUserData,
11605  suballocType,
11606  strategy,
11607  pAllocation);
11608  if(res == VK_SUCCESS)
11609  {
11610  VMA_DEBUG_LOG(" Returned from last block #%u", (uint32_t)(m_Blocks.size() - 1));
11611  return VK_SUCCESS;
11612  }
11613  }
11614  }
11615  else
11616  {
 // NOTE(review): dropped line 11617 - presumably an `if` testing
 // strategy == VMA_ALLOCATION_CREATE_STRATEGY_BEST_FIT_BIT, matching
 // the `else // WORST_FIT, FIRST_FIT` branch below.
11618  {
11619  // Forward order in m_Blocks - prefer blocks with smallest amount of free space.
11620  for(size_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex )
11621  {
11622  VmaDeviceMemoryBlock* const pCurrBlock = m_Blocks[blockIndex];
11623  VMA_ASSERT(pCurrBlock);
11624  VkResult res = AllocateFromBlock(
11625  pCurrBlock,
11626  currentFrameIndex,
11627  size,
11628  alignment,
11629  allocFlagsCopy,
11630  createInfo.pUserData,
11631  suballocType,
11632  strategy,
11633  pAllocation);
11634  if(res == VK_SUCCESS)
11635  {
11636  VMA_DEBUG_LOG(" Returned from existing block #%u", (uint32_t)blockIndex);
11637  return VK_SUCCESS;
11638  }
11639  }
11640  }
11641  else // WORST_FIT, FIRST_FIT
11642  {
11643  // Backward order in m_Blocks - prefer blocks with largest amount of free space.
11644  for(size_t blockIndex = m_Blocks.size(); blockIndex--; )
11645  {
11646  VmaDeviceMemoryBlock* const pCurrBlock = m_Blocks[blockIndex];
11647  VMA_ASSERT(pCurrBlock);
11648  VkResult res = AllocateFromBlock(
11649  pCurrBlock,
11650  currentFrameIndex,
11651  size,
11652  alignment,
11653  allocFlagsCopy,
11654  createInfo.pUserData,
11655  suballocType,
11656  strategy,
11657  pAllocation);
11658  if(res == VK_SUCCESS)
11659  {
11660  VMA_DEBUG_LOG(" Returned from existing block #%u", (uint32_t)blockIndex);
11661  return VK_SUCCESS;
11662  }
11663  }
11664  }
11665  }
11666 
11667  // 2. Try to create new block.
11668  if(canCreateNewBlock)
11669  {
11670  // Calculate optimal size for new block.
11671  VkDeviceSize newBlockSize = m_PreferredBlockSize;
11672  uint32_t newBlockSizeShift = 0;
11673  const uint32_t NEW_BLOCK_SIZE_SHIFT_MAX = 3;
11674 
11675  if(!m_ExplicitBlockSize)
11676  {
11677  // Allocate 1/8, 1/4, 1/2 as first blocks.
11678  const VkDeviceSize maxExistingBlockSize = CalcMaxBlockSize();
11679  for(uint32_t i = 0; i < NEW_BLOCK_SIZE_SHIFT_MAX; ++i)
11680  {
11681  const VkDeviceSize smallerNewBlockSize = newBlockSize / 2;
11682  if(smallerNewBlockSize > maxExistingBlockSize && smallerNewBlockSize >= size * 2)
11683  {
11684  newBlockSize = smallerNewBlockSize;
11685  ++newBlockSizeShift;
11686  }
11687  else
11688  {
11689  break;
11690  }
11691  }
11692  }
11693 
11694  size_t newBlockIndex = 0;
11695  VkResult res = CreateBlock(newBlockSize, &newBlockIndex);
11696  // Allocation of this size failed? Try 1/2, 1/4, 1/8 of m_PreferredBlockSize.
11697  if(!m_ExplicitBlockSize)
11698  {
11699  while(res < 0 && newBlockSizeShift < NEW_BLOCK_SIZE_SHIFT_MAX)
11700  {
11701  const VkDeviceSize smallerNewBlockSize = newBlockSize / 2;
11702  if(smallerNewBlockSize >= size)
11703  {
11704  newBlockSize = smallerNewBlockSize;
11705  ++newBlockSizeShift;
11706  res = CreateBlock(newBlockSize, &newBlockIndex);
11707  }
11708  else
11709  {
11710  break;
11711  }
11712  }
11713  }
11714 
11715  if(res == VK_SUCCESS)
11716  {
11717  VmaDeviceMemoryBlock* const pBlock = m_Blocks[newBlockIndex];
11718  VMA_ASSERT(pBlock->m_pMetadata->GetSize() >= size);
11719 
11720  res = AllocateFromBlock(
11721  pBlock,
11722  currentFrameIndex,
11723  size,
11724  alignment,
11725  allocFlagsCopy,
11726  createInfo.pUserData,
11727  suballocType,
11728  strategy,
11729  pAllocation);
11730  if(res == VK_SUCCESS)
11731  {
11732  VMA_DEBUG_LOG(" Created new block Size=%llu", newBlockSize);
11733  return VK_SUCCESS;
11734  }
11735  else
11736  {
11737  // Allocation from new block failed, possibly due to VMA_DEBUG_MARGIN or alignment.
11738  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
11739  }
11740  }
11741  }
11742  }
11743 
11744  // 3. Try to allocate from existing blocks with making other allocations lost.
11745  if(canMakeOtherLost)
11746  {
11747  uint32_t tryIndex = 0;
11748  for(; tryIndex < VMA_ALLOCATION_TRY_COUNT; ++tryIndex)
11749  {
11750  VmaDeviceMemoryBlock* pBestRequestBlock = VMA_NULL;
11751  VmaAllocationRequest bestRequest = {};
11752  VkDeviceSize bestRequestCost = VK_WHOLE_SIZE;
11753 
11754  // 1. Search existing allocations.
 // NOTE(review): dropped line 11755 - presumably an `if` testing
 // strategy == VMA_ALLOCATION_CREATE_STRATEGY_BEST_FIT_BIT, matching
 // the `else // WORST_FIT, FIRST_FIT` branch below.
11756  {
11757  // Forward order in m_Blocks - prefer blocks with smallest amount of free space.
11758  for(size_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex )
11759  {
11760  VmaDeviceMemoryBlock* const pCurrBlock = m_Blocks[blockIndex];
11761  VMA_ASSERT(pCurrBlock);
11762  VmaAllocationRequest currRequest = {};
11763  if(pCurrBlock->m_pMetadata->CreateAllocationRequest(
11764  currentFrameIndex,
11765  m_FrameInUseCount,
11766  m_BufferImageGranularity,
11767  size,
11768  alignment,
11769  (createInfo.flags & VMA_ALLOCATION_CREATE_UPPER_ADDRESS_BIT) != 0,
11770  suballocType,
11771  canMakeOtherLost,
11772  strategy,
11773  &currRequest))
11774  {
11775  const VkDeviceSize currRequestCost = currRequest.CalcCost();
11776  if(pBestRequestBlock == VMA_NULL ||
11777  currRequestCost < bestRequestCost)
11778  {
11779  pBestRequestBlock = pCurrBlock;
11780  bestRequest = currRequest;
11781  bestRequestCost = currRequestCost;
11782 
11783  if(bestRequestCost == 0)
11784  {
11785  break;
11786  }
11787  }
11788  }
11789  }
11790  }
11791  else // WORST_FIT, FIRST_FIT
11792  {
11793  // Backward order in m_Blocks - prefer blocks with largest amount of free space.
11794  for(size_t blockIndex = m_Blocks.size(); blockIndex--; )
11795  {
11796  VmaDeviceMemoryBlock* const pCurrBlock = m_Blocks[blockIndex];
11797  VMA_ASSERT(pCurrBlock);
11798  VmaAllocationRequest currRequest = {};
11799  if(pCurrBlock->m_pMetadata->CreateAllocationRequest(
11800  currentFrameIndex,
11801  m_FrameInUseCount,
11802  m_BufferImageGranularity,
11803  size,
11804  alignment,
11805  (createInfo.flags & VMA_ALLOCATION_CREATE_UPPER_ADDRESS_BIT) != 0,
11806  suballocType,
11807  canMakeOtherLost,
11808  strategy,
11809  &currRequest))
11810  {
11811  const VkDeviceSize currRequestCost = currRequest.CalcCost();
11812  if(pBestRequestBlock == VMA_NULL ||
11813  currRequestCost < bestRequestCost ||
 // NOTE(review): dropped line 11814 - the remainder of this condition,
 // presumably a FIRST_FIT strategy test that accepts the first match.
11815  {
11816  pBestRequestBlock = pCurrBlock;
11817  bestRequest = currRequest;
11818  bestRequestCost = currRequestCost;
11819 
11820  if(bestRequestCost == 0 ||
 // NOTE(review): dropped line 11821 - the remainder of this condition,
 // presumably the same FIRST_FIT early-exit test.
11822  {
11823  break;
11824  }
11825  }
11826  }
11827  }
11828  }
11829 
11830  if(pBestRequestBlock != VMA_NULL)
11831  {
11832  if(mapped)
11833  {
11834  VkResult res = pBestRequestBlock->Map(m_hAllocator, 1, VMA_NULL);
11835  if(res != VK_SUCCESS)
11836  {
11837  return res;
11838  }
11839  }
11840 
11841  if(pBestRequestBlock->m_pMetadata->MakeRequestedAllocationsLost(
11842  currentFrameIndex,
11843  m_FrameInUseCount,
11844  &bestRequest))
11845  {
11846  // We no longer have an empty Allocation.
11847  if(pBestRequestBlock->m_pMetadata->IsEmpty())
11848  {
11849  m_HasEmptyBlock = false;
11850  }
11851  // Allocate from this pBlock.
11852  *pAllocation = m_hAllocator->m_AllocationObjectAllocator.Allocate();
11853  (*pAllocation)->Ctor(currentFrameIndex, isUserDataString);
11854  pBestRequestBlock->m_pMetadata->Alloc(bestRequest, suballocType, size, *pAllocation);
11855  (*pAllocation)->InitBlockAllocation(
11856  pBestRequestBlock,
11857  bestRequest.offset,
11858  alignment,
11859  size,
11860  suballocType,
11861  mapped,
11862  (createInfo.flags & VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT) != 0);
11863  VMA_HEAVY_ASSERT(pBestRequestBlock->Validate());
11864  VMA_DEBUG_LOG(" Returned from existing block");
11865  (*pAllocation)->SetUserData(m_hAllocator, createInfo.pUserData);
11866  if(VMA_DEBUG_INITIALIZE_ALLOCATIONS)
11867  {
11868  m_hAllocator->FillAllocation(*pAllocation, VMA_ALLOCATION_FILL_PATTERN_CREATED);
11869  }
11870  if(IsCorruptionDetectionEnabled())
11871  {
11872  VkResult res = pBestRequestBlock->WriteMagicValueAroundAllocation(m_hAllocator, bestRequest.offset, size);
11873  VMA_ASSERT(res == VK_SUCCESS && "Couldn't map block memory to write magic value.");
11874  }
11875  return VK_SUCCESS;
11876  }
11877  // else: Some allocations must have been touched while we are here. Next try.
11878  }
11879  else
11880  {
11881  // Could not find place in any of the blocks - break outer loop.
11882  break;
11883  }
11884  }
11885  /* Maximum number of tries exceeded - a very unlike event when many other
11886  threads are simultaneously touching allocations making it impossible to make
11887  lost at the same time as we try to allocate. */
11888  if(tryIndex == VMA_ALLOCATION_TRY_COUNT)
11889  {
11890  return VK_ERROR_TOO_MANY_OBJECTS;
11891  }
11892  }
11893 
11894  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
11895 }
11896 
// Returns hAllocation's memory to its owning block and updates the
// empty-block bookkeeping (at most one empty block is kept, and never fewer
// than m_MinBlockCount blocks). If a block became redundant, its destruction
// is deferred until after the mutex is released to shorten the critical section.
void VmaBlockVector::Free(
    VmaAllocation hAllocation)
{
    VmaDeviceMemoryBlock* pBlockToDelete = VMA_NULL;

    // Scope for lock.
    {
        VmaMutexLockWrite lock(m_Mutex, m_hAllocator->m_UseMutex);

        VmaDeviceMemoryBlock* pBlock = hAllocation->GetBlock();

        // With corruption detection enabled, verify the magic values written
        // around this allocation before releasing it.
        if(IsCorruptionDetectionEnabled())
        {
            VkResult res = pBlock->ValidateMagicValueAroundAllocation(m_hAllocator, hAllocation->GetOffset(), hAllocation->GetSize());
            VMA_ASSERT(res == VK_SUCCESS && "Couldn't map block memory to validate magic value.");
        }

        // A persistently mapped allocation holds one map reference on the
        // block; release it now.
        if(hAllocation->IsPersistentMap())
        {
            pBlock->Unmap(m_hAllocator, 1);
        }

        pBlock->m_pMetadata->Free(hAllocation);
        VMA_HEAVY_ASSERT(pBlock->Validate());

        VMA_DEBUG_LOG(" Freed from MemoryTypeIndex=%u", m_MemoryTypeIndex);

        // pBlock became empty after this deallocation.
        if(pBlock->m_pMetadata->IsEmpty())
        {
            // Already has empty Allocation. We don't want to have two, so delete this one.
            if(m_HasEmptyBlock && m_Blocks.size() > m_MinBlockCount)
            {
                pBlockToDelete = pBlock;
                Remove(pBlock);
            }
            // We now have first empty block.
            else
            {
                m_HasEmptyBlock = true;
            }
        }
        // pBlock didn't become empty, but we have another empty block - find and free that one.
        // (This is optional, heuristics.)
        else if(m_HasEmptyBlock)
        {
            // Blocks are incrementally sorted by free space, so an empty block,
            // if present, tends to be at the back.
            VmaDeviceMemoryBlock* pLastBlock = m_Blocks.back();
            if(pLastBlock->m_pMetadata->IsEmpty() && m_Blocks.size() > m_MinBlockCount)
            {
                pBlockToDelete = pLastBlock;
                m_Blocks.pop_back();
                m_HasEmptyBlock = false;
            }
        }

        IncrementallySortBlocks();
    }

    // Destruction of a free Allocation. Deferred until this point, outside of mutex
    // lock, for performance reason.
    if(pBlockToDelete != VMA_NULL)
    {
        VMA_DEBUG_LOG(" Deleted empty allocation");
        pBlockToDelete->Destroy(m_hAllocator);
        vma_delete(m_hAllocator, pBlockToDelete);
    }
}
11964 
11965 VkDeviceSize VmaBlockVector::CalcMaxBlockSize() const
11966 {
11967  VkDeviceSize result = 0;
11968  for(size_t i = m_Blocks.size(); i--; )
11969  {
11970  result = VMA_MAX(result, m_Blocks[i]->m_pMetadata->GetSize());
11971  if(result >= m_PreferredBlockSize)
11972  {
11973  break;
11974  }
11975  }
11976  return result;
11977 }
11978 
11979 void VmaBlockVector::Remove(VmaDeviceMemoryBlock* pBlock)
11980 {
11981  for(uint32_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex)
11982  {
11983  if(m_Blocks[blockIndex] == pBlock)
11984  {
11985  VmaVectorRemove(m_Blocks, blockIndex);
11986  return;
11987  }
11988  }
11989  VMA_ASSERT(0);
11990 }
11991 
11992 void VmaBlockVector::IncrementallySortBlocks()
11993 {
11994  if(m_Algorithm != VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT)
11995  {
11996  // Bubble sort only until first swap.
11997  for(size_t i = 1; i < m_Blocks.size(); ++i)
11998  {
11999  if(m_Blocks[i - 1]->m_pMetadata->GetSumFreeSize() > m_Blocks[i]->m_pMetadata->GetSumFreeSize())
12000  {
12001  VMA_SWAP(m_Blocks[i - 1], m_Blocks[i]);
12002  return;
12003  }
12004  }
12005  }
12006 }
12007 
// Tries to carve `size` bytes with `alignment` out of the already existing
// block `pBlock`. On success fills *pAllocation and returns VK_SUCCESS;
// returns VK_ERROR_OUT_OF_DEVICE_MEMORY when the block has no suitable free
// range. The CAN_MAKE_OTHER_LOST path is explicitly not supported here.
VkResult VmaBlockVector::AllocateFromBlock(
    VmaDeviceMemoryBlock* pBlock,
    uint32_t currentFrameIndex,
    VkDeviceSize size,
    VkDeviceSize alignment,
    VmaAllocationCreateFlags allocFlags,
    void* pUserData,
    VmaSuballocationType suballocType,
    uint32_t strategy,
    VmaAllocation* pAllocation)
{
    VMA_ASSERT((allocFlags & VMA_ALLOCATION_CREATE_CAN_MAKE_OTHER_LOST_BIT) == 0);
    const bool isUpperAddress = (allocFlags & VMA_ALLOCATION_CREATE_UPPER_ADDRESS_BIT) != 0;
    const bool mapped = (allocFlags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0;
    const bool isUserDataString = (allocFlags & VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT) != 0;

    VmaAllocationRequest currRequest = {};
    if(pBlock->m_pMetadata->CreateAllocationRequest(
        currentFrameIndex,
        m_FrameInUseCount,
        m_BufferImageGranularity,
        size,
        alignment,
        isUpperAddress,
        suballocType,
        false, // canMakeOtherLost
        strategy,
        &currRequest))
    {
        // Allocate from pCurrBlock.
        // canMakeOtherLost was false, so the request must not evict anything.
        VMA_ASSERT(currRequest.itemsToMakeLostCount == 0);

        // Map first: if mapping fails we can bail out before any metadata
        // has been mutated.
        if(mapped)
        {
            VkResult res = pBlock->Map(m_hAllocator, 1, VMA_NULL);
            if(res != VK_SUCCESS)
            {
                return res;
            }
        }

        // We no longer have an empty Allocation.
        if(pBlock->m_pMetadata->IsEmpty())
        {
            m_HasEmptyBlock = false;
        }

        *pAllocation = m_hAllocator->m_AllocationObjectAllocator.Allocate();
        (*pAllocation)->Ctor(currentFrameIndex, isUserDataString);
        pBlock->m_pMetadata->Alloc(currRequest, suballocType, size, *pAllocation);
        (*pAllocation)->InitBlockAllocation(
            pBlock,
            currRequest.offset,
            alignment,
            size,
            suballocType,
            mapped,
            (allocFlags & VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT) != 0);
        VMA_HEAVY_ASSERT(pBlock->Validate());
        (*pAllocation)->SetUserData(m_hAllocator, pUserData);
        // Optionally fill the new memory with a debug pattern.
        if(VMA_DEBUG_INITIALIZE_ALLOCATIONS)
        {
            m_hAllocator->FillAllocation(*pAllocation, VMA_ALLOCATION_FILL_PATTERN_CREATED);
        }
        // Optionally surround the allocation with magic values so later
        // corruption checks can detect out-of-bounds writes.
        if(IsCorruptionDetectionEnabled())
        {
            VkResult res = pBlock->WriteMagicValueAroundAllocation(m_hAllocator, currRequest.offset, size);
            VMA_ASSERT(res == VK_SUCCESS && "Couldn't map block memory to write magic value.");
        }
        return VK_SUCCESS;
    }
    return VK_ERROR_OUT_OF_DEVICE_MEMORY;
}
12081 
12082 VkResult VmaBlockVector::CreateBlock(VkDeviceSize blockSize, size_t* pNewBlockIndex)
12083 {
12084  VkMemoryAllocateInfo allocInfo = { VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO };
12085  allocInfo.memoryTypeIndex = m_MemoryTypeIndex;
12086  allocInfo.allocationSize = blockSize;
12087  VkDeviceMemory mem = VK_NULL_HANDLE;
12088  VkResult res = m_hAllocator->AllocateVulkanMemory(&allocInfo, &mem);
12089  if(res < 0)
12090  {
12091  return res;
12092  }
12093 
12094  // New VkDeviceMemory successfully created.
12095 
12096  // Create new Allocation for it.
12097  VmaDeviceMemoryBlock* const pBlock = vma_new(m_hAllocator, VmaDeviceMemoryBlock)(m_hAllocator);
12098  pBlock->Init(
12099  m_hAllocator,
12100  m_hParentPool,
12101  m_MemoryTypeIndex,
12102  mem,
12103  allocInfo.allocationSize,
12104  m_NextBlockId++,
12105  m_Algorithm);
12106 
12107  m_Blocks.push_back(pBlock);
12108  if(pNewBlockIndex != VMA_NULL)
12109  {
12110  *pNewBlockIndex = m_Blocks.size() - 1;
12111  }
12112 
12113  return VK_SUCCESS;
12114 }
12115 
// Performs the CPU side of defragmentation: for every planned move, copies
// the allocation's bytes from its source block to its destination block
// through mapped pointers, invalidating before and flushing after the copy
// on non-coherent memory. Blocks mapped only for this operation are unmapped
// again at the end, regardless of success.
void VmaBlockVector::ApplyDefragmentationMovesCpu(
    class VmaBlockVectorDefragmentationContext* pDefragCtx,
    const VmaVector< VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> >& moves)
{
    const size_t blockCount = m_Blocks.size();
    const bool isNonCoherent = m_hAllocator->IsMemoryTypeNonCoherent(m_MemoryTypeIndex);

    enum BLOCK_FLAG
    {
        BLOCK_FLAG_USED = 0x00000001,
        // Set when this function mapped the block itself and must unmap it at the end.
        BLOCK_FLAG_MAPPED_FOR_DEFRAGMENTATION = 0x00000002,
    };

    // Per-block scratch state: flags plus the mapped pointer used for copying.
    struct BlockInfo
    {
        uint32_t flags;
        void* pMappedData;
    };
    VmaVector< BlockInfo, VmaStlAllocator<BlockInfo> >
        blockInfo(blockCount, BlockInfo(), VmaStlAllocator<BlockInfo>(m_hAllocator->GetAllocationCallbacks()));
    memset(blockInfo.data(), 0, blockCount * sizeof(BlockInfo));

    // Go over all moves. Mark blocks that are used with BLOCK_FLAG_USED.
    const size_t moveCount = moves.size();
    for(size_t moveIndex = 0; moveIndex < moveCount; ++moveIndex)
    {
        const VmaDefragmentationMove& move = moves[moveIndex];
        blockInfo[move.srcBlockIndex].flags |= BLOCK_FLAG_USED;
        blockInfo[move.dstBlockIndex].flags |= BLOCK_FLAG_USED;
    }

    VMA_ASSERT(pDefragCtx->res == VK_SUCCESS);

    // Go over all blocks. Get mapped pointer or map if necessary.
    for(size_t blockIndex = 0; pDefragCtx->res == VK_SUCCESS && blockIndex < blockCount; ++blockIndex)
    {
        BlockInfo& currBlockInfo = blockInfo[blockIndex];
        VmaDeviceMemoryBlock* pBlock = m_Blocks[blockIndex];
        if((currBlockInfo.flags & BLOCK_FLAG_USED) != 0)
        {
            currBlockInfo.pMappedData = pBlock->GetMappedData();
            // It is not originally mapped - map it.
            if(currBlockInfo.pMappedData == VMA_NULL)
            {
                pDefragCtx->res = pBlock->Map(m_hAllocator, 1, &currBlockInfo.pMappedData);
                if(pDefragCtx->res == VK_SUCCESS)
                {
                    currBlockInfo.flags |= BLOCK_FLAG_MAPPED_FOR_DEFRAGMENTATION;
                }
            }
        }
    }

    // Go over all moves. Do actual data transfer.
    if(pDefragCtx->res == VK_SUCCESS)
    {
        const VkDeviceSize nonCoherentAtomSize = m_hAllocator->m_PhysicalDeviceProperties.limits.nonCoherentAtomSize;
        VkMappedMemoryRange memRange = { VK_STRUCTURE_TYPE_MAPPED_MEMORY_RANGE };

        for(size_t moveIndex = 0; moveIndex < moveCount; ++moveIndex)
        {
            const VmaDefragmentationMove& move = moves[moveIndex];

            const BlockInfo& srcBlockInfo = blockInfo[move.srcBlockIndex];
            const BlockInfo& dstBlockInfo = blockInfo[move.dstBlockIndex];

            VMA_ASSERT(srcBlockInfo.pMappedData && dstBlockInfo.pMappedData);

            // Invalidate source. The range is aligned down/up to
            // nonCoherentAtomSize and clamped to the block size.
            if(isNonCoherent)
            {
                VmaDeviceMemoryBlock* const pSrcBlock = m_Blocks[move.srcBlockIndex];
                memRange.memory = pSrcBlock->GetDeviceMemory();
                memRange.offset = VmaAlignDown(move.srcOffset, nonCoherentAtomSize);
                memRange.size = VMA_MIN(
                    VmaAlignUp(move.size + (move.srcOffset - memRange.offset), nonCoherentAtomSize),
                    pSrcBlock->m_pMetadata->GetSize() - memRange.offset);
                (*m_hAllocator->GetVulkanFunctions().vkInvalidateMappedMemoryRanges)(m_hAllocator->m_hDevice, 1, &memRange);
            }

            // THE PLACE WHERE ACTUAL DATA COPY HAPPENS.
            // memmove, not memcpy: source and destination ranges may overlap
            // when an allocation moves within the same block.
            memmove(
                reinterpret_cast<char*>(dstBlockInfo.pMappedData) + move.dstOffset,
                reinterpret_cast<char*>(srcBlockInfo.pMappedData) + move.srcOffset,
                static_cast<size_t>(move.size));

            if(IsCorruptionDetectionEnabled())
            {
                // Re-write the magic values around the allocation's new location.
                VmaWriteMagicValue(dstBlockInfo.pMappedData, move.dstOffset - VMA_DEBUG_MARGIN);
                VmaWriteMagicValue(dstBlockInfo.pMappedData, move.dstOffset + move.size);
            }

            // Flush destination.
            if(isNonCoherent)
            {
                VmaDeviceMemoryBlock* const pDstBlock = m_Blocks[move.dstBlockIndex];
                memRange.memory = pDstBlock->GetDeviceMemory();
                memRange.offset = VmaAlignDown(move.dstOffset, nonCoherentAtomSize);
                memRange.size = VMA_MIN(
                    VmaAlignUp(move.size + (move.dstOffset - memRange.offset), nonCoherentAtomSize),
                    pDstBlock->m_pMetadata->GetSize() - memRange.offset);
                (*m_hAllocator->GetVulkanFunctions().vkFlushMappedMemoryRanges)(m_hAllocator->m_hDevice, 1, &memRange);
            }
        }
    }

    // Go over all blocks in reverse order. Unmap those that were mapped just for defragmentation.
    // Regardless of pCtx->res == VK_SUCCESS.
    for(size_t blockIndex = blockCount; blockIndex--; )
    {
        const BlockInfo& currBlockInfo = blockInfo[blockIndex];
        if((currBlockInfo.flags & BLOCK_FLAG_MAPPED_FOR_DEFRAGMENTATION) != 0)
        {
            VmaDeviceMemoryBlock* pBlock = m_Blocks[blockIndex];
            pBlock->Unmap(m_hAllocator, 1);
        }
    }
}
12234 
// Performs the GPU side of defragmentation: creates a temporary VkBuffer
// bound to each participating block's memory and records vkCmdCopyBuffer
// commands into `commandBuffer` for every move. On success with at least one
// move, sets pDefragCtx->res to VK_NOT_READY to signal that the command
// buffer still has to execute and the buffers must be destroyed later
// (see DefragmentationEnd).
void VmaBlockVector::ApplyDefragmentationMovesGpu(
    class VmaBlockVectorDefragmentationContext* pDefragCtx,
    const VmaVector< VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> >& moves,
    VkCommandBuffer commandBuffer)
{
    const size_t blockCount = m_Blocks.size();

    pDefragCtx->blockContexts.resize(blockCount);
    memset(pDefragCtx->blockContexts.data(), 0, blockCount * sizeof(VmaBlockDefragmentationContext));

    // Go over all moves. Mark blocks that are used with BLOCK_FLAG_USED.
    const size_t moveCount = moves.size();
    for(size_t moveIndex = 0; moveIndex < moveCount; ++moveIndex)
    {
        const VmaDefragmentationMove& move = moves[moveIndex];
        pDefragCtx->blockContexts[move.srcBlockIndex].flags |= VmaBlockDefragmentationContext::BLOCK_FLAG_USED;
        pDefragCtx->blockContexts[move.dstBlockIndex].flags |= VmaBlockDefragmentationContext::BLOCK_FLAG_USED;
    }

    VMA_ASSERT(pDefragCtx->res == VK_SUCCESS);

    // Go over all blocks. Create and bind buffer for whole block if necessary.
    {
        VkBufferCreateInfo bufCreateInfo;
        VmaFillGpuDefragmentationBufferCreateInfo(bufCreateInfo);

        for(size_t blockIndex = 0; pDefragCtx->res == VK_SUCCESS && blockIndex < blockCount; ++blockIndex)
        {
            VmaBlockDefragmentationContext& currBlockCtx = pDefragCtx->blockContexts[blockIndex];
            VmaDeviceMemoryBlock* pBlock = m_Blocks[blockIndex];
            if((currBlockCtx.flags & VmaBlockDefragmentationContext::BLOCK_FLAG_USED) != 0)
            {
                // The buffer spans the whole block so any offset within the
                // block can be addressed by the copy regions below.
                bufCreateInfo.size = pBlock->m_pMetadata->GetSize();
                pDefragCtx->res = (*m_hAllocator->GetVulkanFunctions().vkCreateBuffer)(
                    m_hAllocator->m_hDevice, &bufCreateInfo, m_hAllocator->GetAllocationCallbacks(), &currBlockCtx.hBuffer);
                if(pDefragCtx->res == VK_SUCCESS)
                {
                    pDefragCtx->res = (*m_hAllocator->GetVulkanFunctions().vkBindBufferMemory)(
                        m_hAllocator->m_hDevice, currBlockCtx.hBuffer, pBlock->GetDeviceMemory(), 0);
                }
            }
        }
    }

    // Go over all moves. Post data transfer commands to command buffer.
    if(pDefragCtx->res == VK_SUCCESS)
    {
        for(size_t moveIndex = 0; moveIndex < moveCount; ++moveIndex)
        {
            const VmaDefragmentationMove& move = moves[moveIndex];

            const VmaBlockDefragmentationContext& srcBlockCtx = pDefragCtx->blockContexts[move.srcBlockIndex];
            const VmaBlockDefragmentationContext& dstBlockCtx = pDefragCtx->blockContexts[move.dstBlockIndex];

            VMA_ASSERT(srcBlockCtx.hBuffer && dstBlockCtx.hBuffer);

            VkBufferCopy region = {
                move.srcOffset,
                move.dstOffset,
                move.size };
            (*m_hAllocator->GetVulkanFunctions().vkCmdCopyBuffer)(
                commandBuffer, srcBlockCtx.hBuffer, dstBlockCtx.hBuffer, 1, &region);
        }
    }

    // Save buffers to defrag context for later destruction.
    if(pDefragCtx->res == VK_SUCCESS && moveCount > 0)
    {
        pDefragCtx->res = VK_NOT_READY;
    }
}
12306 
12307 void VmaBlockVector::FreeEmptyBlocks(VmaDefragmentationStats* pDefragmentationStats)
12308 {
12309  m_HasEmptyBlock = false;
12310  for(size_t blockIndex = m_Blocks.size(); blockIndex--; )
12311  {
12312  VmaDeviceMemoryBlock* pBlock = m_Blocks[blockIndex];
12313  if(pBlock->m_pMetadata->IsEmpty())
12314  {
12315  if(m_Blocks.size() > m_MinBlockCount)
12316  {
12317  if(pDefragmentationStats != VMA_NULL)
12318  {
12319  ++pDefragmentationStats->deviceMemoryBlocksFreed;
12320  pDefragmentationStats->bytesFreed += pBlock->m_pMetadata->GetSize();
12321  }
12322 
12323  VmaVectorRemove(m_Blocks, blockIndex);
12324  pBlock->Destroy(m_hAllocator);
12325  vma_delete(m_hAllocator, pBlock);
12326  }
12327  else
12328  {
12329  m_HasEmptyBlock = true;
12330  }
12331  }
12332  }
12333 }
12334 
12335 #if VMA_STATS_STRING_ENABLED
12336 
// Writes this block vector's state as JSON. For a custom pool the pool
// parameters (memory type, block size, block count limits, frame-in-use
// count, algorithm) are emitted; for a default pool only the preferred block
// size. Each block's detailed map is then emitted keyed by the block id.
// Takes the read lock for the duration.
void VmaBlockVector::PrintDetailedMap(class VmaJsonWriter& json)
{
    VmaMutexLockRead lock(m_Mutex, m_hAllocator->m_UseMutex);

    json.BeginObject();

    if(m_IsCustomPool)
    {
        json.WriteString("MemoryTypeIndex");
        json.WriteNumber(m_MemoryTypeIndex);

        json.WriteString("BlockSize");
        json.WriteNumber(m_PreferredBlockSize);

        json.WriteString("BlockCount");
        json.BeginObject(true);
        // Min/Max are only written when they carry information beyond the defaults.
        if(m_MinBlockCount > 0)
        {
            json.WriteString("Min");
            json.WriteNumber((uint64_t)m_MinBlockCount);
        }
        if(m_MaxBlockCount < SIZE_MAX)
        {
            json.WriteString("Max");
            json.WriteNumber((uint64_t)m_MaxBlockCount);
        }
        json.WriteString("Cur");
        json.WriteNumber((uint64_t)m_Blocks.size());
        json.EndObject();

        if(m_FrameInUseCount > 0)
        {
            json.WriteString("FrameInUseCount");
            json.WriteNumber(m_FrameInUseCount);
        }

        if(m_Algorithm != 0)
        {
            json.WriteString("Algorithm");
            json.WriteString(VmaAlgorithmToStr(m_Algorithm));
        }
    }
    else
    {
        json.WriteString("PreferredBlockSize");
        json.WriteNumber(m_PreferredBlockSize);
    }

    json.WriteString("Blocks");
    json.BeginObject();
    for(size_t i = 0; i < m_Blocks.size(); ++i)
    {
        // The block id serves as the JSON key for each block's map.
        json.BeginString();
        json.ContinueString(m_Blocks[i]->GetId());
        json.EndString();

        m_Blocks[i]->m_pMetadata->PrintDetailedMap(json);
    }
    json.EndObject();

    json.EndObject();
}
12399 
12400 #endif // #if VMA_STATS_STRING_ENABLED
12401 
// Runs one defragmentation pass over this block vector. Chooses between the
// CPU path (memmove through mapped pointers) and the GPU path
// (vkCmdCopyBuffer into `commandBuffer`) based on the remaining budgets and
// the memory type's properties, computes the moves, applies them, and
// decrements the budgets that were consumed. Acquires the write lock; it is
// released later in DefragmentationEnd() via pCtx->mutexLocked.
void VmaBlockVector::Defragment(
    class VmaBlockVectorDefragmentationContext* pCtx,
    VmaDefragmentationStats* pStats,
    VkDeviceSize& maxCpuBytesToMove, uint32_t& maxCpuAllocationsToMove,
    VkDeviceSize& maxGpuBytesToMove, uint32_t& maxGpuAllocationsToMove,
    VkCommandBuffer commandBuffer)
{
    pCtx->res = VK_SUCCESS;

    const VkMemoryPropertyFlags memPropFlags =
        m_hAllocator->m_MemProps.memoryTypes[m_MemoryTypeIndex].propertyFlags;
    const bool isHostVisible = (memPropFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) != 0;

    // CPU defragmentation needs host-visible memory. GPU defragmentation is
    // excluded when corruption detection is enabled and when this memory
    // type is not in the allocator's GPU-defragmentation mask.
    const bool canDefragmentOnCpu = maxCpuBytesToMove > 0 && maxCpuAllocationsToMove > 0 &&
        isHostVisible;
    const bool canDefragmentOnGpu = maxGpuBytesToMove > 0 && maxGpuAllocationsToMove > 0 &&
        !IsCorruptionDetectionEnabled() &&
        ((1u << m_MemoryTypeIndex) & m_hAllocator->GetGpuDefragmentationMemoryTypeBits()) != 0;

    // There are options to defragment this memory type.
    if(canDefragmentOnCpu || canDefragmentOnGpu)
    {
        bool defragmentOnGpu;
        // There is only one option to defragment this memory type.
        if(canDefragmentOnGpu != canDefragmentOnCpu)
        {
            defragmentOnGpu = canDefragmentOnGpu;
        }
        // Both options are available: Heuristics to choose the best one.
        else
        {
            defragmentOnGpu = (memPropFlags & VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT) != 0 ||
                m_hAllocator->IsIntegratedGpu();
        }

        // GPU copies of overlapping ranges within one vkCmdCopyBuffer are not
        // allowed, so overlapping moves are only supported on the CPU path.
        bool overlappingMoveSupported = !defragmentOnGpu;

        if(m_hAllocator->m_UseMutex)
        {
            m_Mutex.LockWrite();
            pCtx->mutexLocked = true;
        }

        pCtx->Begin(overlappingMoveSupported);

        // Defragment.

        const VkDeviceSize maxBytesToMove = defragmentOnGpu ? maxGpuBytesToMove : maxCpuBytesToMove;
        const uint32_t maxAllocationsToMove = defragmentOnGpu ? maxGpuAllocationsToMove : maxCpuAllocationsToMove;
        VmaVector< VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> > moves =
            VmaVector< VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> >(VmaStlAllocator<VmaDefragmentationMove>(m_hAllocator->GetAllocationCallbacks()));
        pCtx->res = pCtx->GetAlgorithm()->Defragment(moves, maxBytesToMove, maxAllocationsToMove);

        // Accumulate statistics.
        if(pStats != VMA_NULL)
        {
            const VkDeviceSize bytesMoved = pCtx->GetAlgorithm()->GetBytesMoved();
            const uint32_t allocationsMoved = pCtx->GetAlgorithm()->GetAllocationsMoved();
            pStats->bytesMoved += bytesMoved;
            pStats->allocationsMoved += allocationsMoved;
            VMA_ASSERT(bytesMoved <= maxBytesToMove);
            VMA_ASSERT(allocationsMoved <= maxAllocationsToMove);
            // Consume the budget of whichever path was used.
            if(defragmentOnGpu)
            {
                maxGpuBytesToMove -= bytesMoved;
                maxGpuAllocationsToMove -= allocationsMoved;
            }
            else
            {
                maxCpuBytesToMove -= bytesMoved;
                maxCpuAllocationsToMove -= allocationsMoved;
            }
        }

        if(pCtx->res >= VK_SUCCESS)
        {
            if(defragmentOnGpu)
            {
                ApplyDefragmentationMovesGpu(pCtx, moves, commandBuffer);
            }
            else
            {
                ApplyDefragmentationMovesCpu(pCtx, moves);
            }
        }
    }
}
12489 
12490 void VmaBlockVector::DefragmentationEnd(
12491  class VmaBlockVectorDefragmentationContext* pCtx,
12492  VmaDefragmentationStats* pStats)
12493 {
12494  // Destroy buffers.
12495  for(size_t blockIndex = pCtx->blockContexts.size(); blockIndex--; )
12496  {
12497  VmaBlockDefragmentationContext& blockCtx = pCtx->blockContexts[blockIndex];
12498  if(blockCtx.hBuffer)
12499  {
12500  (*m_hAllocator->GetVulkanFunctions().vkDestroyBuffer)(
12501  m_hAllocator->m_hDevice, blockCtx.hBuffer, m_hAllocator->GetAllocationCallbacks());
12502  }
12503  }
12504 
12505  if(pCtx->res >= VK_SUCCESS)
12506  {
12507  FreeEmptyBlocks(pStats);
12508  }
12509 
12510  if(pCtx->mutexLocked)
12511  {
12512  VMA_ASSERT(m_hAllocator->m_UseMutex);
12513  m_Mutex.UnlockWrite();
12514  }
12515 }
12516 
12517 size_t VmaBlockVector::CalcAllocationCount() const
12518 {
12519  size_t result = 0;
12520  for(size_t i = 0; i < m_Blocks.size(); ++i)
12521  {
12522  result += m_Blocks[i]->m_pMetadata->GetAllocationCount();
12523  }
12524  return result;
12525 }
12526 
12527 bool VmaBlockVector::IsBufferImageGranularityConflictPossible() const
12528 {
12529  if(m_BufferImageGranularity == 1)
12530  {
12531  return false;
12532  }
12533  VmaSuballocationType lastSuballocType = VMA_SUBALLOCATION_TYPE_FREE;
12534  for(size_t i = 0, count = m_Blocks.size(); i < count; ++i)
12535  {
12536  VmaDeviceMemoryBlock* const pBlock = m_Blocks[i];
12537  VMA_ASSERT(m_Algorithm == 0);
12538  VmaBlockMetadata_Generic* const pMetadata = (VmaBlockMetadata_Generic*)pBlock->m_pMetadata;
12539  if(pMetadata->IsBufferImageGranularityConflictPossible(m_BufferImageGranularity, lastSuballocType))
12540  {
12541  return true;
12542  }
12543  }
12544  return false;
12545 }
12546 
12547 void VmaBlockVector::MakePoolAllocationsLost(
12548  uint32_t currentFrameIndex,
12549  size_t* pLostAllocationCount)
12550 {
12551  VmaMutexLockWrite lock(m_Mutex, m_hAllocator->m_UseMutex);
12552  size_t lostAllocationCount = 0;
12553  for(uint32_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex)
12554  {
12555  VmaDeviceMemoryBlock* const pBlock = m_Blocks[blockIndex];
12556  VMA_ASSERT(pBlock);
12557  lostAllocationCount += pBlock->m_pMetadata->MakeAllocationsLost(currentFrameIndex, m_FrameInUseCount);
12558  }
12559  if(pLostAllocationCount != VMA_NULL)
12560  {
12561  *pLostAllocationCount = lostAllocationCount;
12562  }
12563 }
12564 
12565 VkResult VmaBlockVector::CheckCorruption()
12566 {
12567  if(!IsCorruptionDetectionEnabled())
12568  {
12569  return VK_ERROR_FEATURE_NOT_PRESENT;
12570  }
12571 
12572  VmaMutexLockRead lock(m_Mutex, m_hAllocator->m_UseMutex);
12573  for(uint32_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex)
12574  {
12575  VmaDeviceMemoryBlock* const pBlock = m_Blocks[blockIndex];
12576  VMA_ASSERT(pBlock);
12577  VkResult res = pBlock->CheckCorruption(m_hAllocator);
12578  if(res != VK_SUCCESS)
12579  {
12580  return res;
12581  }
12582  }
12583  return VK_SUCCESS;
12584 }
12585 
12586 void VmaBlockVector::AddStats(VmaStats* pStats)
12587 {
12588  const uint32_t memTypeIndex = m_MemoryTypeIndex;
12589  const uint32_t memHeapIndex = m_hAllocator->MemoryTypeIndexToHeapIndex(memTypeIndex);
12590 
12591  VmaMutexLockRead lock(m_Mutex, m_hAllocator->m_UseMutex);
12592 
12593  for(uint32_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex)
12594  {
12595  const VmaDeviceMemoryBlock* const pBlock = m_Blocks[blockIndex];
12596  VMA_ASSERT(pBlock);
12597  VMA_HEAVY_ASSERT(pBlock->Validate());
12598  VmaStatInfo allocationStatInfo;
12599  pBlock->m_pMetadata->CalcAllocationStatInfo(allocationStatInfo);
12600  VmaAddStatInfo(pStats->total, allocationStatInfo);
12601  VmaAddStatInfo(pStats->memoryType[memTypeIndex], allocationStatInfo);
12602  VmaAddStatInfo(pStats->memoryHeap[memHeapIndex], allocationStatInfo);
12603  }
12604 }
12605 
12607 // VmaDefragmentationAlgorithm_Generic members definition
12608 
// Builds one BlockInfo per block of pBlockVector, remembering each block's
// original index, then sorts the infos by block pointer so AddAllocation()
// can locate a block with binary search.
// NOTE(review): overlappingMoveSupported is not used in this constructor -
// presumably consumed elsewhere in the algorithm; confirm.
VmaDefragmentationAlgorithm_Generic::VmaDefragmentationAlgorithm_Generic(
    VmaAllocator hAllocator,
    VmaBlockVector* pBlockVector,
    uint32_t currentFrameIndex,
    bool overlappingMoveSupported) :
    VmaDefragmentationAlgorithm(hAllocator, pBlockVector, currentFrameIndex),
    m_AllocationCount(0),
    m_AllAllocations(false),
    m_BytesMoved(0),
    m_AllocationsMoved(0),
    m_Blocks(VmaStlAllocator<BlockInfo*>(hAllocator->GetAllocationCallbacks()))
{
    // Create block info for each block.
    const size_t blockCount = m_pBlockVector->m_Blocks.size();
    for(size_t blockIndex = 0; blockIndex < blockCount; ++blockIndex)
    {
        BlockInfo* pBlockInfo = vma_new(m_hAllocator, BlockInfo)(m_hAllocator->GetAllocationCallbacks());
        pBlockInfo->m_OriginalBlockIndex = blockIndex;
        pBlockInfo->m_pBlock = m_pBlockVector->m_Blocks[blockIndex];
        m_Blocks.push_back(pBlockInfo);
    }

    // Sort them by m_pBlock pointer value.
    VMA_SORT(m_Blocks.begin(), m_Blocks.end(), BlockPointerLess());
}
12634 
12635 VmaDefragmentationAlgorithm_Generic::~VmaDefragmentationAlgorithm_Generic()
12636 {
12637  for(size_t i = m_Blocks.size(); i--; )
12638  {
12639  vma_delete(m_hAllocator, m_Blocks[i]);
12640  }
12641 }
12642 
12643 void VmaDefragmentationAlgorithm_Generic::AddAllocation(VmaAllocation hAlloc, VkBool32* pChanged)
12644 {
12645  // Now as we are inside VmaBlockVector::m_Mutex, we can make final check if this allocation was not lost.
12646  if(hAlloc->GetLastUseFrameIndex() != VMA_FRAME_INDEX_LOST)
12647  {
12648  VmaDeviceMemoryBlock* pBlock = hAlloc->GetBlock();
12649  BlockInfoVector::iterator it = VmaBinaryFindFirstNotLess(m_Blocks.begin(), m_Blocks.end(), pBlock, BlockPointerLess());
12650  if(it != m_Blocks.end() && (*it)->m_pBlock == pBlock)
12651  {
12652  AllocationInfo allocInfo = AllocationInfo(hAlloc, pChanged);
12653  (*it)->m_Allocations.push_back(allocInfo);
12654  }
12655  else
12656  {
12657  VMA_ASSERT(0);
12658  }
12659 
12660  ++m_AllocationCount;
12661  }
12662 }
12663 
// Computes one round of defragmentation moves: walks allocations starting
// from the most "source" blocks (end of m_Blocks) and tries to re-place each
// into the earliest block that can hold it. Every accepted move is appended
// to `moves` and immediately reflected in block metadata so subsequent
// placement requests see it. Returns VK_SUCCESS when done or when the byte
// or allocation budget would be exceeded.
VkResult VmaDefragmentationAlgorithm_Generic::DefragmentRound(
    VmaVector< VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> >& moves,
    VkDeviceSize maxBytesToMove,
    uint32_t maxAllocationsToMove)
{
    if(m_Blocks.empty())
    {
        return VK_SUCCESS;
    }

    // This is a choice based on research.
    // Option 1:
    uint32_t strategy = VMA_ALLOCATION_CREATE_STRATEGY_MIN_TIME_BIT;
    // Option 2:
    //uint32_t strategy = VMA_ALLOCATION_CREATE_STRATEGY_MIN_MEMORY_BIT;
    // Option 3:
    //uint32_t strategy = VMA_ALLOCATION_CREATE_STRATEGY_MIN_FRAGMENTATION_BIT;

    size_t srcBlockMinIndex = 0;
    // When FAST_ALGORITHM, move allocations from only last out of blocks that contain non-movable allocations.
    /*
    if(m_AlgorithmFlags & VMA_DEFRAGMENTATION_FAST_ALGORITHM_BIT)
    {
        const size_t blocksWithNonMovableCount = CalcBlocksWithNonMovableCount();
        if(blocksWithNonMovableCount > 0)
        {
            srcBlockMinIndex = blocksWithNonMovableCount - 1;
        }
    }
    */

    size_t srcBlockIndex = m_Blocks.size() - 1;
    // SIZE_MAX is a sentinel meaning "reset to the last allocation of the
    // current block" inside the scan loop below.
    size_t srcAllocIndex = SIZE_MAX;
    for(;;)
    {
        // 1. Find next allocation to move.
        // 1.1. Start from last to first m_Blocks - they are sorted from most "destination" to most "source".
        // 1.2. Then start from last to first m_Allocations.
        while(srcAllocIndex >= m_Blocks[srcBlockIndex]->m_Allocations.size())
        {
            if(m_Blocks[srcBlockIndex]->m_Allocations.empty())
            {
                // Finished: no more allocations to process.
                if(srcBlockIndex == srcBlockMinIndex)
                {
                    return VK_SUCCESS;
                }
                else
                {
                    --srcBlockIndex;
                    srcAllocIndex = SIZE_MAX;
                }
            }
            else
            {
                srcAllocIndex = m_Blocks[srcBlockIndex]->m_Allocations.size() - 1;
            }
        }

        BlockInfo* pSrcBlockInfo = m_Blocks[srcBlockIndex];
        AllocationInfo& allocInfo = pSrcBlockInfo->m_Allocations[srcAllocIndex];

        const VkDeviceSize size = allocInfo.m_hAllocation->GetSize();
        const VkDeviceSize srcOffset = allocInfo.m_hAllocation->GetOffset();
        const VkDeviceSize alignment = allocInfo.m_hAllocation->GetAlignment();
        const VmaSuballocationType suballocType = allocInfo.m_hAllocation->GetSuballocationType();

        // 2. Try to find new place for this allocation in preceding or current block.
        for(size_t dstBlockIndex = 0; dstBlockIndex <= srcBlockIndex; ++dstBlockIndex)
        {
            BlockInfo* pDstBlockInfo = m_Blocks[dstBlockIndex];
            VmaAllocationRequest dstAllocRequest;
            if(pDstBlockInfo->m_pBlock->m_pMetadata->CreateAllocationRequest(
                m_CurrentFrameIndex,
                m_pBlockVector->GetFrameInUseCount(),
                m_pBlockVector->GetBufferImageGranularity(),
                size,
                alignment,
                false, // upperAddress
                suballocType,
                false, // canMakeOtherLost
                strategy,
                &dstAllocRequest) &&
            MoveMakesSense(
                dstBlockIndex, dstAllocRequest.offset, srcBlockIndex, srcOffset))
            {
                VMA_ASSERT(dstAllocRequest.itemsToMakeLostCount == 0);

                // Reached limit on number of allocations or bytes to move.
                if((m_AllocationsMoved + 1 > maxAllocationsToMove) ||
                    (m_BytesMoved + size > maxBytesToMove))
                {
                    return VK_SUCCESS;
                }

                // Record the move for the caller to execute...
                VmaDefragmentationMove move;
                move.srcBlockIndex = pSrcBlockInfo->m_OriginalBlockIndex;
                move.dstBlockIndex = pDstBlockInfo->m_OriginalBlockIndex;
                move.srcOffset = srcOffset;
                move.dstOffset = dstAllocRequest.offset;
                move.size = size;
                moves.push_back(move);

                // ...and update the metadata right away so later requests in
                // this round account for the relocated allocation.
                pDstBlockInfo->m_pBlock->m_pMetadata->Alloc(
                    dstAllocRequest,
                    suballocType,
                    size,
                    allocInfo.m_hAllocation);
                pSrcBlockInfo->m_pBlock->m_pMetadata->FreeAtOffset(srcOffset);

                allocInfo.m_hAllocation->ChangeBlockAllocation(m_hAllocator, pDstBlockInfo->m_pBlock, dstAllocRequest.offset);

                if(allocInfo.m_pChanged != VMA_NULL)
                {
                    *allocInfo.m_pChanged = VK_TRUE;
                }

                ++m_AllocationsMoved;
                m_BytesMoved += size;

                VmaVectorRemove(pSrcBlockInfo->m_Allocations, srcAllocIndex);

                break;
            }
        }

        // If not processed, this allocInfo remains in pBlockInfo->m_Allocations for next round.

        if(srcAllocIndex > 0)
        {
            --srcAllocIndex;
        }
        else
        {
            if(srcBlockIndex > 0)
            {
                --srcBlockIndex;
                srcAllocIndex = SIZE_MAX;
            }
            else
            {
                return VK_SUCCESS;
            }
        }
    }
}
12810 
12811 size_t VmaDefragmentationAlgorithm_Generic::CalcBlocksWithNonMovableCount() const
12812 {
12813  size_t result = 0;
12814  for(size_t i = 0; i < m_Blocks.size(); ++i)
12815  {
12816  if(m_Blocks[i]->m_HasNonMovableAllocations)
12817  {
12818  ++result;
12819  }
12820  }
12821  return result;
12822 }
12823 
12824 VkResult VmaDefragmentationAlgorithm_Generic::Defragment(
12825  VmaVector< VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> >& moves,
12826  VkDeviceSize maxBytesToMove,
12827  uint32_t maxAllocationsToMove)
12828 {
12829  if(!m_AllAllocations && m_AllocationCount == 0)
12830  {
12831  return VK_SUCCESS;
12832  }
12833 
12834  const size_t blockCount = m_Blocks.size();
12835  for(size_t blockIndex = 0; blockIndex < blockCount; ++blockIndex)
12836  {
12837  BlockInfo* pBlockInfo = m_Blocks[blockIndex];
12838 
12839  if(m_AllAllocations)
12840  {
12841  VmaBlockMetadata_Generic* pMetadata = (VmaBlockMetadata_Generic*)pBlockInfo->m_pBlock->m_pMetadata;
12842  for(VmaSuballocationList::const_iterator it = pMetadata->m_Suballocations.begin();
12843  it != pMetadata->m_Suballocations.end();
12844  ++it)
12845  {
12846  if(it->type != VMA_SUBALLOCATION_TYPE_FREE)
12847  {
12848  AllocationInfo allocInfo = AllocationInfo(it->hAllocation, VMA_NULL);
12849  pBlockInfo->m_Allocations.push_back(allocInfo);
12850  }
12851  }
12852  }
12853 
12854  pBlockInfo->CalcHasNonMovableAllocations();
12855 
12856  // This is a choice based on research.
12857  // Option 1:
12858  pBlockInfo->SortAllocationsByOffsetDescending();
12859  // Option 2:
12860  //pBlockInfo->SortAllocationsBySizeDescending();
12861  }
12862 
12863  // Sort m_Blocks this time by the main criterium, from most "destination" to most "source" blocks.
12864  VMA_SORT(m_Blocks.begin(), m_Blocks.end(), BlockInfoCompareMoveDestination());
12865 
12866  // This is a choice based on research.
12867  const uint32_t roundCount = 2;
12868 
12869  // Execute defragmentation rounds (the main part).
12870  VkResult result = VK_SUCCESS;
12871  for(uint32_t round = 0; (round < roundCount) && (result == VK_SUCCESS); ++round)
12872  {
12873  result = DefragmentRound(moves, maxBytesToMove, maxAllocationsToMove);
12874  }
12875 
12876  return result;
12877 }
12878 
12879 bool VmaDefragmentationAlgorithm_Generic::MoveMakesSense(
12880  size_t dstBlockIndex, VkDeviceSize dstOffset,
12881  size_t srcBlockIndex, VkDeviceSize srcOffset)
12882 {
12883  if(dstBlockIndex < srcBlockIndex)
12884  {
12885  return true;
12886  }
12887  if(dstBlockIndex > srcBlockIndex)
12888  {
12889  return false;
12890  }
12891  if(dstOffset < srcOffset)
12892  {
12893  return true;
12894  }
12895  return false;
12896 }
12897 
12899 // VmaDefragmentationAlgorithm_Fast
12900 
// Constructs the "fast" defragmentation algorithm.
// This algorithm is only selected (see VmaBlockVectorDefragmentationContext::Begin)
// when every allocation in the block vector is movable; it also requires
// VMA_DEBUG_MARGIN == 0, which is asserted below.
VmaDefragmentationAlgorithm_Fast::VmaDefragmentationAlgorithm_Fast(
    VmaAllocator hAllocator,
    VmaBlockVector* pBlockVector,
    uint32_t currentFrameIndex,
    bool overlappingMoveSupported) :
    VmaDefragmentationAlgorithm(hAllocator, pBlockVector, currentFrameIndex),
    m_OverlappingMoveSupported(overlappingMoveSupported),
    m_AllocationCount(0),
    m_AllAllocations(false),
    m_BytesMoved(0),
    m_AllocationsMoved(0),
    m_BlockInfos(VmaStlAllocator<BlockInfo>(hAllocator->GetAllocationCallbacks()))
{
    // Packing suballocations back-to-back is only valid without debug margins.
    VMA_ASSERT(VMA_DEBUG_MARGIN == 0);

}
12917 
// Nothing to release explicitly; members clean themselves up in their own
// destructors.
VmaDefragmentationAlgorithm_Fast::~VmaDefragmentationAlgorithm_Fast()
{
}
12921 
/*
Fast defragmentation: compacts all allocations towards the front of the block
vector in a single pass, filling blocks in "most destination first" order and
recording every relocation into `moves`. Assumes every allocation is movable
(asserted) and that block metadata was stripped of FREE entries beforehand
(PreprocessMetadata); metadata is rebuilt by PostprocessMetadata at the end.
Stops early when maxBytesToMove or maxAllocationsToMove would be exceeded.
*/
VkResult VmaDefragmentationAlgorithm_Fast::Defragment(
    VmaVector< VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> >& moves,
    VkDeviceSize maxBytesToMove,
    uint32_t maxAllocationsToMove)
{
    VMA_ASSERT(m_AllAllocations || m_pBlockVector->CalcAllocationCount() == m_AllocationCount);

    const size_t blockCount = m_pBlockVector->GetBlockCount();
    if(blockCount == 0 || maxBytesToMove == 0 || maxAllocationsToMove == 0)
    {
        return VK_SUCCESS;
    }

    PreprocessMetadata();

    // Sort blocks in order from most destination.

    m_BlockInfos.resize(blockCount);
    for(size_t i = 0; i < blockCount; ++i)
    {
        m_BlockInfos[i].origBlockIndex = i;
    }

    // Blocks with the least free space come first: they are the most "full"
    // and therefore the preferred destinations.
    VMA_SORT(m_BlockInfos.begin(), m_BlockInfos.end(), [this](const BlockInfo& lhs, const BlockInfo& rhs) -> bool {
        return m_pBlockVector->GetBlock(lhs.origBlockIndex)->m_pMetadata->GetSumFreeSize() <
            m_pBlockVector->GetBlock(rhs.origBlockIndex)->m_pMetadata->GetSumFreeSize();
    });

    // THE MAIN ALGORITHM

    FreeSpaceDatabase freeSpaceDb;

    // Destination "cursor": current destination block and the next free offset
    // inside it. Advanced monotonically as allocations are packed.
    size_t dstBlockInfoIndex = 0;
    size_t dstOrigBlockIndex = m_BlockInfos[dstBlockInfoIndex].origBlockIndex;
    VmaDeviceMemoryBlock* pDstBlock = m_pBlockVector->GetBlock(dstOrigBlockIndex);
    VmaBlockMetadata_Generic* pDstMetadata = (VmaBlockMetadata_Generic*)pDstBlock->m_pMetadata;
    VkDeviceSize dstBlockSize = pDstMetadata->GetSize();
    VkDeviceSize dstOffset = 0;

    bool end = false;
    for(size_t srcBlockInfoIndex = 0; !end && srcBlockInfoIndex < blockCount; ++srcBlockInfoIndex)
    {
        const size_t srcOrigBlockIndex = m_BlockInfos[srcBlockInfoIndex].origBlockIndex;
        VmaDeviceMemoryBlock* const pSrcBlock = m_pBlockVector->GetBlock(srcOrigBlockIndex);
        VmaBlockMetadata_Generic* const pSrcMetadata = (VmaBlockMetadata_Generic*)pSrcBlock->m_pMetadata;
        for(VmaSuballocationList::iterator srcSuballocIt = pSrcMetadata->m_Suballocations.begin();
            !end && srcSuballocIt != pSrcMetadata->m_Suballocations.end(); )
        {
            VmaAllocation_T* const pAlloc = srcSuballocIt->hAllocation;
            const VkDeviceSize srcAllocAlignment = pAlloc->GetAlignment();
            const VkDeviceSize srcAllocSize = srcSuballocIt->size;
            // Stop before exceeding the caller-imposed move budget.
            if(m_AllocationsMoved == maxAllocationsToMove ||
                m_BytesMoved + srcAllocSize > maxBytesToMove)
            {
                end = true;
                break;
            }
            const VkDeviceSize srcAllocOffset = srcSuballocIt->offset;

            // Try to place it in one of free spaces from the database.
            size_t freeSpaceInfoIndex;
            VkDeviceSize dstAllocOffset;
            if(freeSpaceDb.Fetch(srcAllocAlignment, srcAllocSize,
                freeSpaceInfoIndex, dstAllocOffset))
            {
                size_t freeSpaceOrigBlockIndex = m_BlockInfos[freeSpaceInfoIndex].origBlockIndex;
                VmaDeviceMemoryBlock* pFreeSpaceBlock = m_pBlockVector->GetBlock(freeSpaceOrigBlockIndex);
                VmaBlockMetadata_Generic* pFreeSpaceMetadata = (VmaBlockMetadata_Generic*)pFreeSpaceBlock->m_pMetadata;

                // Same block
                if(freeSpaceInfoIndex == srcBlockInfoIndex)
                {
                    VMA_ASSERT(dstAllocOffset <= srcAllocOffset);

                    // MOVE OPTION 1: Move the allocation inside the same block by decreasing offset.

                    VmaSuballocation suballoc = *srcSuballocIt;
                    suballoc.offset = dstAllocOffset;
                    suballoc.hAllocation->ChangeOffset(dstAllocOffset);
                    m_BytesMoved += srcAllocSize;
                    ++m_AllocationsMoved;

                    // Erase-and-reinsert: capture the successor first so the
                    // iterator stays valid across the erase.
                    VmaSuballocationList::iterator nextSuballocIt = srcSuballocIt;
                    ++nextSuballocIt;
                    pSrcMetadata->m_Suballocations.erase(srcSuballocIt);
                    srcSuballocIt = nextSuballocIt;

                    InsertSuballoc(pFreeSpaceMetadata, suballoc);

                    VmaDefragmentationMove move = {
                        srcOrigBlockIndex, freeSpaceOrigBlockIndex,
                        srcAllocOffset, dstAllocOffset,
                        srcAllocSize };
                    moves.push_back(move);
                }
                // Different block
                else
                {
                    // MOVE OPTION 2: Move the allocation to a different block.

                    VMA_ASSERT(freeSpaceInfoIndex < srcBlockInfoIndex);

                    VmaSuballocation suballoc = *srcSuballocIt;
                    suballoc.offset = dstAllocOffset;
                    suballoc.hAllocation->ChangeBlockAllocation(m_hAllocator, pFreeSpaceBlock, dstAllocOffset);
                    m_BytesMoved += srcAllocSize;
                    ++m_AllocationsMoved;

                    VmaSuballocationList::iterator nextSuballocIt = srcSuballocIt;
                    ++nextSuballocIt;
                    pSrcMetadata->m_Suballocations.erase(srcSuballocIt);
                    srcSuballocIt = nextSuballocIt;

                    InsertSuballoc(pFreeSpaceMetadata, suballoc);

                    VmaDefragmentationMove move = {
                        srcOrigBlockIndex, freeSpaceOrigBlockIndex,
                        srcAllocOffset, dstAllocOffset,
                        srcAllocSize };
                    moves.push_back(move);
                }
            }
            else
            {
                dstAllocOffset = VmaAlignUp(dstOffset, srcAllocAlignment);

                // If the allocation doesn't fit before the end of dstBlock, forward to next block.
                while(dstBlockInfoIndex < srcBlockInfoIndex &&
                    dstAllocOffset + srcAllocSize > dstBlockSize)
                {
                    // But before that, register remaining free space at the end of dst block.
                    freeSpaceDb.Register(dstBlockInfoIndex, dstOffset, dstBlockSize - dstOffset);

                    ++dstBlockInfoIndex;
                    dstOrigBlockIndex = m_BlockInfos[dstBlockInfoIndex].origBlockIndex;
                    pDstBlock = m_pBlockVector->GetBlock(dstOrigBlockIndex);
                    pDstMetadata = (VmaBlockMetadata_Generic*)pDstBlock->m_pMetadata;
                    dstBlockSize = pDstMetadata->GetSize();
                    dstOffset = 0;
                    dstAllocOffset = 0;
                }

                // Same block
                if(dstBlockInfoIndex == srcBlockInfoIndex)
                {
                    VMA_ASSERT(dstAllocOffset <= srcAllocOffset);

                    const bool overlap = dstAllocOffset + srcAllocSize > srcAllocOffset;

                    bool skipOver = overlap;
                    if(overlap && m_OverlappingMoveSupported && dstAllocOffset < srcAllocOffset)
                    {
                        // If destination and source place overlap, skip if it would move it
                        // by only < 1/64 of its size.
                        skipOver = (srcAllocOffset - dstAllocOffset) * 64 < srcAllocSize;
                    }

                    if(skipOver)
                    {
                        // Leave the allocation where it is; remember the gap
                        // before it as reusable free space.
                        freeSpaceDb.Register(dstBlockInfoIndex, dstOffset, srcAllocOffset - dstOffset);

                        dstOffset = srcAllocOffset + srcAllocSize;
                        ++srcSuballocIt;
                    }
                    // MOVE OPTION 1: Move the allocation inside the same block by decreasing offset.
                    else
                    {
                        srcSuballocIt->offset = dstAllocOffset;
                        srcSuballocIt->hAllocation->ChangeOffset(dstAllocOffset);
                        dstOffset = dstAllocOffset + srcAllocSize;
                        m_BytesMoved += srcAllocSize;
                        ++m_AllocationsMoved;
                        ++srcSuballocIt;
                        VmaDefragmentationMove move = {
                            srcOrigBlockIndex, dstOrigBlockIndex,
                            srcAllocOffset, dstAllocOffset,
                            srcAllocSize };
                        moves.push_back(move);
                    }
                }
                // Different block
                else
                {
                    // MOVE OPTION 2: Move the allocation to a different block.

                    VMA_ASSERT(dstBlockInfoIndex < srcBlockInfoIndex);
                    VMA_ASSERT(dstAllocOffset + srcAllocSize <= dstBlockSize);

                    VmaSuballocation suballoc = *srcSuballocIt;
                    suballoc.offset = dstAllocOffset;
                    suballoc.hAllocation->ChangeBlockAllocation(m_hAllocator, pDstBlock, dstAllocOffset);
                    dstOffset = dstAllocOffset + srcAllocSize;
                    m_BytesMoved += srcAllocSize;
                    ++m_AllocationsMoved;

                    VmaSuballocationList::iterator nextSuballocIt = srcSuballocIt;
                    ++nextSuballocIt;
                    pSrcMetadata->m_Suballocations.erase(srcSuballocIt);
                    srcSuballocIt = nextSuballocIt;

                    // Destination list is filled front-to-back, so appending
                    // keeps it sorted by offset.
                    pDstMetadata->m_Suballocations.push_back(suballoc);

                    VmaDefragmentationMove move = {
                        srcOrigBlockIndex, dstOrigBlockIndex,
                        srcAllocOffset, dstAllocOffset,
                        srcAllocSize };
                    moves.push_back(move);
                }
            }
        }
    }

    m_BlockInfos.clear();

    PostprocessMetadata();

    return VK_SUCCESS;
}
13140 
// Strips every FREE suballocation from each block's metadata so that only used
// suballocations remain, and resets the free-space bookkeeping (m_FreeCount,
// m_SumFreeSize, m_FreeSuballocationsBySize). The metadata is intentionally
// left in an inconsistent state; PostprocessMetadata() rebuilds it after the
// moves are done.
void VmaDefragmentationAlgorithm_Fast::PreprocessMetadata()
{
    const size_t blockCount = m_pBlockVector->GetBlockCount();
    for(size_t blockIndex = 0; blockIndex < blockCount; ++blockIndex)
    {
        VmaBlockMetadata_Generic* const pMetadata =
            (VmaBlockMetadata_Generic*)m_pBlockVector->GetBlock(blockIndex)->m_pMetadata;
        pMetadata->m_FreeCount = 0;
        pMetadata->m_SumFreeSize = pMetadata->GetSize();
        pMetadata->m_FreeSuballocationsBySize.clear();
        // Erase FREE items while iterating: capture the successor before the
        // erase so the iterator stays valid.
        for(VmaSuballocationList::iterator it = pMetadata->m_Suballocations.begin();
            it != pMetadata->m_Suballocations.end(); )
        {
            if(it->type == VMA_SUBALLOCATION_TYPE_FREE)
            {
                VmaSuballocationList::iterator nextIt = it;
                ++nextIt;
                pMetadata->m_Suballocations.erase(it);
                it = nextIt;
            }
            else
            {
                ++it;
            }
        }
    }
}
13168 
13169 void VmaDefragmentationAlgorithm_Fast::PostprocessMetadata()
13170 {
13171  const size_t blockCount = m_pBlockVector->GetBlockCount();
13172  for(size_t blockIndex = 0; blockIndex < blockCount; ++blockIndex)
13173  {
13174  VmaBlockMetadata_Generic* const pMetadata =
13175  (VmaBlockMetadata_Generic*)m_pBlockVector->GetBlock(blockIndex)->m_pMetadata;
13176  const VkDeviceSize blockSize = pMetadata->GetSize();
13177 
13178  // No allocations in this block - entire area is free.
13179  if(pMetadata->m_Suballocations.empty())
13180  {
13181  pMetadata->m_FreeCount = 1;
13182  //pMetadata->m_SumFreeSize is already set to blockSize.
13183  VmaSuballocation suballoc = {
13184  0, // offset
13185  blockSize, // size
13186  VMA_NULL, // hAllocation
13187  VMA_SUBALLOCATION_TYPE_FREE };
13188  pMetadata->m_Suballocations.push_back(suballoc);
13189  pMetadata->RegisterFreeSuballocation(pMetadata->m_Suballocations.begin());
13190  }
13191  // There are some allocations in this block.
13192  else
13193  {
13194  VkDeviceSize offset = 0;
13195  VmaSuballocationList::iterator it;
13196  for(it = pMetadata->m_Suballocations.begin();
13197  it != pMetadata->m_Suballocations.end();
13198  ++it)
13199  {
13200  VMA_ASSERT(it->type != VMA_SUBALLOCATION_TYPE_FREE);
13201  VMA_ASSERT(it->offset >= offset);
13202 
13203  // Need to insert preceding free space.
13204  if(it->offset > offset)
13205  {
13206  ++pMetadata->m_FreeCount;
13207  const VkDeviceSize freeSize = it->offset - offset;
13208  VmaSuballocation suballoc = {
13209  offset, // offset
13210  freeSize, // size
13211  VMA_NULL, // hAllocation
13212  VMA_SUBALLOCATION_TYPE_FREE };
13213  VmaSuballocationList::iterator precedingFreeIt = pMetadata->m_Suballocations.insert(it, suballoc);
13214  if(freeSize >= VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER)
13215  {
13216  pMetadata->m_FreeSuballocationsBySize.push_back(precedingFreeIt);
13217  }
13218  }
13219 
13220  pMetadata->m_SumFreeSize -= it->size;
13221  offset = it->offset + it->size;
13222  }
13223 
13224  // Need to insert trailing free space.
13225  if(offset < blockSize)
13226  {
13227  ++pMetadata->m_FreeCount;
13228  const VkDeviceSize freeSize = blockSize - offset;
13229  VmaSuballocation suballoc = {
13230  offset, // offset
13231  freeSize, // size
13232  VMA_NULL, // hAllocation
13233  VMA_SUBALLOCATION_TYPE_FREE };
13234  VMA_ASSERT(it == pMetadata->m_Suballocations.end());
13235  VmaSuballocationList::iterator trailingFreeIt = pMetadata->m_Suballocations.insert(it, suballoc);
13236  if(freeSize > VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER)
13237  {
13238  pMetadata->m_FreeSuballocationsBySize.push_back(trailingFreeIt);
13239  }
13240  }
13241 
13242  VMA_SORT(
13243  pMetadata->m_FreeSuballocationsBySize.begin(),
13244  pMetadata->m_FreeSuballocationsBySize.end(),
13245  VmaSuballocationItemSizeLess());
13246  }
13247 
13248  VMA_HEAVY_ASSERT(pMetadata->Validate());
13249  }
13250 }
13251 
13252 void VmaDefragmentationAlgorithm_Fast::InsertSuballoc(VmaBlockMetadata_Generic* pMetadata, const VmaSuballocation& suballoc)
13253 {
13254  // TODO: Optimize somehow. Remember iterator instead of searching for it linearly.
13255  VmaSuballocationList::iterator it = pMetadata->m_Suballocations.begin();
13256  while(it != pMetadata->m_Suballocations.end())
13257  {
13258  if(it->offset < suballoc.offset)
13259  {
13260  ++it;
13261  }
13262  }
13263  pMetadata->m_Suballocations.insert(it, suballoc);
13264 }
13265 
13267 // VmaBlockVectorDefragmentationContext
13268 
// Context for defragmenting one block vector (either a custom pool's vector,
// when hCustomPool != VMA_NULL, or a default per-memory-type vector).
// The actual algorithm object is created later, in Begin(), once it is known
// whether all allocations participate.
VmaBlockVectorDefragmentationContext::VmaBlockVectorDefragmentationContext(
    VmaAllocator hAllocator,
    VmaPool hCustomPool,
    VmaBlockVector* pBlockVector,
    uint32_t currFrameIndex) :
    res(VK_SUCCESS),
    mutexLocked(false),
    blockContexts(VmaStlAllocator<VmaBlockDefragmentationContext>(hAllocator->GetAllocationCallbacks())),
    m_hAllocator(hAllocator),
    m_hCustomPool(hCustomPool),
    m_pBlockVector(pBlockVector),
    m_CurrFrameIndex(currFrameIndex),
    m_pAlgorithm(VMA_NULL),
    m_Allocations(VmaStlAllocator<AllocInfo>(hAllocator->GetAllocationCallbacks())),
    m_AllAllocations(false)
{
}
13286 
VmaBlockVectorDefragmentationContext::~VmaBlockVectorDefragmentationContext()
{
    // m_pAlgorithm is owned by this context (allocated in Begin()). It may
    // still be VMA_NULL if Begin() was never called - presumably vma_delete
    // handles a null pointer; TODO confirm against its definition.
    vma_delete(m_hAllocator, m_pAlgorithm);
}
13291 
13292 void VmaBlockVectorDefragmentationContext::AddAllocation(VmaAllocation hAlloc, VkBool32* pChanged)
13293 {
13294  AllocInfo info = { hAlloc, pChanged };
13295  m_Allocations.push_back(info);
13296 }
13297 
13298 void VmaBlockVectorDefragmentationContext::Begin(bool overlappingMoveSupported)
13299 {
13300  const bool allAllocations = m_AllAllocations ||
13301  m_Allocations.size() == m_pBlockVector->CalcAllocationCount();
13302 
13303  /********************************
13304  HERE IS THE CHOICE OF DEFRAGMENTATION ALGORITHM.
13305  ********************************/
13306 
13307  /*
13308  Fast algorithm is supported only when certain criteria are met:
13309  - VMA_DEBUG_MARGIN is 0.
13310  - All allocations in this block vector are moveable.
13311  - There is no possibility of image/buffer granularity conflict.
13312  */
13313  if(VMA_DEBUG_MARGIN == 0 &&
13314  allAllocations &&
13315  !m_pBlockVector->IsBufferImageGranularityConflictPossible())
13316  {
13317  m_pAlgorithm = vma_new(m_hAllocator, VmaDefragmentationAlgorithm_Fast)(
13318  m_hAllocator, m_pBlockVector, m_CurrFrameIndex, overlappingMoveSupported);
13319  }
13320  else
13321  {
13322  m_pAlgorithm = vma_new(m_hAllocator, VmaDefragmentationAlgorithm_Generic)(
13323  m_hAllocator, m_pBlockVector, m_CurrFrameIndex, overlappingMoveSupported);
13324  }
13325 
13326  if(allAllocations)
13327  {
13328  m_pAlgorithm->AddAll();
13329  }
13330  else
13331  {
13332  for(size_t i = 0, count = m_Allocations.size(); i < count; ++i)
13333  {
13334  m_pAlgorithm->AddAllocation(m_Allocations[i].hAlloc, m_Allocations[i].pChanged);
13335  }
13336  }
13337 }
13338 
13340 // VmaDefragmentationContext
13341 
// Top-level defragmentation context spanning all pools of the allocator.
VmaDefragmentationContext_T::VmaDefragmentationContext_T(
    VmaAllocator hAllocator,
    uint32_t currFrameIndex,
    uint32_t flags,
    VmaDefragmentationStats* pStats) :
    m_hAllocator(hAllocator),
    m_CurrFrameIndex(currFrameIndex),
    m_Flags(flags),
    m_pStats(pStats),
    m_CustomPoolContexts(VmaStlAllocator<VmaBlockVectorDefragmentationContext*>(hAllocator->GetAllocationCallbacks()))
{
    // Default-pool contexts are created lazily in AddAllocations(); start with
    // every slot null.
    memset(m_DefaultPoolContexts, 0, sizeof(m_DefaultPoolContexts));
}
13355 
VmaDefragmentationContext_T::~VmaDefragmentationContext_T()
{
    // Finish defragmentation of custom pools, in reverse order of creation,
    // and release their contexts.
    for(size_t i = m_CustomPoolContexts.size(); i--; )
    {
        VmaBlockVectorDefragmentationContext* pBlockVectorCtx = m_CustomPoolContexts[i];
        pBlockVectorCtx->GetBlockVector()->DefragmentationEnd(pBlockVectorCtx, m_pStats);
        vma_delete(m_hAllocator, pBlockVectorCtx);
    }
    // Same for default pools; entries stay null for memory types that never
    // had an allocation added.
    for(size_t i = m_hAllocator->m_MemProps.memoryTypeCount; i--; )
    {
        VmaBlockVectorDefragmentationContext* pBlockVectorCtx = m_DefaultPoolContexts[i];
        if(pBlockVectorCtx)
        {
            pBlockVectorCtx->GetBlockVector()->DefragmentationEnd(pBlockVectorCtx, m_pStats);
            vma_delete(m_hAllocator, pBlockVectorCtx);
        }
    }
}
13374 
13375 void VmaDefragmentationContext_T::AddPools(uint32_t poolCount, VmaPool* pPools)
13376 {
13377  for(uint32_t poolIndex = 0; poolIndex < poolCount; ++poolIndex)
13378  {
13379  VmaPool pool = pPools[poolIndex];
13380  VMA_ASSERT(pool);
13381  // Pools with algorithm other than default are not defragmented.
13382  if(pool->m_BlockVector.GetAlgorithm() == 0)
13383  {
13384  VmaBlockVectorDefragmentationContext* pBlockVectorDefragCtx = VMA_NULL;
13385 
13386  for(size_t i = m_CustomPoolContexts.size(); i--; )
13387  {
13388  if(m_CustomPoolContexts[i]->GetCustomPool() == pool)
13389  {
13390  pBlockVectorDefragCtx = m_CustomPoolContexts[i];
13391  break;
13392  }
13393  }
13394 
13395  if(!pBlockVectorDefragCtx)
13396  {
13397  pBlockVectorDefragCtx = vma_new(m_hAllocator, VmaBlockVectorDefragmentationContext)(
13398  m_hAllocator,
13399  pool,
13400  &pool->m_BlockVector,
13401  m_CurrFrameIndex);
13402  m_CustomPoolContexts.push_back(pBlockVectorDefragCtx);
13403  }
13404 
13405  pBlockVectorDefragCtx->AddAll();
13406  }
13407  }
13408 }
13409 
// Registers individual allocations for defragmentation, routing each one to
// the defragmentation context of the block vector it lives in (custom pool or
// default per-memory-type vector) and creating those contexts on demand.
// pAllocationsChanged, if not null, receives a per-allocation VK_TRUE when the
// allocation is later moved.
void VmaDefragmentationContext_T::AddAllocations(
    uint32_t allocationCount,
    VmaAllocation* pAllocations,
    VkBool32* pAllocationsChanged)
{
    // Dispatch pAllocations among defragmentators. Create them when necessary.
    for(uint32_t allocIndex = 0; allocIndex < allocationCount; ++allocIndex)
    {
        const VmaAllocation hAlloc = pAllocations[allocIndex];
        VMA_ASSERT(hAlloc);
        // DedicatedAlloc cannot be defragmented.
        if((hAlloc->GetType() == VmaAllocation_T::ALLOCATION_TYPE_BLOCK) &&
            // Lost allocation cannot be defragmented.
            (hAlloc->GetLastUseFrameIndex() != VMA_FRAME_INDEX_LOST))
        {
            VmaBlockVectorDefragmentationContext* pBlockVectorDefragCtx = VMA_NULL;

            const VmaPool hAllocPool = hAlloc->GetBlock()->GetParentPool();
            // This allocation belongs to custom pool.
            if(hAllocPool != VK_NULL_HANDLE)
            {
                // Pools with algorithm other than default are not defragmented.
                if(hAllocPool->m_BlockVector.GetAlgorithm() == 0)
                {
                    // Try to reuse a context already created for this pool.
                    for(size_t i = m_CustomPoolContexts.size(); i--; )
                    {
                        if(m_CustomPoolContexts[i]->GetCustomPool() == hAllocPool)
                        {
                            pBlockVectorDefragCtx = m_CustomPoolContexts[i];
                            break;
                        }
                    }
                    if(!pBlockVectorDefragCtx)
                    {
                        pBlockVectorDefragCtx = vma_new(m_hAllocator, VmaBlockVectorDefragmentationContext)(
                            m_hAllocator,
                            hAllocPool,
                            &hAllocPool->m_BlockVector,
                            m_CurrFrameIndex);
                        m_CustomPoolContexts.push_back(pBlockVectorDefragCtx);
                    }
                }
            }
            // This allocation belongs to default pool.
            else
            {
                const uint32_t memTypeIndex = hAlloc->GetMemoryTypeIndex();
                pBlockVectorDefragCtx = m_DefaultPoolContexts[memTypeIndex];
                if(!pBlockVectorDefragCtx)
                {
                    // Lazily create the per-memory-type context.
                    pBlockVectorDefragCtx = vma_new(m_hAllocator, VmaBlockVectorDefragmentationContext)(
                        m_hAllocator,
                        VMA_NULL, // hCustomPool
                        m_hAllocator->m_pBlockVectors[memTypeIndex],
                        m_CurrFrameIndex);
                    m_DefaultPoolContexts[memTypeIndex] = pBlockVectorDefragCtx;
                }
            }

            // pBlockVectorDefragCtx stays null for custom pools with a
            // non-default algorithm - such allocations are silently skipped.
            if(pBlockVectorDefragCtx)
            {
                VkBool32* const pChanged = (pAllocationsChanged != VMA_NULL) ?
                    &pAllocationsChanged[allocIndex] : VMA_NULL;
                pBlockVectorDefragCtx->AddAllocation(hAlloc, pChanged);
            }
        }
    }
}
13478 
13479 VkResult VmaDefragmentationContext_T::Defragment(
13480  VkDeviceSize maxCpuBytesToMove, uint32_t maxCpuAllocationsToMove,
13481  VkDeviceSize maxGpuBytesToMove, uint32_t maxGpuAllocationsToMove,
13482  VkCommandBuffer commandBuffer, VmaDefragmentationStats* pStats)
13483 {
13484  if(pStats)
13485  {
13486  memset(pStats, 0, sizeof(VmaDefragmentationStats));
13487  }
13488 
13489  if(commandBuffer == VK_NULL_HANDLE)
13490  {
13491  maxGpuBytesToMove = 0;
13492  maxGpuAllocationsToMove = 0;
13493  }
13494 
13495  VkResult res = VK_SUCCESS;
13496 
13497  // Process default pools.
13498  for(uint32_t memTypeIndex = 0;
13499  memTypeIndex < m_hAllocator->GetMemoryTypeCount() && res >= VK_SUCCESS;
13500  ++memTypeIndex)
13501  {
13502  VmaBlockVectorDefragmentationContext* pBlockVectorCtx = m_DefaultPoolContexts[memTypeIndex];
13503  if(pBlockVectorCtx)
13504  {
13505  VMA_ASSERT(pBlockVectorCtx->GetBlockVector());
13506  pBlockVectorCtx->GetBlockVector()->Defragment(
13507  pBlockVectorCtx,
13508  pStats,
13509  maxCpuBytesToMove, maxCpuAllocationsToMove,
13510  maxGpuBytesToMove, maxGpuAllocationsToMove,
13511  commandBuffer);
13512  if(pBlockVectorCtx->res != VK_SUCCESS)
13513  {
13514  res = pBlockVectorCtx->res;
13515  }
13516  }
13517  }
13518 
13519  // Process custom pools.
13520  for(size_t customCtxIndex = 0, customCtxCount = m_CustomPoolContexts.size();
13521  customCtxIndex < customCtxCount && res >= VK_SUCCESS;
13522  ++customCtxIndex)
13523  {
13524  VmaBlockVectorDefragmentationContext* pBlockVectorCtx = m_CustomPoolContexts[customCtxIndex];
13525  VMA_ASSERT(pBlockVectorCtx && pBlockVectorCtx->GetBlockVector());
13526  pBlockVectorCtx->GetBlockVector()->Defragment(
13527  pBlockVectorCtx,
13528  pStats,
13529  maxCpuBytesToMove, maxCpuAllocationsToMove,
13530  maxGpuBytesToMove, maxGpuAllocationsToMove,
13531  commandBuffer);
13532  if(pBlockVectorCtx->res != VK_SUCCESS)
13533  {
13534  res = pBlockVectorCtx->res;
13535  }
13536  }
13537 
13538  return res;
13539 }
13540 
13542 // VmaRecorder
13543 
13544 #if VMA_RECORDING_ENABLED
13545 
// Constructs an inactive recorder: no file is open and the timer fields hold
// sentinel values until Init() succeeds.
VmaRecorder::VmaRecorder() :
    m_UseMutex(true),
    m_Flags(0),
    m_File(VMA_NULL),
    m_Freq(INT64_MAX),
    m_StartCounter(INT64_MAX)
{
}
13554 
// Opens the recording file and writes the file header.
// Returns VK_ERROR_INITIALIZATION_FAILED if the file cannot be opened.
// Windows-only APIs (QueryPerformanceCounter, fopen_s) are fine here: this
// code is compiled only under VMA_RECORDING_ENABLED, which includes windows.h.
VkResult VmaRecorder::Init(const VmaRecordSettings& settings, bool useMutex)
{
    m_UseMutex = useMutex;
    m_Flags = settings.flags;

    // Capture timer frequency and session start; later records report
    // timestamps relative to this start.
    QueryPerformanceFrequency((LARGE_INTEGER*)&m_Freq);
    QueryPerformanceCounter((LARGE_INTEGER*)&m_StartCounter);

    // Open file for writing.
    errno_t err = fopen_s(&m_File, settings.pFilePath, "wb");
    if(err != 0)
    {
        return VK_ERROR_INITIALIZATION_FAILED;
    }

    // Write header: magic line plus format version "1,6". Replay tools key off
    // this exact text - do not reformat.
    fprintf(m_File, "%s\n", "Vulkan Memory Allocator,Calls recording");
    fprintf(m_File, "%s\n", "1,6");

    return VK_SUCCESS;
}
13576 
13577 VmaRecorder::~VmaRecorder()
13578 {
13579  if(m_File != VMA_NULL)
13580  {
13581  fclose(m_File);
13582  }
13583 }
13584 
// Appends a "vmaCreateAllocator" record line.
// Line format: threadId,time,frameIndex,functionName - the CSV layout the
// replay tool expects.
void VmaRecorder::RecordCreateAllocator(uint32_t frameIndex)
{
    CallParams callParams;
    GetBasicParams(callParams);

    // Lock is held for fprintf+Flush so concurrent records never interleave.
    VmaMutexLock lock(m_FileMutex, m_UseMutex);
    fprintf(m_File, "%u,%.3f,%u,vmaCreateAllocator\n", callParams.threadId, callParams.time, frameIndex);
    Flush();
}
13594 
// Appends a "vmaDestroyAllocator" record line (same CSV layout as the other
// Record* functions).
void VmaRecorder::RecordDestroyAllocator(uint32_t frameIndex)
{
    CallParams callParams;
    GetBasicParams(callParams);

    // Lock is held for fprintf+Flush so concurrent records never interleave.
    VmaMutexLock lock(m_FileMutex, m_UseMutex);
    fprintf(m_File, "%u,%.3f,%u,vmaDestroyAllocator\n", callParams.threadId, callParams.time, frameIndex);
    Flush();
}
13604 
// Appends a "vmaCreatePool" record with the pool creation parameters and the
// resulting pool handle (printed via %p so the replay tool can correlate later
// calls on the same pool).
void VmaRecorder::RecordCreatePool(uint32_t frameIndex, const VmaPoolCreateInfo& createInfo, VmaPool pool)
{
    CallParams callParams;
    GetBasicParams(callParams);

    // Lock is held for fprintf+Flush so concurrent records never interleave.
    VmaMutexLock lock(m_FileMutex, m_UseMutex);
    fprintf(m_File, "%u,%.3f,%u,vmaCreatePool,%u,%u,%llu,%llu,%llu,%u,%p\n", callParams.threadId, callParams.time, frameIndex,
        createInfo.memoryTypeIndex,
        createInfo.flags,
        createInfo.blockSize,
        (uint64_t)createInfo.minBlockCount,
        (uint64_t)createInfo.maxBlockCount,
        createInfo.frameInUseCount,
        pool);
    Flush();
}
13621 
// Appends a "vmaDestroyPool" record identifying the pool by its handle.
void VmaRecorder::RecordDestroyPool(uint32_t frameIndex, VmaPool pool)
{
    CallParams callParams;
    GetBasicParams(callParams);

    // Lock is held for fprintf+Flush so concurrent records never interleave.
    VmaMutexLock lock(m_FileMutex, m_UseMutex);
    fprintf(m_File, "%u,%.3f,%u,vmaDestroyPool,%p\n", callParams.threadId, callParams.time, frameIndex,
        pool);
    Flush();
}
13632 
// Appends a "vmaAllocateMemory" record: memory requirements, allocation
// create-info, the resulting allocation handle, and the (possibly stringified)
// user data.
void VmaRecorder::RecordAllocateMemory(uint32_t frameIndex,
    const VkMemoryRequirements& vkMemReq,
    const VmaAllocationCreateInfo& createInfo,
    VmaAllocation allocation)
{
    CallParams callParams;
    GetBasicParams(callParams);

    // Lock covers userDataStr construction, fprintf and Flush so records from
    // concurrent threads never interleave.
    VmaMutexLock lock(m_FileMutex, m_UseMutex);
    UserDataString userDataStr(createInfo.flags, createInfo.pUserData);
    fprintf(m_File, "%u,%.3f,%u,vmaAllocateMemory,%llu,%llu,%u,%u,%u,%u,%u,%u,%p,%p,%s\n", callParams.threadId, callParams.time, frameIndex,
        vkMemReq.size,
        vkMemReq.alignment,
        vkMemReq.memoryTypeBits,
        createInfo.flags,
        createInfo.usage,
        createInfo.requiredFlags,
        createInfo.preferredFlags,
        createInfo.memoryTypeBits,
        createInfo.pool,
        allocation,
        userDataStr.GetString());
    Flush();
}
13657 
// Appends a "vmaAllocateMemoryPages" record. The line is assembled from three
// writes: the fixed fields, then the list of allocation handles
// (PrintPointerList), then the user-data string - all under the same lock so
// the line stays contiguous.
void VmaRecorder::RecordAllocateMemoryPages(uint32_t frameIndex,
    const VkMemoryRequirements& vkMemReq,
    const VmaAllocationCreateInfo& createInfo,
    uint64_t allocationCount,
    const VmaAllocation* pAllocations)
{
    CallParams callParams;
    GetBasicParams(callParams);

    VmaMutexLock lock(m_FileMutex, m_UseMutex);
    UserDataString userDataStr(createInfo.flags, createInfo.pUserData);
    fprintf(m_File, "%u,%.3f,%u,vmaAllocateMemoryPages,%llu,%llu,%u,%u,%u,%u,%u,%u,%p,", callParams.threadId, callParams.time, frameIndex,
        vkMemReq.size,
        vkMemReq.alignment,
        vkMemReq.memoryTypeBits,
        createInfo.flags,
        createInfo.usage,
        createInfo.requiredFlags,
        createInfo.preferredFlags,
        createInfo.memoryTypeBits,
        createInfo.pool);
    PrintPointerList(allocationCount, pAllocations);
    fprintf(m_File, ",%s\n", userDataStr.GetString());
    Flush();
}
13683 
// Appends a "vmaAllocateMemoryForBuffer" record, including whether the driver
// requires/prefers a dedicated allocation (written as 0/1 flags).
void VmaRecorder::RecordAllocateMemoryForBuffer(uint32_t frameIndex,
    const VkMemoryRequirements& vkMemReq,
    bool requiresDedicatedAllocation,
    bool prefersDedicatedAllocation,
    const VmaAllocationCreateInfo& createInfo,
    VmaAllocation allocation)
{
    CallParams callParams;
    GetBasicParams(callParams);

    // Lock covers userDataStr construction, fprintf and Flush so records from
    // concurrent threads never interleave.
    VmaMutexLock lock(m_FileMutex, m_UseMutex);
    UserDataString userDataStr(createInfo.flags, createInfo.pUserData);
    fprintf(m_File, "%u,%.3f,%u,vmaAllocateMemoryForBuffer,%llu,%llu,%u,%u,%u,%u,%u,%u,%u,%u,%p,%p,%s\n", callParams.threadId, callParams.time, frameIndex,
        vkMemReq.size,
        vkMemReq.alignment,
        vkMemReq.memoryTypeBits,
        requiresDedicatedAllocation ? 1 : 0,
        prefersDedicatedAllocation ? 1 : 0,
        createInfo.flags,
        createInfo.usage,
        createInfo.requiredFlags,
        createInfo.preferredFlags,
        createInfo.memoryTypeBits,
        createInfo.pool,
        allocation,
        userDataStr.GetString());
    Flush();
}
13712 
13713 void VmaRecorder::RecordAllocateMemoryForImage(uint32_t frameIndex,
13714  const VkMemoryRequirements& vkMemReq,
13715  bool requiresDedicatedAllocation,
13716  bool prefersDedicatedAllocation,
13717  const VmaAllocationCreateInfo& createInfo,
13718  VmaAllocation allocation)
13719 {
13720  CallParams callParams;
13721  GetBasicParams(callParams);
13722 
13723  VmaMutexLock lock(m_FileMutex, m_UseMutex);
13724  UserDataString userDataStr(createInfo.flags, createInfo.pUserData);
13725  fprintf(m_File, "%u,%.3f,%u,vmaAllocateMemoryForImage,%llu,%llu,%u,%u,%u,%u,%u,%u,%u,%u,%p,%p,%s\n", callParams.threadId, callParams.time, frameIndex,
13726  vkMemReq.size,
13727  vkMemReq.alignment,
13728  vkMemReq.memoryTypeBits,
13729  requiresDedicatedAllocation ? 1 : 0,
13730  prefersDedicatedAllocation ? 1 : 0,
13731  createInfo.flags,
13732  createInfo.usage,
13733  createInfo.requiredFlags,
13734  createInfo.preferredFlags,
13735  createInfo.memoryTypeBits,
13736  createInfo.pool,
13737  allocation,
13738  userDataStr.GetString());
13739  Flush();
13740 }
13741 
13742 void VmaRecorder::RecordFreeMemory(uint32_t frameIndex,
13743  VmaAllocation allocation)
13744 {
13745  CallParams callParams;
13746  GetBasicParams(callParams);
13747 
13748  VmaMutexLock lock(m_FileMutex, m_UseMutex);
13749  fprintf(m_File, "%u,%.3f,%u,vmaFreeMemory,%p\n", callParams.threadId, callParams.time, frameIndex,
13750  allocation);
13751  Flush();
13752 }
13753 
13754 void VmaRecorder::RecordFreeMemoryPages(uint32_t frameIndex,
13755  uint64_t allocationCount,
13756  const VmaAllocation* pAllocations)
13757 {
13758  CallParams callParams;
13759  GetBasicParams(callParams);
13760 
13761  VmaMutexLock lock(m_FileMutex, m_UseMutex);
13762  fprintf(m_File, "%u,%.3f,%u,vmaFreeMemoryPages,", callParams.threadId, callParams.time, frameIndex);
13763  PrintPointerList(allocationCount, pAllocations);
13764  fprintf(m_File, "\n");
13765  Flush();
13766 }
13767 
13768 void VmaRecorder::RecordSetAllocationUserData(uint32_t frameIndex,
13769  VmaAllocation allocation,
13770  const void* pUserData)
13771 {
13772  CallParams callParams;
13773  GetBasicParams(callParams);
13774 
13775  VmaMutexLock lock(m_FileMutex, m_UseMutex);
13776  UserDataString userDataStr(
13777  allocation->IsUserDataString() ? VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT : 0,
13778  pUserData);
13779  fprintf(m_File, "%u,%.3f,%u,vmaSetAllocationUserData,%p,%s\n", callParams.threadId, callParams.time, frameIndex,
13780  allocation,
13781  userDataStr.GetString());
13782  Flush();
13783 }
13784 
13785 void VmaRecorder::RecordCreateLostAllocation(uint32_t frameIndex,
13786  VmaAllocation allocation)
13787 {
13788  CallParams callParams;
13789  GetBasicParams(callParams);
13790 
13791  VmaMutexLock lock(m_FileMutex, m_UseMutex);
13792  fprintf(m_File, "%u,%.3f,%u,vmaCreateLostAllocation,%p\n", callParams.threadId, callParams.time, frameIndex,
13793  allocation);
13794  Flush();
13795 }
13796 
13797 void VmaRecorder::RecordMapMemory(uint32_t frameIndex,
13798  VmaAllocation allocation)
13799 {
13800  CallParams callParams;
13801  GetBasicParams(callParams);
13802 
13803  VmaMutexLock lock(m_FileMutex, m_UseMutex);
13804  fprintf(m_File, "%u,%.3f,%u,vmaMapMemory,%p\n", callParams.threadId, callParams.time, frameIndex,
13805  allocation);
13806  Flush();
13807 }
13808 
13809 void VmaRecorder::RecordUnmapMemory(uint32_t frameIndex,
13810  VmaAllocation allocation)
13811 {
13812  CallParams callParams;
13813  GetBasicParams(callParams);
13814 
13815  VmaMutexLock lock(m_FileMutex, m_UseMutex);
13816  fprintf(m_File, "%u,%.3f,%u,vmaUnmapMemory,%p\n", callParams.threadId, callParams.time, frameIndex,
13817  allocation);
13818  Flush();
13819 }
13820 
13821 void VmaRecorder::RecordFlushAllocation(uint32_t frameIndex,
13822  VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size)
13823 {
13824  CallParams callParams;
13825  GetBasicParams(callParams);
13826 
13827  VmaMutexLock lock(m_FileMutex, m_UseMutex);
13828  fprintf(m_File, "%u,%.3f,%u,vmaFlushAllocation,%p,%llu,%llu\n", callParams.threadId, callParams.time, frameIndex,
13829  allocation,
13830  offset,
13831  size);
13832  Flush();
13833 }
13834 
13835 void VmaRecorder::RecordInvalidateAllocation(uint32_t frameIndex,
13836  VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size)
13837 {
13838  CallParams callParams;
13839  GetBasicParams(callParams);
13840 
13841  VmaMutexLock lock(m_FileMutex, m_UseMutex);
13842  fprintf(m_File, "%u,%.3f,%u,vmaInvalidateAllocation,%p,%llu,%llu\n", callParams.threadId, callParams.time, frameIndex,
13843  allocation,
13844  offset,
13845  size);
13846  Flush();
13847 }
13848 
13849 void VmaRecorder::RecordCreateBuffer(uint32_t frameIndex,
13850  const VkBufferCreateInfo& bufCreateInfo,
13851  const VmaAllocationCreateInfo& allocCreateInfo,
13852  VmaAllocation allocation)
13853 {
13854  CallParams callParams;
13855  GetBasicParams(callParams);
13856 
13857  VmaMutexLock lock(m_FileMutex, m_UseMutex);
13858  UserDataString userDataStr(allocCreateInfo.flags, allocCreateInfo.pUserData);
13859  fprintf(m_File, "%u,%.3f,%u,vmaCreateBuffer,%u,%llu,%u,%u,%u,%u,%u,%u,%u,%p,%p,%s\n", callParams.threadId, callParams.time, frameIndex,
13860  bufCreateInfo.flags,
13861  bufCreateInfo.size,
13862  bufCreateInfo.usage,
13863  bufCreateInfo.sharingMode,
13864  allocCreateInfo.flags,
13865  allocCreateInfo.usage,
13866  allocCreateInfo.requiredFlags,
13867  allocCreateInfo.preferredFlags,
13868  allocCreateInfo.memoryTypeBits,
13869  allocCreateInfo.pool,
13870  allocation,
13871  userDataStr.GetString());
13872  Flush();
13873 }
13874 
// Appends one CSV trace line for a vmaCreateImage call: all VkImageCreateInfo
// fields used by the replay tool, then the VmaAllocationCreateInfo fields,
// then the resulting allocation handle and the user-data string.
void VmaRecorder::RecordCreateImage(uint32_t frameIndex,
    const VkImageCreateInfo& imageCreateInfo,
    const VmaAllocationCreateInfo& allocCreateInfo,
    VmaAllocation allocation)
{
    // Thread id and timestamp are captured before taking the file lock.
    CallParams callParams;
    GetBasicParams(callParams);

    VmaMutexLock lock(m_FileMutex, m_UseMutex);
    UserDataString userDataStr(allocCreateInfo.flags, allocCreateInfo.pUserData);
    // The 18 %u conversions must stay in exact 1:1 correspondence with the
    // arguments below; enums and flag bitmasks are written as raw integers.
    fprintf(m_File, "%u,%.3f,%u,vmaCreateImage,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%p,%p,%s\n", callParams.threadId, callParams.time, frameIndex,
        imageCreateInfo.flags,
        imageCreateInfo.imageType,
        imageCreateInfo.format,
        imageCreateInfo.extent.width,
        imageCreateInfo.extent.height,
        imageCreateInfo.extent.depth,
        imageCreateInfo.mipLevels,
        imageCreateInfo.arrayLayers,
        imageCreateInfo.samples,
        imageCreateInfo.tiling,
        imageCreateInfo.usage,
        imageCreateInfo.sharingMode,
        imageCreateInfo.initialLayout,
        allocCreateInfo.flags,
        allocCreateInfo.usage,
        allocCreateInfo.requiredFlags,
        allocCreateInfo.preferredFlags,
        allocCreateInfo.memoryTypeBits,
        allocCreateInfo.pool,
        allocation,
        userDataStr.GetString());
    Flush();
}
13909 
13910 void VmaRecorder::RecordDestroyBuffer(uint32_t frameIndex,
13911  VmaAllocation allocation)
13912 {
13913  CallParams callParams;
13914  GetBasicParams(callParams);
13915 
13916  VmaMutexLock lock(m_FileMutex, m_UseMutex);
13917  fprintf(m_File, "%u,%.3f,%u,vmaDestroyBuffer,%p\n", callParams.threadId, callParams.time, frameIndex,
13918  allocation);
13919  Flush();
13920 }
13921 
13922 void VmaRecorder::RecordDestroyImage(uint32_t frameIndex,
13923  VmaAllocation allocation)
13924 {
13925  CallParams callParams;
13926  GetBasicParams(callParams);
13927 
13928  VmaMutexLock lock(m_FileMutex, m_UseMutex);
13929  fprintf(m_File, "%u,%.3f,%u,vmaDestroyImage,%p\n", callParams.threadId, callParams.time, frameIndex,
13930  allocation);
13931  Flush();
13932 }
13933 
13934 void VmaRecorder::RecordTouchAllocation(uint32_t frameIndex,
13935  VmaAllocation allocation)
13936 {
13937  CallParams callParams;
13938  GetBasicParams(callParams);
13939 
13940  VmaMutexLock lock(m_FileMutex, m_UseMutex);
13941  fprintf(m_File, "%u,%.3f,%u,vmaTouchAllocation,%p\n", callParams.threadId, callParams.time, frameIndex,
13942  allocation);
13943  Flush();
13944 }
13945 
13946 void VmaRecorder::RecordGetAllocationInfo(uint32_t frameIndex,
13947  VmaAllocation allocation)
13948 {
13949  CallParams callParams;
13950  GetBasicParams(callParams);
13951 
13952  VmaMutexLock lock(m_FileMutex, m_UseMutex);
13953  fprintf(m_File, "%u,%.3f,%u,vmaGetAllocationInfo,%p\n", callParams.threadId, callParams.time, frameIndex,
13954  allocation);
13955  Flush();
13956 }
13957 
13958 void VmaRecorder::RecordMakePoolAllocationsLost(uint32_t frameIndex,
13959  VmaPool pool)
13960 {
13961  CallParams callParams;
13962  GetBasicParams(callParams);
13963 
13964  VmaMutexLock lock(m_FileMutex, m_UseMutex);
13965  fprintf(m_File, "%u,%.3f,%u,vmaMakePoolAllocationsLost,%p\n", callParams.threadId, callParams.time, frameIndex,
13966  pool);
13967  Flush();
13968 }
13969 
13970 void VmaRecorder::RecordDefragmentationBegin(uint32_t frameIndex,
13971  const VmaDefragmentationInfo2& info,
13973 {
13974  CallParams callParams;
13975  GetBasicParams(callParams);
13976 
13977  VmaMutexLock lock(m_FileMutex, m_UseMutex);
13978  fprintf(m_File, "%u,%.3f,%u,vmaDefragmentationBegin,%u,", callParams.threadId, callParams.time, frameIndex,
13979  info.flags);
13980  PrintPointerList(info.allocationCount, info.pAllocations);
13981  fprintf(m_File, ",");
13982  PrintPointerList(info.poolCount, info.pPools);
13983  fprintf(m_File, ",%llu,%u,%llu,%u,%p,%p\n",
13984  info.maxCpuBytesToMove,
13986  info.maxGpuBytesToMove,
13988  info.commandBuffer,
13989  ctx);
13990  Flush();
13991 }
13992 
13993 void VmaRecorder::RecordDefragmentationEnd(uint32_t frameIndex,
13995 {
13996  CallParams callParams;
13997  GetBasicParams(callParams);
13998 
13999  VmaMutexLock lock(m_FileMutex, m_UseMutex);
14000  fprintf(m_File, "%u,%.3f,%u,vmaDefragmentationEnd,%p\n", callParams.threadId, callParams.time, frameIndex,
14001  ctx);
14002  Flush();
14003 }
14004 
14005 VmaRecorder::UserDataString::UserDataString(VmaAllocationCreateFlags allocFlags, const void* pUserData)
14006 {
14007  if(pUserData != VMA_NULL)
14008  {
14009  if((allocFlags & VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT) != 0)
14010  {
14011  m_Str = (const char*)pUserData;
14012  }
14013  else
14014  {
14015  sprintf_s(m_PtrStr, "%p", pUserData);
14016  m_Str = m_PtrStr;
14017  }
14018  }
14019  else
14020  {
14021  m_Str = "";
14022  }
14023 }
14024 
// Writes the "Config,Begin" .. "Config,End" header section of the trace file:
// physical-device identity and limits, the full memory heap/type topology,
// which extensions were enabled, and the compile-time VMA_DEBUG_* macro
// values. The replay tool uses this to validate trace compatibility.
// Caller is expected to hold any required file synchronization; this method
// does not lock m_FileMutex itself.
void VmaRecorder::WriteConfiguration(
    const VkPhysicalDeviceProperties& devProps,
    const VkPhysicalDeviceMemoryProperties& memProps,
    bool dedicatedAllocationExtensionEnabled,
    bool bindMemory2ExtensionEnabled)
{
    fprintf(m_File, "Config,Begin\n");

    // Device identity.
    fprintf(m_File, "PhysicalDevice,apiVersion,%u\n", devProps.apiVersion);
    fprintf(m_File, "PhysicalDevice,driverVersion,%u\n", devProps.driverVersion);
    fprintf(m_File, "PhysicalDevice,vendorID,%u\n", devProps.vendorID);
    fprintf(m_File, "PhysicalDevice,deviceID,%u\n", devProps.deviceID);
    fprintf(m_File, "PhysicalDevice,deviceType,%u\n", devProps.deviceType);
    fprintf(m_File, "PhysicalDevice,deviceName,%s\n", devProps.deviceName);

    // Limits that affect allocation behavior.
    fprintf(m_File, "PhysicalDeviceLimits,maxMemoryAllocationCount,%u\n", devProps.limits.maxMemoryAllocationCount);
    fprintf(m_File, "PhysicalDeviceLimits,bufferImageGranularity,%llu\n", devProps.limits.bufferImageGranularity);
    fprintf(m_File, "PhysicalDeviceLimits,nonCoherentAtomSize,%llu\n", devProps.limits.nonCoherentAtomSize);

    // Memory heap and type topology.
    fprintf(m_File, "PhysicalDeviceMemory,HeapCount,%u\n", memProps.memoryHeapCount);
    for(uint32_t i = 0; i < memProps.memoryHeapCount; ++i)
    {
        fprintf(m_File, "PhysicalDeviceMemory,Heap,%u,size,%llu\n", i, memProps.memoryHeaps[i].size);
        fprintf(m_File, "PhysicalDeviceMemory,Heap,%u,flags,%u\n", i, memProps.memoryHeaps[i].flags);
    }
    fprintf(m_File, "PhysicalDeviceMemory,TypeCount,%u\n", memProps.memoryTypeCount);
    for(uint32_t i = 0; i < memProps.memoryTypeCount; ++i)
    {
        fprintf(m_File, "PhysicalDeviceMemory,Type,%u,heapIndex,%u\n", i, memProps.memoryTypes[i].heapIndex);
        fprintf(m_File, "PhysicalDeviceMemory,Type,%u,propertyFlags,%u\n", i, memProps.memoryTypes[i].propertyFlags);
    }

    // Extension availability as 0/1.
    fprintf(m_File, "Extension,VK_KHR_dedicated_allocation,%u\n", dedicatedAllocationExtensionEnabled ? 1 : 0);
    fprintf(m_File, "Extension,VK_KHR_bind_memory2,%u\n", bindMemory2ExtensionEnabled ? 1 : 0);

    // Compile-time configuration macros, normalized to integer values.
    fprintf(m_File, "Macro,VMA_DEBUG_ALWAYS_DEDICATED_MEMORY,%u\n", VMA_DEBUG_ALWAYS_DEDICATED_MEMORY ? 1 : 0);
    fprintf(m_File, "Macro,VMA_DEBUG_ALIGNMENT,%llu\n", (VkDeviceSize)VMA_DEBUG_ALIGNMENT);
    fprintf(m_File, "Macro,VMA_DEBUG_MARGIN,%llu\n", (VkDeviceSize)VMA_DEBUG_MARGIN);
    fprintf(m_File, "Macro,VMA_DEBUG_INITIALIZE_ALLOCATIONS,%u\n", VMA_DEBUG_INITIALIZE_ALLOCATIONS ? 1 : 0);
    fprintf(m_File, "Macro,VMA_DEBUG_DETECT_CORRUPTION,%u\n", VMA_DEBUG_DETECT_CORRUPTION ? 1 : 0);
    fprintf(m_File, "Macro,VMA_DEBUG_GLOBAL_MUTEX,%u\n", VMA_DEBUG_GLOBAL_MUTEX ? 1 : 0);
    fprintf(m_File, "Macro,VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY,%llu\n", (VkDeviceSize)VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY);
    fprintf(m_File, "Macro,VMA_SMALL_HEAP_MAX_SIZE,%llu\n", (VkDeviceSize)VMA_SMALL_HEAP_MAX_SIZE);
    fprintf(m_File, "Macro,VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE,%llu\n", (VkDeviceSize)VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE);

    fprintf(m_File, "Config,End\n");
}
14072 
14073 void VmaRecorder::GetBasicParams(CallParams& outParams)
14074 {
14075  outParams.threadId = GetCurrentThreadId();
14076 
14077  LARGE_INTEGER counter;
14078  QueryPerformanceCounter(&counter);
14079  outParams.time = (double)(counter.QuadPart - m_StartCounter) / (double)m_Freq;
14080 }
14081 
14082 void VmaRecorder::PrintPointerList(uint64_t count, const VmaAllocation* pItems)
14083 {
14084  if(count)
14085  {
14086  fprintf(m_File, "%p", pItems[0]);
14087  for(uint64_t i = 1; i < count; ++i)
14088  {
14089  fprintf(m_File, " %p", pItems[i]);
14090  }
14091  }
14092 }
14093 
14094 void VmaRecorder::Flush()
14095 {
14096  if((m_Flags & VMA_RECORD_FLUSH_AFTER_CALL_BIT) != 0)
14097  {
14098  fflush(m_File);
14099  }
14100 }
14101 
14102 #endif // #if VMA_RECORDING_ENABLED
14103 
14105 // VmaAllocationObjectAllocator
14106 
// Pool allocator for allocation objects. All memory goes through the given
// VkAllocationCallbacks; 1024 is the capacity hint passed to the underlying
// m_Allocator — presumably items per internal block, verify against its ctor.
VmaAllocationObjectAllocator::VmaAllocationObjectAllocator(const VkAllocationCallbacks* pAllocationCallbacks) :
    m_Allocator(pAllocationCallbacks, 1024)
{
}
14111 
14112 VmaAllocation VmaAllocationObjectAllocator::Allocate()
14113 {
14114  VmaMutexLock mutexLock(m_Mutex);
14115  return m_Allocator.Alloc();
14116 }
14117 
14118 void VmaAllocationObjectAllocator::Free(VmaAllocation hAlloc)
14119 {
14120  VmaMutexLock mutexLock(m_Mutex);
14121  m_Allocator.Free(hAlloc);
14122 }
14123 
14125 // VmaAllocator_T
14126 
14127 VmaAllocator_T::VmaAllocator_T(const VmaAllocatorCreateInfo* pCreateInfo) :
14128  m_UseMutex((pCreateInfo->flags & VMA_ALLOCATOR_CREATE_EXTERNALLY_SYNCHRONIZED_BIT) == 0),
14129  m_UseKhrDedicatedAllocation((pCreateInfo->flags & VMA_ALLOCATOR_CREATE_KHR_DEDICATED_ALLOCATION_BIT) != 0),
14130  m_UseKhrBindMemory2((pCreateInfo->flags & VMA_ALLOCATOR_CREATE_KHR_BIND_MEMORY2_BIT) != 0),
14131  m_hDevice(pCreateInfo->device),
14132  m_AllocationCallbacksSpecified(pCreateInfo->pAllocationCallbacks != VMA_NULL),
14133  m_AllocationCallbacks(pCreateInfo->pAllocationCallbacks ?
14134  *pCreateInfo->pAllocationCallbacks : VmaEmptyAllocationCallbacks),
14135  m_AllocationObjectAllocator(&m_AllocationCallbacks),
14136  m_PreferredLargeHeapBlockSize(0),
14137  m_PhysicalDevice(pCreateInfo->physicalDevice),
14138  m_CurrentFrameIndex(0),
14139  m_GpuDefragmentationMemoryTypeBits(UINT32_MAX),
14140  m_Pools(VmaStlAllocator<VmaPool>(GetAllocationCallbacks())),
14141  m_NextPoolId(0)
14143  ,m_pRecorder(VMA_NULL)
14144 #endif
14145 {
14146  if(VMA_DEBUG_DETECT_CORRUPTION)
14147  {
14148  // Needs to be multiply of uint32_t size because we are going to write VMA_CORRUPTION_DETECTION_MAGIC_VALUE to it.
14149  VMA_ASSERT(VMA_DEBUG_MARGIN % sizeof(uint32_t) == 0);
14150  }
14151 
14152  VMA_ASSERT(pCreateInfo->physicalDevice && pCreateInfo->device);
14153 
14154 #if !(VMA_DEDICATED_ALLOCATION)
14156  {
14157  VMA_ASSERT(0 && "VMA_ALLOCATOR_CREATE_KHR_DEDICATED_ALLOCATION_BIT set but required extensions are disabled by preprocessor macros.");
14158  }
14159 #endif
14160 #if !(VMA_BIND_MEMORY2)
14161  if((pCreateInfo->flags & VMA_ALLOCATOR_CREATE_KHR_BIND_MEMORY2_BIT) != 0)
14162  {
14163  VMA_ASSERT(0 && "VMA_ALLOCATOR_CREATE_KHR_BIND_MEMORY2_BIT set but required extension is disabled by preprocessor macros.");
14164  }
14165 #endif
14166 
14167  memset(&m_DeviceMemoryCallbacks, 0 ,sizeof(m_DeviceMemoryCallbacks));
14168  memset(&m_PhysicalDeviceProperties, 0, sizeof(m_PhysicalDeviceProperties));
14169  memset(&m_MemProps, 0, sizeof(m_MemProps));
14170 
14171  memset(&m_pBlockVectors, 0, sizeof(m_pBlockVectors));
14172  memset(&m_pDedicatedAllocations, 0, sizeof(m_pDedicatedAllocations));
14173  memset(&m_VulkanFunctions, 0, sizeof(m_VulkanFunctions));
14174 
14175  for(uint32_t i = 0; i < VK_MAX_MEMORY_HEAPS; ++i)
14176  {
14177  m_HeapSizeLimit[i] = VK_WHOLE_SIZE;
14178  }
14179 
14180  if(pCreateInfo->pDeviceMemoryCallbacks != VMA_NULL)
14181  {
14182  m_DeviceMemoryCallbacks.pfnAllocate = pCreateInfo->pDeviceMemoryCallbacks->pfnAllocate;
14183  m_DeviceMemoryCallbacks.pfnFree = pCreateInfo->pDeviceMemoryCallbacks->pfnFree;
14184  }
14185 
14186  ImportVulkanFunctions(pCreateInfo->pVulkanFunctions);
14187 
14188  (*m_VulkanFunctions.vkGetPhysicalDeviceProperties)(m_PhysicalDevice, &m_PhysicalDeviceProperties);
14189  (*m_VulkanFunctions.vkGetPhysicalDeviceMemoryProperties)(m_PhysicalDevice, &m_MemProps);
14190 
14191  VMA_ASSERT(VmaIsPow2(VMA_DEBUG_ALIGNMENT));
14192  VMA_ASSERT(VmaIsPow2(VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY));
14193  VMA_ASSERT(VmaIsPow2(m_PhysicalDeviceProperties.limits.bufferImageGranularity));
14194  VMA_ASSERT(VmaIsPow2(m_PhysicalDeviceProperties.limits.nonCoherentAtomSize));
14195 
14196  m_PreferredLargeHeapBlockSize = (pCreateInfo->preferredLargeHeapBlockSize != 0) ?
14197  pCreateInfo->preferredLargeHeapBlockSize : static_cast<VkDeviceSize>(VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE);
14198 
14199  if(pCreateInfo->pHeapSizeLimit != VMA_NULL)
14200  {
14201  for(uint32_t heapIndex = 0; heapIndex < GetMemoryHeapCount(); ++heapIndex)
14202  {
14203  const VkDeviceSize limit = pCreateInfo->pHeapSizeLimit[heapIndex];
14204  if(limit != VK_WHOLE_SIZE)
14205  {
14206  m_HeapSizeLimit[heapIndex] = limit;
14207  if(limit < m_MemProps.memoryHeaps[heapIndex].size)
14208  {
14209  m_MemProps.memoryHeaps[heapIndex].size = limit;
14210  }
14211  }
14212  }
14213  }
14214 
14215  for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
14216  {
14217  const VkDeviceSize preferredBlockSize = CalcPreferredBlockSize(memTypeIndex);
14218 
14219  m_pBlockVectors[memTypeIndex] = vma_new(this, VmaBlockVector)(
14220  this,
14221  VK_NULL_HANDLE, // hParentPool
14222  memTypeIndex,
14223  preferredBlockSize,
14224  0,
14225  SIZE_MAX,
14226  GetBufferImageGranularity(),
14227  pCreateInfo->frameInUseCount,
14228  false, // isCustomPool
14229  false, // explicitBlockSize
14230  false); // linearAlgorithm
14231  // No need to call m_pBlockVectors[memTypeIndex][blockVectorTypeIndex]->CreateMinBlocks here,
14232  // becase minBlockCount is 0.
14233  m_pDedicatedAllocations[memTypeIndex] = vma_new(this, AllocationVectorType)(VmaStlAllocator<VmaAllocation>(GetAllocationCallbacks()));
14234 
14235  }
14236 }
14237 
// Second-stage, fallible initialization. Currently only sets up the optional
// call recorder (trace file) when pRecordSettings specifies a file path.
// Returns VK_SUCCESS, the recorder's error, or VK_ERROR_FEATURE_NOT_PRESENT
// when recording was requested but compiled out.
VkResult VmaAllocator_T::Init(const VmaAllocatorCreateInfo* pCreateInfo)
{
    VkResult res = VK_SUCCESS;

    if(pCreateInfo->pRecordSettings != VMA_NULL &&
        !VmaStrIsEmpty(pCreateInfo->pRecordSettings->pFilePath))
    {
#if VMA_RECORDING_ENABLED
        m_pRecorder = vma_new(this, VmaRecorder)();
        res = m_pRecorder->Init(*pCreateInfo->pRecordSettings, m_UseMutex);
        if(res != VK_SUCCESS)
        {
            return res;
        }
        // Write the self-describing configuration header, then the initial
        // "create allocator" entry, so the trace can be replayed stand-alone.
        m_pRecorder->WriteConfiguration(
            m_PhysicalDeviceProperties,
            m_MemProps,
            m_UseKhrDedicatedAllocation,
            m_UseKhrBindMemory2);
        m_pRecorder->RecordCreateAllocator(GetCurrentFrameIndex());
#else
        VMA_ASSERT(0 && "VmaAllocatorCreateInfo::pRecordSettings used, but not supported due to VMA_RECORDING_ENABLED not defined to 1.");
        return VK_ERROR_FEATURE_NOT_PRESENT;
#endif
    }

    return res;
}
14266 
// Destructor: records and destroys the recorder first, asserts that the user
// already destroyed all custom pools and freed all dedicated allocations,
// then tears down the per-memory-type containers in reverse index order.
VmaAllocator_T::~VmaAllocator_T()
{
#if VMA_RECORDING_ENABLED
    if(m_pRecorder != VMA_NULL)
    {
        // Log the destruction before deleting the recorder itself.
        m_pRecorder->RecordDestroyAllocator(GetCurrentFrameIndex());
        vma_delete(this, m_pRecorder);
    }
#endif

    VMA_ASSERT(m_Pools.empty());

    for(size_t i = GetMemoryTypeCount(); i--; )
    {
        if(m_pDedicatedAllocations[i] != VMA_NULL && !m_pDedicatedAllocations[i]->empty())
        {
            VMA_ASSERT(0 && "Unfreed dedicated allocations found.");
        }

        vma_delete(this, m_pDedicatedAllocations[i]);
        vma_delete(this, m_pBlockVectors[i]);
    }
}
14290 
// Populates m_VulkanFunctions. With VMA_STATIC_VULKAN_FUNCTIONS == 1 the
// statically linked entry points are taken first (extension functions are
// fetched via vkGetDeviceProcAddr when the corresponding feature is in use).
// Any non-null pointer in the user-supplied pVulkanFunctions then overrides
// the static one. Finally, asserts verify that every required pointer ended
// up non-null.
void VmaAllocator_T::ImportVulkanFunctions(const VmaVulkanFunctions* pVulkanFunctions)
{
#if VMA_STATIC_VULKAN_FUNCTIONS == 1
    m_VulkanFunctions.vkGetPhysicalDeviceProperties = (PFN_vkGetPhysicalDeviceProperties)vkGetPhysicalDeviceProperties;
    m_VulkanFunctions.vkGetPhysicalDeviceMemoryProperties = (PFN_vkGetPhysicalDeviceMemoryProperties)vkGetPhysicalDeviceMemoryProperties;
    m_VulkanFunctions.vkAllocateMemory = (PFN_vkAllocateMemory)vkAllocateMemory;
    m_VulkanFunctions.vkFreeMemory = (PFN_vkFreeMemory)vkFreeMemory;
    m_VulkanFunctions.vkMapMemory = (PFN_vkMapMemory)vkMapMemory;
    m_VulkanFunctions.vkUnmapMemory = (PFN_vkUnmapMemory)vkUnmapMemory;
    m_VulkanFunctions.vkFlushMappedMemoryRanges = (PFN_vkFlushMappedMemoryRanges)vkFlushMappedMemoryRanges;
    m_VulkanFunctions.vkInvalidateMappedMemoryRanges = (PFN_vkInvalidateMappedMemoryRanges)vkInvalidateMappedMemoryRanges;
    m_VulkanFunctions.vkBindBufferMemory = (PFN_vkBindBufferMemory)vkBindBufferMemory;
    m_VulkanFunctions.vkBindImageMemory = (PFN_vkBindImageMemory)vkBindImageMemory;
    m_VulkanFunctions.vkGetBufferMemoryRequirements = (PFN_vkGetBufferMemoryRequirements)vkGetBufferMemoryRequirements;
    m_VulkanFunctions.vkGetImageMemoryRequirements = (PFN_vkGetImageMemoryRequirements)vkGetImageMemoryRequirements;
    m_VulkanFunctions.vkCreateBuffer = (PFN_vkCreateBuffer)vkCreateBuffer;
    m_VulkanFunctions.vkDestroyBuffer = (PFN_vkDestroyBuffer)vkDestroyBuffer;
    m_VulkanFunctions.vkCreateImage = (PFN_vkCreateImage)vkCreateImage;
    m_VulkanFunctions.vkDestroyImage = (PFN_vkDestroyImage)vkDestroyImage;
    m_VulkanFunctions.vkCmdCopyBuffer = (PFN_vkCmdCopyBuffer)vkCmdCopyBuffer;
#if VMA_DEDICATED_ALLOCATION
    if(m_UseKhrDedicatedAllocation)
    {
        // Extension entry points must be fetched dynamically.
        m_VulkanFunctions.vkGetBufferMemoryRequirements2KHR =
            (PFN_vkGetBufferMemoryRequirements2KHR)vkGetDeviceProcAddr(m_hDevice, "vkGetBufferMemoryRequirements2KHR");
        m_VulkanFunctions.vkGetImageMemoryRequirements2KHR =
            (PFN_vkGetImageMemoryRequirements2KHR)vkGetDeviceProcAddr(m_hDevice, "vkGetImageMemoryRequirements2KHR");
    }
#endif // #if VMA_DEDICATED_ALLOCATION
#if VMA_BIND_MEMORY2
    if(m_UseKhrBindMemory2)
    {
        m_VulkanFunctions.vkBindBufferMemory2KHR =
            (PFN_vkBindBufferMemory2KHR)vkGetDeviceProcAddr(m_hDevice, "vkBindBufferMemory2KHR");
        m_VulkanFunctions.vkBindImageMemory2KHR =
            (PFN_vkBindImageMemory2KHR)vkGetDeviceProcAddr(m_hDevice, "vkBindImageMemory2KHR");
    }
#endif // #if VMA_BIND_MEMORY2
#endif // #if VMA_STATIC_VULKAN_FUNCTIONS == 1

// Copy a user-provided pointer only when it is non-null.
#define VMA_COPY_IF_NOT_NULL(funcName) \
    if(pVulkanFunctions->funcName != VMA_NULL) m_VulkanFunctions.funcName = pVulkanFunctions->funcName;

    if(pVulkanFunctions != VMA_NULL)
    {
        VMA_COPY_IF_NOT_NULL(vkGetPhysicalDeviceProperties);
        VMA_COPY_IF_NOT_NULL(vkGetPhysicalDeviceMemoryProperties);
        VMA_COPY_IF_NOT_NULL(vkAllocateMemory);
        VMA_COPY_IF_NOT_NULL(vkFreeMemory);
        VMA_COPY_IF_NOT_NULL(vkMapMemory);
        VMA_COPY_IF_NOT_NULL(vkUnmapMemory);
        VMA_COPY_IF_NOT_NULL(vkFlushMappedMemoryRanges);
        VMA_COPY_IF_NOT_NULL(vkInvalidateMappedMemoryRanges);
        VMA_COPY_IF_NOT_NULL(vkBindBufferMemory);
        VMA_COPY_IF_NOT_NULL(vkBindImageMemory);
        VMA_COPY_IF_NOT_NULL(vkGetBufferMemoryRequirements);
        VMA_COPY_IF_NOT_NULL(vkGetImageMemoryRequirements);
        VMA_COPY_IF_NOT_NULL(vkCreateBuffer);
        VMA_COPY_IF_NOT_NULL(vkDestroyBuffer);
        VMA_COPY_IF_NOT_NULL(vkCreateImage);
        VMA_COPY_IF_NOT_NULL(vkDestroyImage);
        VMA_COPY_IF_NOT_NULL(vkCmdCopyBuffer);
#if VMA_DEDICATED_ALLOCATION
        VMA_COPY_IF_NOT_NULL(vkGetBufferMemoryRequirements2KHR);
        VMA_COPY_IF_NOT_NULL(vkGetImageMemoryRequirements2KHR);
#endif
#if VMA_BIND_MEMORY2
        VMA_COPY_IF_NOT_NULL(vkBindBufferMemory2KHR);
        VMA_COPY_IF_NOT_NULL(vkBindImageMemory2KHR);
#endif
    }

#undef VMA_COPY_IF_NOT_NULL

    // If these asserts are hit, you must either #define VMA_STATIC_VULKAN_FUNCTIONS 1
    // or pass valid pointers as VmaAllocatorCreateInfo::pVulkanFunctions.
    VMA_ASSERT(m_VulkanFunctions.vkGetPhysicalDeviceProperties != VMA_NULL);
    VMA_ASSERT(m_VulkanFunctions.vkGetPhysicalDeviceMemoryProperties != VMA_NULL);
    VMA_ASSERT(m_VulkanFunctions.vkAllocateMemory != VMA_NULL);
    VMA_ASSERT(m_VulkanFunctions.vkFreeMemory != VMA_NULL);
    VMA_ASSERT(m_VulkanFunctions.vkMapMemory != VMA_NULL);
    VMA_ASSERT(m_VulkanFunctions.vkUnmapMemory != VMA_NULL);
    VMA_ASSERT(m_VulkanFunctions.vkFlushMappedMemoryRanges != VMA_NULL);
    VMA_ASSERT(m_VulkanFunctions.vkInvalidateMappedMemoryRanges != VMA_NULL);
    VMA_ASSERT(m_VulkanFunctions.vkBindBufferMemory != VMA_NULL);
    VMA_ASSERT(m_VulkanFunctions.vkBindImageMemory != VMA_NULL);
    VMA_ASSERT(m_VulkanFunctions.vkGetBufferMemoryRequirements != VMA_NULL);
    VMA_ASSERT(m_VulkanFunctions.vkGetImageMemoryRequirements != VMA_NULL);
    VMA_ASSERT(m_VulkanFunctions.vkCreateBuffer != VMA_NULL);
    VMA_ASSERT(m_VulkanFunctions.vkDestroyBuffer != VMA_NULL);
    VMA_ASSERT(m_VulkanFunctions.vkCreateImage != VMA_NULL);
    VMA_ASSERT(m_VulkanFunctions.vkDestroyImage != VMA_NULL);
    VMA_ASSERT(m_VulkanFunctions.vkCmdCopyBuffer != VMA_NULL);
#if VMA_DEDICATED_ALLOCATION
    if(m_UseKhrDedicatedAllocation)
    {
        VMA_ASSERT(m_VulkanFunctions.vkGetBufferMemoryRequirements2KHR != VMA_NULL);
        VMA_ASSERT(m_VulkanFunctions.vkGetImageMemoryRequirements2KHR != VMA_NULL);
    }
#endif
#if VMA_BIND_MEMORY2
    if(m_UseKhrBindMemory2)
    {
        VMA_ASSERT(m_VulkanFunctions.vkBindBufferMemory2KHR != VMA_NULL);
        VMA_ASSERT(m_VulkanFunctions.vkBindImageMemory2KHR != VMA_NULL);
    }
#endif
}
14399 
14400 VkDeviceSize VmaAllocator_T::CalcPreferredBlockSize(uint32_t memTypeIndex)
14401 {
14402  const uint32_t heapIndex = MemoryTypeIndexToHeapIndex(memTypeIndex);
14403  const VkDeviceSize heapSize = m_MemProps.memoryHeaps[heapIndex].size;
14404  const bool isSmallHeap = heapSize <= VMA_SMALL_HEAP_MAX_SIZE;
14405  return isSmallHeap ? (heapSize / 8) : m_PreferredLargeHeapBlockSize;
14406 }
14407 
14408 VkResult VmaAllocator_T::AllocateMemoryOfType(
14409  VkDeviceSize size,
14410  VkDeviceSize alignment,
14411  bool dedicatedAllocation,
14412  VkBuffer dedicatedBuffer,
14413  VkImage dedicatedImage,
14414  const VmaAllocationCreateInfo& createInfo,
14415  uint32_t memTypeIndex,
14416  VmaSuballocationType suballocType,
14417  size_t allocationCount,
14418  VmaAllocation* pAllocations)
14419 {
14420  VMA_ASSERT(pAllocations != VMA_NULL);
14421  VMA_DEBUG_LOG(" AllocateMemory: MemoryTypeIndex=%u, AllocationCount=%zu, Size=%llu", memTypeIndex, allocationCount, size);
14422 
14423  VmaAllocationCreateInfo finalCreateInfo = createInfo;
14424 
14425  // If memory type is not HOST_VISIBLE, disable MAPPED.
14426  if((finalCreateInfo.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0 &&
14427  (m_MemProps.memoryTypes[memTypeIndex].propertyFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) == 0)
14428  {
14429  finalCreateInfo.flags &= ~VMA_ALLOCATION_CREATE_MAPPED_BIT;
14430  }
14431 
14432  VmaBlockVector* const blockVector = m_pBlockVectors[memTypeIndex];
14433  VMA_ASSERT(blockVector);
14434 
14435  const VkDeviceSize preferredBlockSize = blockVector->GetPreferredBlockSize();
14436  bool preferDedicatedMemory =
14437  VMA_DEBUG_ALWAYS_DEDICATED_MEMORY ||
14438  dedicatedAllocation ||
14439  // Heuristics: Allocate dedicated memory if requested size if greater than half of preferred block size.
14440  size > preferredBlockSize / 2;
14441 
14442  if(preferDedicatedMemory &&
14443  (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) == 0 &&
14444  finalCreateInfo.pool == VK_NULL_HANDLE)
14445  {
14447  }
14448 
14449  if((finalCreateInfo.flags & VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT) != 0)
14450  {
14451  if((finalCreateInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) != 0)
14452  {
14453  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
14454  }
14455  else
14456  {
14457  return AllocateDedicatedMemory(
14458  size,
14459  suballocType,
14460  memTypeIndex,
14461  (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0,
14462  (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT) != 0,
14463  finalCreateInfo.pUserData,
14464  dedicatedBuffer,
14465  dedicatedImage,
14466  allocationCount,
14467  pAllocations);
14468  }
14469  }
14470  else
14471  {
14472  VkResult res = blockVector->Allocate(
14473  m_CurrentFrameIndex.load(),
14474  size,
14475  alignment,
14476  finalCreateInfo,
14477  suballocType,
14478  allocationCount,
14479  pAllocations);
14480  if(res == VK_SUCCESS)
14481  {
14482  return res;
14483  }
14484 
14485  // 5. Try dedicated memory.
14486  if((finalCreateInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) != 0)
14487  {
14488  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
14489  }
14490  else
14491  {
14492  res = AllocateDedicatedMemory(
14493  size,
14494  suballocType,
14495  memTypeIndex,
14496  (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0,
14497  (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT) != 0,
14498  finalCreateInfo.pUserData,
14499  dedicatedBuffer,
14500  dedicatedImage,
14501  allocationCount,
14502  pAllocations);
14503  if(res == VK_SUCCESS)
14504  {
14505  // Succeeded: AllocateDedicatedMemory function already filled pMemory, nothing more to do here.
14506  VMA_DEBUG_LOG(" Allocated as DedicatedMemory");
14507  return VK_SUCCESS;
14508  }
14509  else
14510  {
14511  // Everything failed: Return error code.
14512  VMA_DEBUG_LOG(" vkAllocateMemory FAILED");
14513  return res;
14514  }
14515  }
14516  }
14517 }
14518 
/*
Allocates `allocationCount` separate dedicated VkDeviceMemory blocks, each of
`size` bytes, from memory type `memTypeIndex`, and writes the resulting
allocation handles into pAllocations[0..allocationCount-1].

On success, all new allocations are registered in
m_pDedicatedAllocations[memTypeIndex]. If any page fails, every page created so
far is freed, pAllocations is zeroed, and the error code is returned.

map              - if true, each page is persistently mapped after allocation.
dedicatedBuffer/ - at most one may be non-null; passed through to
dedicatedImage     VkMemoryDedicatedAllocateInfoKHR when the KHR dedicated
                   allocation extension is in use.
*/
VkResult VmaAllocator_T::AllocateDedicatedMemory(
    VkDeviceSize size,
    VmaSuballocationType suballocType,
    uint32_t memTypeIndex,
    bool map,
    bool isUserDataString,
    void* pUserData,
    VkBuffer dedicatedBuffer,
    VkImage dedicatedImage,
    size_t allocationCount,
    VmaAllocation* pAllocations)
{
    VMA_ASSERT(allocationCount > 0 && pAllocations);

    VkMemoryAllocateInfo allocInfo = { VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO };
    allocInfo.memoryTypeIndex = memTypeIndex;
    allocInfo.allocationSize = size;

#if VMA_DEDICATED_ALLOCATION
    // Chain VkMemoryDedicatedAllocateInfoKHR if the allocation is tied to a
    // specific buffer or image (VK_KHR_dedicated_allocation).
    VkMemoryDedicatedAllocateInfoKHR dedicatedAllocInfo = { VK_STRUCTURE_TYPE_MEMORY_DEDICATED_ALLOCATE_INFO_KHR };
    if(m_UseKhrDedicatedAllocation)
    {
        if(dedicatedBuffer != VK_NULL_HANDLE)
        {
            VMA_ASSERT(dedicatedImage == VK_NULL_HANDLE);
            dedicatedAllocInfo.buffer = dedicatedBuffer;
            allocInfo.pNext = &dedicatedAllocInfo;
        }
        else if(dedicatedImage != VK_NULL_HANDLE)
        {
            dedicatedAllocInfo.image = dedicatedImage;
            allocInfo.pNext = &dedicatedAllocInfo;
        }
    }
#endif // #if VMA_DEDICATED_ALLOCATION

    // Allocate pages one by one; stop at the first failure.
    size_t allocIndex;
    VkResult res = VK_SUCCESS;
    for(allocIndex = 0; allocIndex < allocationCount; ++allocIndex)
    {
        res = AllocateDedicatedMemoryPage(
            size,
            suballocType,
            memTypeIndex,
            allocInfo,
            map,
            isUserDataString,
            pUserData,
            pAllocations + allocIndex);
        if(res != VK_SUCCESS)
        {
            break;
        }
    }

    if(res == VK_SUCCESS)
    {
        // Register them in m_pDedicatedAllocations.
        {
            VmaMutexLockWrite lock(m_DedicatedAllocationsMutex[memTypeIndex], m_UseMutex);
            AllocationVectorType* pDedicatedAllocations = m_pDedicatedAllocations[memTypeIndex];
            VMA_ASSERT(pDedicatedAllocations);
            for(allocIndex = 0; allocIndex < allocationCount; ++allocIndex)
            {
                VmaVectorInsertSorted<VmaPointerLess>(*pDedicatedAllocations, pAllocations[allocIndex]);
            }
        }

        VMA_DEBUG_LOG("    Allocated DedicatedMemory Count=%zu, MemoryTypeIndex=#%u", allocationCount, memTypeIndex);
    }
    else
    {
        // Free all already created allocations (rollback, in reverse order).
        while(allocIndex--)
        {
            VmaAllocation currAlloc = pAllocations[allocIndex];
            VkDeviceMemory hMemory = currAlloc->GetMemory();

            /*
            There is no need to call this, because Vulkan spec allows to skip vkUnmapMemory
            before vkFreeMemory.

            if(currAlloc->GetMappedData() != VMA_NULL)
            {
                (*m_VulkanFunctions.vkUnmapMemory)(m_hDevice, hMemory);
            }
            */

            FreeVulkanMemory(memTypeIndex, currAlloc->GetSize(), hMemory);

            currAlloc->SetUserData(this, VMA_NULL);
            currAlloc->Dtor();
            m_AllocationObjectAllocator.Free(currAlloc);
        }

        memset(pAllocations, 0, sizeof(VmaAllocation) * allocationCount);
    }

    return res;
}
14619 
/*
Allocates a single dedicated VkDeviceMemory block described by `allocInfo`,
optionally maps it, and wraps it in a new VmaAllocation written to *pAllocation.
On vkAllocateMemory or vkMapMemory failure the memory is released and the
error code is returned; *pAllocation is left unwritten in that case.
*/
VkResult VmaAllocator_T::AllocateDedicatedMemoryPage(
    VkDeviceSize size,
    VmaSuballocationType suballocType,
    uint32_t memTypeIndex,
    const VkMemoryAllocateInfo& allocInfo,
    bool map,
    bool isUserDataString,
    void* pUserData,
    VmaAllocation* pAllocation)
{
    VkDeviceMemory hMemory = VK_NULL_HANDLE;
    VkResult res = AllocateVulkanMemory(&allocInfo, &hMemory);
    if(res < 0)
    {
        VMA_DEBUG_LOG("    vkAllocateMemory FAILED");
        return res;
    }

    // Persistently map the whole block if requested.
    void* pMappedData = VMA_NULL;
    if(map)
    {
        res = (*m_VulkanFunctions.vkMapMemory)(
            m_hDevice,
            hMemory,
            0,
            VK_WHOLE_SIZE,
            0,
            &pMappedData);
        if(res < 0)
        {
            VMA_DEBUG_LOG("    vkMapMemory FAILED");
            FreeVulkanMemory(memTypeIndex, size, hMemory);
            return res;
        }
    }

    *pAllocation = m_AllocationObjectAllocator.Allocate();
    (*pAllocation)->Ctor(m_CurrentFrameIndex.load(), isUserDataString);
    (*pAllocation)->InitDedicatedAllocation(memTypeIndex, hMemory, suballocType, pMappedData, size);
    (*pAllocation)->SetUserData(this, pUserData);
    // Optionally fill new memory with a debug pattern to catch use of
    // uninitialized data.
    if(VMA_DEBUG_INITIALIZE_ALLOCATIONS)
    {
        FillAllocation(*pAllocation, VMA_ALLOCATION_FILL_PATTERN_CREATED);
    }

    return VK_SUCCESS;
}
14667 
/*
Queries memory requirements for `hBuffer`. When VK_KHR_get_memory_requirements2
+ VK_KHR_dedicated_allocation are enabled, also reports whether the driver
requires or prefers a dedicated allocation for this buffer; otherwise both
flags are reported as false.
NOTE: the `else` intentionally straddles the #endif so the fallback branch
pairs with the `if` only when VMA_DEDICATED_ALLOCATION is enabled.
*/
void VmaAllocator_T::GetBufferMemoryRequirements(
    VkBuffer hBuffer,
    VkMemoryRequirements& memReq,
    bool& requiresDedicatedAllocation,
    bool& prefersDedicatedAllocation) const
{
#if VMA_DEDICATED_ALLOCATION
    if(m_UseKhrDedicatedAllocation)
    {
        VkBufferMemoryRequirementsInfo2KHR memReqInfo = { VK_STRUCTURE_TYPE_BUFFER_MEMORY_REQUIREMENTS_INFO_2_KHR };
        memReqInfo.buffer = hBuffer;

        VkMemoryDedicatedRequirementsKHR memDedicatedReq = { VK_STRUCTURE_TYPE_MEMORY_DEDICATED_REQUIREMENTS_KHR };

        VkMemoryRequirements2KHR memReq2 = { VK_STRUCTURE_TYPE_MEMORY_REQUIREMENTS_2_KHR };
        memReq2.pNext = &memDedicatedReq;

        (*m_VulkanFunctions.vkGetBufferMemoryRequirements2KHR)(m_hDevice, &memReqInfo, &memReq2);

        memReq = memReq2.memoryRequirements;
        requiresDedicatedAllocation = (memDedicatedReq.requiresDedicatedAllocation != VK_FALSE);
        prefersDedicatedAllocation  = (memDedicatedReq.prefersDedicatedAllocation  != VK_FALSE);
    }
    else
#endif // #if VMA_DEDICATED_ALLOCATION
    {
        (*m_VulkanFunctions.vkGetBufferMemoryRequirements)(m_hDevice, hBuffer, &memReq);
        requiresDedicatedAllocation = false;
        prefersDedicatedAllocation  = false;
    }
}
14699 
/*
Image counterpart of GetBufferMemoryRequirements: queries memory requirements
for `hImage` and, when the KHR dedicated-allocation path is enabled, whether a
dedicated allocation is required/preferred; both flags are false on the
fallback path. The `else` straddles the #endif deliberately (see buffer
version).
*/
void VmaAllocator_T::GetImageMemoryRequirements(
    VkImage hImage,
    VkMemoryRequirements& memReq,
    bool& requiresDedicatedAllocation,
    bool& prefersDedicatedAllocation) const
{
#if VMA_DEDICATED_ALLOCATION
    if(m_UseKhrDedicatedAllocation)
    {
        VkImageMemoryRequirementsInfo2KHR memReqInfo = { VK_STRUCTURE_TYPE_IMAGE_MEMORY_REQUIREMENTS_INFO_2_KHR };
        memReqInfo.image = hImage;

        VkMemoryDedicatedRequirementsKHR memDedicatedReq = { VK_STRUCTURE_TYPE_MEMORY_DEDICATED_REQUIREMENTS_KHR };

        VkMemoryRequirements2KHR memReq2 = { VK_STRUCTURE_TYPE_MEMORY_REQUIREMENTS_2_KHR };
        memReq2.pNext = &memDedicatedReq;

        (*m_VulkanFunctions.vkGetImageMemoryRequirements2KHR)(m_hDevice, &memReqInfo, &memReq2);

        memReq = memReq2.memoryRequirements;
        requiresDedicatedAllocation = (memDedicatedReq.requiresDedicatedAllocation != VK_FALSE);
        prefersDedicatedAllocation  = (memDedicatedReq.prefersDedicatedAllocation  != VK_FALSE);
    }
    else
#endif // #if VMA_DEDICATED_ALLOCATION
    {
        (*m_VulkanFunctions.vkGetImageMemoryRequirements)(m_hDevice, hImage, &memReq);
        requiresDedicatedAllocation = false;
        prefersDedicatedAllocation = false;
    }
}
14731 
14732 VkResult VmaAllocator_T::AllocateMemory(
14733  const VkMemoryRequirements& vkMemReq,
14734  bool requiresDedicatedAllocation,
14735  bool prefersDedicatedAllocation,
14736  VkBuffer dedicatedBuffer,
14737  VkImage dedicatedImage,
14738  const VmaAllocationCreateInfo& createInfo,
14739  VmaSuballocationType suballocType,
14740  size_t allocationCount,
14741  VmaAllocation* pAllocations)
14742 {
14743  memset(pAllocations, 0, sizeof(VmaAllocation) * allocationCount);
14744 
14745  VMA_ASSERT(VmaIsPow2(vkMemReq.alignment));
14746 
14747  if(vkMemReq.size == 0)
14748  {
14749  return VK_ERROR_VALIDATION_FAILED_EXT;
14750  }
14751  if((createInfo.flags & VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT) != 0 &&
14752  (createInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) != 0)
14753  {
14754  VMA_ASSERT(0 && "Specifying VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT together with VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT makes no sense.");
14755  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
14756  }
14757  if((createInfo.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0 &&
14759  {
14760  VMA_ASSERT(0 && "Specifying VMA_ALLOCATION_CREATE_MAPPED_BIT together with VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT is invalid.");
14761  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
14762  }
14763  if(requiresDedicatedAllocation)
14764  {
14765  if((createInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) != 0)
14766  {
14767  VMA_ASSERT(0 && "VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT specified while dedicated allocation is required.");
14768  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
14769  }
14770  if(createInfo.pool != VK_NULL_HANDLE)
14771  {
14772  VMA_ASSERT(0 && "Pool specified while dedicated allocation is required.");
14773  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
14774  }
14775  }
14776  if((createInfo.pool != VK_NULL_HANDLE) &&
14777  ((createInfo.flags & (VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT)) != 0))
14778  {
14779  VMA_ASSERT(0 && "Specifying VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT when pool != null is invalid.");
14780  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
14781  }
14782 
14783  if(createInfo.pool != VK_NULL_HANDLE)
14784  {
14785  const VkDeviceSize alignmentForPool = VMA_MAX(
14786  vkMemReq.alignment,
14787  GetMemoryTypeMinAlignment(createInfo.pool->m_BlockVector.GetMemoryTypeIndex()));
14788 
14789  VmaAllocationCreateInfo createInfoForPool = createInfo;
14790  // If memory type is not HOST_VISIBLE, disable MAPPED.
14791  if((createInfoForPool.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0 &&
14792  (m_MemProps.memoryTypes[createInfo.pool->m_BlockVector.GetMemoryTypeIndex()].propertyFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) == 0)
14793  {
14794  createInfoForPool.flags &= ~VMA_ALLOCATION_CREATE_MAPPED_BIT;
14795  }
14796 
14797  return createInfo.pool->m_BlockVector.Allocate(
14798  m_CurrentFrameIndex.load(),
14799  vkMemReq.size,
14800  alignmentForPool,
14801  createInfoForPool,
14802  suballocType,
14803  allocationCount,
14804  pAllocations);
14805  }
14806  else
14807  {
14808  // Bit mask of memory Vulkan types acceptable for this allocation.
14809  uint32_t memoryTypeBits = vkMemReq.memoryTypeBits;
14810  uint32_t memTypeIndex = UINT32_MAX;
14811  VkResult res = vmaFindMemoryTypeIndex(this, memoryTypeBits, &createInfo, &memTypeIndex);
14812  if(res == VK_SUCCESS)
14813  {
14814  VkDeviceSize alignmentForMemType = VMA_MAX(
14815  vkMemReq.alignment,
14816  GetMemoryTypeMinAlignment(memTypeIndex));
14817 
14818  res = AllocateMemoryOfType(
14819  vkMemReq.size,
14820  alignmentForMemType,
14821  requiresDedicatedAllocation || prefersDedicatedAllocation,
14822  dedicatedBuffer,
14823  dedicatedImage,
14824  createInfo,
14825  memTypeIndex,
14826  suballocType,
14827  allocationCount,
14828  pAllocations);
14829  // Succeeded on first try.
14830  if(res == VK_SUCCESS)
14831  {
14832  return res;
14833  }
14834  // Allocation from this memory type failed. Try other compatible memory types.
14835  else
14836  {
14837  for(;;)
14838  {
14839  // Remove old memTypeIndex from list of possibilities.
14840  memoryTypeBits &= ~(1u << memTypeIndex);
14841  // Find alternative memTypeIndex.
14842  res = vmaFindMemoryTypeIndex(this, memoryTypeBits, &createInfo, &memTypeIndex);
14843  if(res == VK_SUCCESS)
14844  {
14845  alignmentForMemType = VMA_MAX(
14846  vkMemReq.alignment,
14847  GetMemoryTypeMinAlignment(memTypeIndex));
14848 
14849  res = AllocateMemoryOfType(
14850  vkMemReq.size,
14851  alignmentForMemType,
14852  requiresDedicatedAllocation || prefersDedicatedAllocation,
14853  dedicatedBuffer,
14854  dedicatedImage,
14855  createInfo,
14856  memTypeIndex,
14857  suballocType,
14858  allocationCount,
14859  pAllocations);
14860  // Allocation from this alternative memory type succeeded.
14861  if(res == VK_SUCCESS)
14862  {
14863  return res;
14864  }
14865  // else: Allocation from this memory type failed. Try next one - next loop iteration.
14866  }
14867  // No other matching memory type index could be found.
14868  else
14869  {
14870  // Not returning res, which is VK_ERROR_FEATURE_NOT_PRESENT, because we already failed to allocate once.
14871  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
14872  }
14873  }
14874  }
14875  }
14876  // Can't find any single memory type maching requirements. res is VK_ERROR_FEATURE_NOT_PRESENT.
14877  else
14878  return res;
14879  }
14880 }
14881 
/*
Frees `allocationCount` allocations (null handles are skipped), iterating in
reverse order. TouchAllocation guards against freeing an allocation that has
already become lost: only live allocations release their underlying memory,
but the VmaAllocation object itself is always destroyed.
*/
void VmaAllocator_T::FreeMemory(
    size_t allocationCount,
    const VmaAllocation* pAllocations)
{
    VMA_ASSERT(pAllocations);

    for(size_t allocIndex = allocationCount; allocIndex--; )
    {
        VmaAllocation allocation = pAllocations[allocIndex];

        if(allocation != VK_NULL_HANDLE)
        {
            // TouchAllocation returns false if the allocation is lost;
            // a lost allocation has no memory to release.
            if(TouchAllocation(allocation))
            {
                // Optionally overwrite freed memory with a debug pattern.
                if(VMA_DEBUG_INITIALIZE_ALLOCATIONS)
                {
                    FillAllocation(allocation, VMA_ALLOCATION_FILL_PATTERN_DESTROYED);
                }

                switch(allocation->GetType())
                {
                case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
                    {
                        // Return the suballocation to its owning block vector:
                        // either a custom pool's vector or the default per-type one.
                        VmaBlockVector* pBlockVector = VMA_NULL;
                        VmaPool hPool = allocation->GetBlock()->GetParentPool();
                        if(hPool != VK_NULL_HANDLE)
                        {
                            pBlockVector = &hPool->m_BlockVector;
                        }
                        else
                        {
                            const uint32_t memTypeIndex = allocation->GetMemoryTypeIndex();
                            pBlockVector = m_pBlockVectors[memTypeIndex];
                        }
                        pBlockVector->Free(allocation);
                    }
                    break;
                case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
                    FreeDedicatedMemory(allocation);
                    break;
                default:
                    VMA_ASSERT(0);
                }
            }

            // Destroy the allocation object itself regardless of lost state.
            allocation->SetUserData(this, VMA_NULL);
            allocation->Dtor();
            m_AllocationObjectAllocator.Free(allocation);
        }
    }
}
14933 
14934 VkResult VmaAllocator_T::ResizeAllocation(
14935  const VmaAllocation alloc,
14936  VkDeviceSize newSize)
14937 {
14938  // This function is deprecated and so it does nothing. It's left for backward compatibility.
14939  if(newSize == 0 || alloc->GetLastUseFrameIndex() == VMA_FRAME_INDEX_LOST)
14940  {
14941  return VK_ERROR_VALIDATION_FAILED_EXT;
14942  }
14943  if(newSize == alloc->GetSize())
14944  {
14945  return VK_SUCCESS;
14946  }
14947  return VK_ERROR_OUT_OF_POOL_MEMORY;
14948 }
14949 
/*
Fills *pStats with aggregated statistics over the whole allocator: default
per-memory-type block vectors, custom pools, and dedicated allocations.
Takes m_PoolsMutex and each m_DedicatedAllocationsMutex for reading.
*/
void VmaAllocator_T::CalculateStats(VmaStats* pStats)
{
    // Initialize.
    InitStatInfo(pStats->total);
    for(size_t i = 0; i < VK_MAX_MEMORY_TYPES; ++i)
        InitStatInfo(pStats->memoryType[i]);
    for(size_t i = 0; i < VK_MAX_MEMORY_HEAPS; ++i)
        InitStatInfo(pStats->memoryHeap[i]);

    // Process default pools.
    for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
    {
        VmaBlockVector* const pBlockVector = m_pBlockVectors[memTypeIndex];
        VMA_ASSERT(pBlockVector);
        pBlockVector->AddStats(pStats);
    }

    // Process custom pools.
    {
        VmaMutexLockRead lock(m_PoolsMutex, m_UseMutex);
        for(size_t poolIndex = 0, poolCount = m_Pools.size(); poolIndex < poolCount; ++poolIndex)
        {
            m_Pools[poolIndex]->m_BlockVector.AddStats(pStats);
        }
    }

    // Process dedicated allocations.
    for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
    {
        const uint32_t memHeapIndex = MemoryTypeIndexToHeapIndex(memTypeIndex);
        VmaMutexLockRead dedicatedAllocationsLock(m_DedicatedAllocationsMutex[memTypeIndex], m_UseMutex);
        AllocationVectorType* const pDedicatedAllocVector = m_pDedicatedAllocations[memTypeIndex];
        VMA_ASSERT(pDedicatedAllocVector);
        for(size_t allocIndex = 0, allocCount = pDedicatedAllocVector->size(); allocIndex < allocCount; ++allocIndex)
        {
            VmaStatInfo allocationStatInfo;
            (*pDedicatedAllocVector)[allocIndex]->DedicatedAllocCalcStatsInfo(allocationStatInfo);
            // Each dedicated allocation contributes to total, its type, and its heap.
            VmaAddStatInfo(pStats->total, allocationStatInfo);
            VmaAddStatInfo(pStats->memoryType[memTypeIndex], allocationStatInfo);
            VmaAddStatInfo(pStats->memoryHeap[memHeapIndex], allocationStatInfo);
        }
    }

    // Postprocess (derive averages etc.).
    VmaPostprocessCalcStatInfo(pStats->total);
    for(size_t i = 0; i < GetMemoryTypeCount(); ++i)
        VmaPostprocessCalcStatInfo(pStats->memoryType[i]);
    for(size_t i = 0; i < GetMemoryHeapCount(); ++i)
        VmaPostprocessCalcStatInfo(pStats->memoryHeap[i]);
}
15000 
static const uint32_t VMA_VENDOR_ID_AMD = 4098; // 0x1002 - PCI vendor ID of AMD.
15002 
15003 VkResult VmaAllocator_T::DefragmentationBegin(
15004  const VmaDefragmentationInfo2& info,
15005  VmaDefragmentationStats* pStats,
15006  VmaDefragmentationContext* pContext)
15007 {
15008  if(info.pAllocationsChanged != VMA_NULL)
15009  {
15010  memset(info.pAllocationsChanged, 0, info.allocationCount * sizeof(VkBool32));
15011  }
15012 
15013  *pContext = vma_new(this, VmaDefragmentationContext_T)(
15014  this, m_CurrentFrameIndex.load(), info.flags, pStats);
15015 
15016  (*pContext)->AddPools(info.poolCount, info.pPools);
15017  (*pContext)->AddAllocations(
15019 
15020  VkResult res = (*pContext)->Defragment(
15023  info.commandBuffer, pStats);
15024 
15025  if(res != VK_NOT_READY)
15026  {
15027  vma_delete(this, *pContext);
15028  *pContext = VMA_NULL;
15029  }
15030 
15031  return res;
15032 }
15033 
/*
Finishes a defragmentation started by DefragmentationBegin by destroying the
context object. Always succeeds.
*/
VkResult VmaAllocator_T::DefragmentationEnd(
    VmaDefragmentationContext context)
{
    vma_delete(this, context);
    return VK_SUCCESS;
}
15040 
/*
Fills *pAllocationInfo with the current state of hAllocation. For allocations
that can become lost, this also "touches" the allocation: it atomically
advances its last-use frame index to the current frame via a CAS loop, so the
allocation is protected from being made lost during this frame. A lost
allocation is reported with null memory/mapped-data and memoryType UINT32_MAX.
*/
void VmaAllocator_T::GetAllocationInfo(VmaAllocation hAllocation, VmaAllocationInfo* pAllocationInfo)
{
    if(hAllocation->CanBecomeLost())
    {
        /*
        Warning: This is a carefully designed algorithm.
        Do not modify unless you really know what you're doing :)
        */
        const uint32_t localCurrFrameIndex = m_CurrentFrameIndex.load();
        uint32_t localLastUseFrameIndex = hAllocation->GetLastUseFrameIndex();
        for(;;)
        {
            if(localLastUseFrameIndex == VMA_FRAME_INDEX_LOST)
            {
                // Allocation is lost: report empty info (size and user data remain).
                pAllocationInfo->memoryType = UINT32_MAX;
                pAllocationInfo->deviceMemory = VK_NULL_HANDLE;
                pAllocationInfo->offset = 0;
                pAllocationInfo->size = hAllocation->GetSize();
                pAllocationInfo->pMappedData = VMA_NULL;
                pAllocationInfo->pUserData = hAllocation->GetUserData();
                return;
            }
            else if(localLastUseFrameIndex == localCurrFrameIndex)
            {
                // Already touched this frame: report live info.
                pAllocationInfo->memoryType = hAllocation->GetMemoryTypeIndex();
                pAllocationInfo->deviceMemory = hAllocation->GetMemory();
                pAllocationInfo->offset = hAllocation->GetOffset();
                pAllocationInfo->size = hAllocation->GetSize();
                pAllocationInfo->pMappedData = VMA_NULL;
                pAllocationInfo->pUserData = hAllocation->GetUserData();
                return;
            }
            else // Last use time earlier than current time.
            {
                // Try to bump last-use to the current frame; on CAS failure the
                // loop re-reads and re-evaluates.
                if(hAllocation->CompareExchangeLastUseFrameIndex(localLastUseFrameIndex, localCurrFrameIndex))
                {
                    localLastUseFrameIndex = localCurrFrameIndex;
                }
            }
        }
    }
    else
    {
#if VMA_STATS_STRING_ENABLED
        // Touch the allocation anyway so statistics reflect the current frame.
        uint32_t localCurrFrameIndex = m_CurrentFrameIndex.load();
        uint32_t localLastUseFrameIndex = hAllocation->GetLastUseFrameIndex();
        for(;;)
        {
            VMA_ASSERT(localLastUseFrameIndex != VMA_FRAME_INDEX_LOST);
            if(localLastUseFrameIndex == localCurrFrameIndex)
            {
                break;
            }
            else // Last use time earlier than current time.
            {
                if(hAllocation->CompareExchangeLastUseFrameIndex(localLastUseFrameIndex, localCurrFrameIndex))
                {
                    localLastUseFrameIndex = localCurrFrameIndex;
                }
            }
        }
#endif

        pAllocationInfo->memoryType = hAllocation->GetMemoryTypeIndex();
        pAllocationInfo->deviceMemory = hAllocation->GetMemory();
        pAllocationInfo->offset = hAllocation->GetOffset();
        pAllocationInfo->size = hAllocation->GetSize();
        pAllocationInfo->pMappedData = hAllocation->GetMappedData();
        pAllocationInfo->pUserData = hAllocation->GetUserData();
    }
}
15112 
/*
Marks hAllocation as used in the current frame (CAS loop on the last-use frame
index) and returns whether it is still alive: false only for an allocation
that can become lost and already has. Allocations that cannot become lost
always return true.
*/
bool VmaAllocator_T::TouchAllocation(VmaAllocation hAllocation)
{
    // This is a stripped-down version of VmaAllocator_T::GetAllocationInfo.
    if(hAllocation->CanBecomeLost())
    {
        uint32_t localCurrFrameIndex = m_CurrentFrameIndex.load();
        uint32_t localLastUseFrameIndex = hAllocation->GetLastUseFrameIndex();
        for(;;)
        {
            if(localLastUseFrameIndex == VMA_FRAME_INDEX_LOST)
            {
                return false;
            }
            else if(localLastUseFrameIndex == localCurrFrameIndex)
            {
                return true;
            }
            else // Last use time earlier than current time.
            {
                // Bump last-use to current frame; retry on CAS failure.
                if(hAllocation->CompareExchangeLastUseFrameIndex(localLastUseFrameIndex, localCurrFrameIndex))
                {
                    localLastUseFrameIndex = localCurrFrameIndex;
                }
            }
        }
    }
    else
    {
#if VMA_STATS_STRING_ENABLED
        // Touch even non-lostable allocations so statistics stay current.
        uint32_t localCurrFrameIndex = m_CurrentFrameIndex.load();
        uint32_t localLastUseFrameIndex = hAllocation->GetLastUseFrameIndex();
        for(;;)
        {
            VMA_ASSERT(localLastUseFrameIndex != VMA_FRAME_INDEX_LOST);
            if(localLastUseFrameIndex == localCurrFrameIndex)
            {
                break;
            }
            else // Last use time earlier than current time.
            {
                if(hAllocation->CompareExchangeLastUseFrameIndex(localLastUseFrameIndex, localCurrFrameIndex))
                {
                    localLastUseFrameIndex = localCurrFrameIndex;
                }
            }
        }
#endif

        return true;
    }
}
15164 
/*
Creates a custom memory pool. Normalizes maxBlockCount == 0 to "unlimited",
validates min <= max, preallocates the minimum number of blocks, then
registers the pool (with a fresh id) in m_Pools under the pools mutex.
On CreateMinBlocks failure the half-constructed pool is destroyed and
*pPool is nulled.
*/
VkResult VmaAllocator_T::CreatePool(const VmaPoolCreateInfo* pCreateInfo, VmaPool* pPool)
{
    VMA_DEBUG_LOG("  CreatePool: MemoryTypeIndex=%u, flags=%u", pCreateInfo->memoryTypeIndex, pCreateInfo->flags);

    VmaPoolCreateInfo newCreateInfo = *pCreateInfo;

    // 0 means no limit on block count.
    if(newCreateInfo.maxBlockCount == 0)
    {
        newCreateInfo.maxBlockCount = SIZE_MAX;
    }
    if(newCreateInfo.minBlockCount > newCreateInfo.maxBlockCount)
    {
        return VK_ERROR_INITIALIZATION_FAILED;
    }

    const VkDeviceSize preferredBlockSize = CalcPreferredBlockSize(newCreateInfo.memoryTypeIndex);

    *pPool = vma_new(this, VmaPool_T)(this, newCreateInfo, preferredBlockSize);

    VkResult res = (*pPool)->m_BlockVector.CreateMinBlocks();
    if(res != VK_SUCCESS)
    {
        vma_delete(this, *pPool);
        *pPool = VMA_NULL;
        return res;
    }

    // Add to m_Pools.
    {
        VmaMutexLockWrite lock(m_PoolsMutex, m_UseMutex);
        (*pPool)->SetId(m_NextPoolId++);
        VmaVectorInsertSorted<VmaPointerLess>(m_Pools, *pPool);
    }

    return VK_SUCCESS;
}
15201 
15202 void VmaAllocator_T::DestroyPool(VmaPool pool)
15203 {
15204  // Remove from m_Pools.
15205  {
15206  VmaMutexLockWrite lock(m_PoolsMutex, m_UseMutex);
15207  bool success = VmaVectorRemoveSorted<VmaPointerLess>(m_Pools, pool);
15208  VMA_ASSERT(success && "Pool not found in Allocator.");
15209  }
15210 
15211  vma_delete(this, pool);
15212 }
15213 
// Fills *pPoolStats with statistics of the given custom pool (delegates to
// its block vector).
void VmaAllocator_T::GetPoolStats(VmaPool pool, VmaPoolStats* pPoolStats)
{
    pool->m_BlockVector.GetPoolStats(pPoolStats);
}
15218 
// Atomically publishes the application's current frame index, used by the
// lost-allocation machinery.
void VmaAllocator_T::SetCurrentFrameIndex(uint32_t frameIndex)
{
    m_CurrentFrameIndex.store(frameIndex);
}
15223 
// Marks eligible allocations of the pool as lost as of the current frame;
// optionally reports how many were affected via pLostAllocationCount.
void VmaAllocator_T::MakePoolAllocationsLost(
    VmaPool hPool,
    size_t* pLostAllocationCount)
{
    hPool->m_BlockVector.MakePoolAllocationsLost(
        m_CurrentFrameIndex.load(),
        pLostAllocationCount);
}
15232 
// Validates corruption-detection margins of a single custom pool
// (delegates to its block vector).
VkResult VmaAllocator_T::CheckPoolCorruption(VmaPool hPool)
{
    return hPool->m_BlockVector.CheckCorruption();
}
15237 
15238 VkResult VmaAllocator_T::CheckCorruption(uint32_t memoryTypeBits)
15239 {
15240  VkResult finalRes = VK_ERROR_FEATURE_NOT_PRESENT;
15241 
15242  // Process default pools.
15243  for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
15244  {
15245  if(((1u << memTypeIndex) & memoryTypeBits) != 0)
15246  {
15247  VmaBlockVector* const pBlockVector = m_pBlockVectors[memTypeIndex];
15248  VMA_ASSERT(pBlockVector);
15249  VkResult localRes = pBlockVector->CheckCorruption();
15250  switch(localRes)
15251  {
15252  case VK_ERROR_FEATURE_NOT_PRESENT:
15253  break;
15254  case VK_SUCCESS:
15255  finalRes = VK_SUCCESS;
15256  break;
15257  default:
15258  return localRes;
15259  }
15260  }
15261  }
15262 
15263  // Process custom pools.
15264  {
15265  VmaMutexLockRead lock(m_PoolsMutex, m_UseMutex);
15266  for(size_t poolIndex = 0, poolCount = m_Pools.size(); poolIndex < poolCount; ++poolIndex)
15267  {
15268  if(((1u << m_Pools[poolIndex]->m_BlockVector.GetMemoryTypeIndex()) & memoryTypeBits) != 0)
15269  {
15270  VkResult localRes = m_Pools[poolIndex]->m_BlockVector.CheckCorruption();
15271  switch(localRes)
15272  {
15273  case VK_ERROR_FEATURE_NOT_PRESENT:
15274  break;
15275  case VK_SUCCESS:
15276  finalRes = VK_SUCCESS;
15277  break;
15278  default:
15279  return localRes;
15280  }
15281  }
15282  }
15283  }
15284 
15285  return finalRes;
15286 }
15287 
15288 void VmaAllocator_T::CreateLostAllocation(VmaAllocation* pAllocation)
15289 {
15290  *pAllocation = m_AllocationObjectAllocator.Allocate();
15291  (*pAllocation)->Ctor(VMA_FRAME_INDEX_LOST, false);
15292  (*pAllocation)->InitLost();
15293 }
15294 
/*
Thin wrapper over vkAllocateMemory that enforces the optional per-heap size
limit (m_HeapSizeLimit) under m_HeapSizeLimitMutex and invokes the user's
pfnAllocate device-memory callback on success.
*/
VkResult VmaAllocator_T::AllocateVulkanMemory(const VkMemoryAllocateInfo* pAllocateInfo, VkDeviceMemory* pMemory)
{
    const uint32_t heapIndex = MemoryTypeIndexToHeapIndex(pAllocateInfo->memoryTypeIndex);

    VkResult res;
    // VK_WHOLE_SIZE in m_HeapSizeLimit means "no limit configured for this heap".
    if(m_HeapSizeLimit[heapIndex] != VK_WHOLE_SIZE)
    {
        // The remaining budget is checked and decremented under the same lock
        // so concurrent allocations cannot oversubscribe the heap.
        VmaMutexLock lock(m_HeapSizeLimitMutex, m_UseMutex);
        if(m_HeapSizeLimit[heapIndex] >= pAllocateInfo->allocationSize)
        {
            res = (*m_VulkanFunctions.vkAllocateMemory)(m_hDevice, pAllocateInfo, GetAllocationCallbacks(), pMemory);
            if(res == VK_SUCCESS)
            {
                m_HeapSizeLimit[heapIndex] -= pAllocateInfo->allocationSize;
            }
        }
        else
        {
            res = VK_ERROR_OUT_OF_DEVICE_MEMORY;
        }
    }
    else
    {
        res = (*m_VulkanFunctions.vkAllocateMemory)(m_hDevice, pAllocateInfo, GetAllocationCallbacks(), pMemory);
    }

    // Informative callback: user is notified after a successful allocation.
    if(res == VK_SUCCESS && m_DeviceMemoryCallbacks.pfnAllocate != VMA_NULL)
    {
        (*m_DeviceMemoryCallbacks.pfnAllocate)(this, pAllocateInfo->memoryTypeIndex, *pMemory, pAllocateInfo->allocationSize);
    }

    return res;
}
15328 
/*
Thin wrapper over vkFreeMemory: notifies the user's pfnFree callback before
the memory is freed, then returns `size` bytes to the per-heap budget if a
heap size limit is configured.
*/
void VmaAllocator_T::FreeVulkanMemory(uint32_t memoryType, VkDeviceSize size, VkDeviceMemory hMemory)
{
    // Callback fires while the memory handle is still valid.
    if(m_DeviceMemoryCallbacks.pfnFree != VMA_NULL)
    {
        (*m_DeviceMemoryCallbacks.pfnFree)(this, memoryType, hMemory, size);
    }

    (*m_VulkanFunctions.vkFreeMemory)(m_hDevice, hMemory, GetAllocationCallbacks());

    // Return the freed bytes to the heap budget (see AllocateVulkanMemory).
    const uint32_t heapIndex = MemoryTypeIndexToHeapIndex(memoryType);
    if(m_HeapSizeLimit[heapIndex] != VK_WHOLE_SIZE)
    {
        VmaMutexLock lock(m_HeapSizeLimitMutex, m_UseMutex);
        m_HeapSizeLimit[heapIndex] += size;
    }
}
15345 
/*
Binds `buffer` to `memory` at `memoryOffset`. A non-null pNext chain requires
vkBindBufferMemory2KHR (VK_KHR_bind_memory2); without that path available the
function returns VK_ERROR_EXTENSION_NOT_PRESENT. With pNext == null it falls
back to plain vkBindBufferMemory. Note the `else` straddling the #endif.
*/
VkResult VmaAllocator_T::BindVulkanBuffer(
    VkDeviceMemory memory,
    VkDeviceSize memoryOffset,
    VkBuffer buffer,
    const void* pNext)
{
    if(pNext != VMA_NULL)
    {
#if VMA_BIND_MEMORY2
        if(m_UseKhrBindMemory2 && m_VulkanFunctions.vkBindBufferMemory2KHR != VMA_NULL)
        {
            VkBindBufferMemoryInfoKHR bindBufferMemoryInfo = { VK_STRUCTURE_TYPE_BIND_BUFFER_MEMORY_INFO_KHR };
            bindBufferMemoryInfo.pNext = pNext;
            bindBufferMemoryInfo.buffer = buffer;
            bindBufferMemoryInfo.memory = memory;
            bindBufferMemoryInfo.memoryOffset = memoryOffset;
            return (*m_VulkanFunctions.vkBindBufferMemory2KHR)(m_hDevice, 1, &bindBufferMemoryInfo);
        }
        else
#endif // #if VMA_BIND_MEMORY2
        {
            // pNext chain cannot be honored without VK_KHR_bind_memory2.
            return VK_ERROR_EXTENSION_NOT_PRESENT;
        }
    }
    else
    {
        return (*m_VulkanFunctions.vkBindBufferMemory)(m_hDevice, buffer, memory, memoryOffset);
    }
}
15375 
15376 VkResult VmaAllocator_T::BindVulkanImage(
15377  VkDeviceMemory memory,
15378  VkDeviceSize memoryOffset,
15379  VkImage image,
15380  const void* pNext)
15381 {
15382  if(pNext != VMA_NULL)
15383  {
15384 #if VMA_BIND_MEMORY2
15385  if(m_UseKhrBindMemory2 && m_VulkanFunctions.vkBindImageMemory2KHR != VMA_NULL)
15386  {
15387  VkBindImageMemoryInfoKHR bindBufferMemoryInfo = { VK_STRUCTURE_TYPE_BIND_IMAGE_MEMORY_INFO_KHR };
15388  bindBufferMemoryInfo.pNext = pNext;
15389  bindBufferMemoryInfo.image = image;
15390  bindBufferMemoryInfo.memory = memory;
15391  bindBufferMemoryInfo.memoryOffset = memoryOffset;
15392  return (*m_VulkanFunctions.vkBindImageMemory2KHR)(m_hDevice, 1, &bindBufferMemoryInfo);
15393  }
15394  else
15395 #endif // #if VMA_BIND_MEMORY2
15396  {
15397  return VK_ERROR_EXTENSION_NOT_PRESENT;
15398  }
15399  }
15400  else
15401  {
15402  return (*m_VulkanFunctions.vkBindImageMemory)(m_hDevice, image, memory, memoryOffset);
15403  }
15404 }
15405 
/*
Maps the allocation's memory and returns a pointer to its data in *ppData.
Lost-capable allocations cannot be mapped. For block (sub)allocations the
whole block is mapped (reference-counted) and the returned pointer is offset
to this allocation's start; dedicated allocations map their own memory.
*/
VkResult VmaAllocator_T::Map(VmaAllocation hAllocation, void** ppData)
{
    if(hAllocation->CanBecomeLost())
    {
        return VK_ERROR_MEMORY_MAP_FAILED;
    }

    switch(hAllocation->GetType())
    {
    case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
        {
            VmaDeviceMemoryBlock* const pBlock = hAllocation->GetBlock();
            char *pBytes = VMA_NULL;
            VkResult res = pBlock->Map(this, 1, (void**)&pBytes);
            if(res == VK_SUCCESS)
            {
                // Offset the block's base pointer to this suballocation.
                *ppData = pBytes + (ptrdiff_t)hAllocation->GetOffset();
                hAllocation->BlockAllocMap();
            }
            return res;
        }
    case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
        return hAllocation->DedicatedAllocMap(this, ppData);
    default:
        VMA_ASSERT(0);
        return VK_ERROR_MEMORY_MAP_FAILED;
    }
}
15434 
/*
Reverses one Map() call on the allocation: decrements the allocation's map
reference count and, for block allocations, the owning block's map count
(the block's memory is unmapped only when its count reaches zero).
*/
void VmaAllocator_T::Unmap(VmaAllocation hAllocation)
{
    switch(hAllocation->GetType())
    {
    case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
        {
            VmaDeviceMemoryBlock* const pBlock = hAllocation->GetBlock();
            hAllocation->BlockAllocUnmap();
            pBlock->Unmap(this, 1);
        }
        break;
    case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
        hAllocation->DedicatedAllocUnmap(this);
        break;
    default:
        VMA_ASSERT(0);
    }
}
15453 
/*
Binds hBuffer to the memory backing hAllocation, at allocationLocalOffset
relative to the allocation's start. Dedicated allocations bind directly;
block allocations delegate to the owning block so the block-level offset is
applied (and the block's mapping mutex is respected).
*/
VkResult VmaAllocator_T::BindBufferMemory(
    VmaAllocation hAllocation,
    VkDeviceSize allocationLocalOffset,
    VkBuffer hBuffer,
    const void* pNext)
{
    VkResult res = VK_SUCCESS;
    switch(hAllocation->GetType())
    {
    case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
        res = BindVulkanBuffer(hAllocation->GetMemory(), allocationLocalOffset, hBuffer, pNext);
        break;
    case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
    {
        VmaDeviceMemoryBlock* const pBlock = hAllocation->GetBlock();
        VMA_ASSERT(pBlock && "Binding buffer to allocation that doesn't belong to any block. Is the allocation lost?");
        res = pBlock->BindBufferMemory(this, hAllocation, allocationLocalOffset, hBuffer, pNext);
        break;
    }
    default:
        VMA_ASSERT(0);
    }
    return res;
}
15478 
15479 VkResult VmaAllocator_T::BindImageMemory(
15480  VmaAllocation hAllocation,
15481  VkDeviceSize allocationLocalOffset,
15482  VkImage hImage,
15483  const void* pNext)
15484 {
15485  VkResult res = VK_SUCCESS;
15486  switch(hAllocation->GetType())
15487  {
15488  case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
15489  res = BindVulkanImage(hAllocation->GetMemory(), allocationLocalOffset, hImage, pNext);
15490  break;
15491  case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
15492  {
15493  VmaDeviceMemoryBlock* pBlock = hAllocation->GetBlock();
15494  VMA_ASSERT(pBlock && "Binding image to allocation that doesn't belong to any block. Is the allocation lost?");
15495  res = pBlock->BindImageMemory(this, hAllocation, allocationLocalOffset, hImage, pNext);
15496  break;
15497  }
15498  default:
15499  VMA_ASSERT(0);
15500  }
15501  return res;
15502 }
15503 
// Flushes (op == VMA_CACHE_FLUSH) or invalidates (op == VMA_CACHE_INVALIDATE)
// the byte range [offset, offset+size) of the given allocation. No-op when
// size == 0 or the allocation's memory type is host-coherent.
// size may be VK_WHOLE_SIZE to cover the rest of the allocation.
void VmaAllocator_T::FlushOrInvalidateAllocation(
    VmaAllocation hAllocation,
    VkDeviceSize offset, VkDeviceSize size,
    VMA_CACHE_OPERATION op)
{
    const uint32_t memTypeIndex = hAllocation->GetMemoryTypeIndex();
    if(size > 0 && IsMemoryTypeNonCoherent(memTypeIndex))
    {
        const VkDeviceSize allocationSize = hAllocation->GetSize();
        VMA_ASSERT(offset <= allocationSize);

        const VkDeviceSize nonCoherentAtomSize = m_PhysicalDeviceProperties.limits.nonCoherentAtomSize;

        VkMappedMemoryRange memRange = { VK_STRUCTURE_TYPE_MAPPED_MEMORY_RANGE };
        memRange.memory = hAllocation->GetMemory();

        switch(hAllocation->GetType())
        {
        case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
            // Expand the range to nonCoherentAtomSize alignment (required by
            // the Vulkan spec for VkMappedMemoryRange), clamped to the
            // allocation's end.
            memRange.offset = VmaAlignDown(offset, nonCoherentAtomSize);
            if(size == VK_WHOLE_SIZE)
            {
                memRange.size = allocationSize - memRange.offset;
            }
            else
            {
                VMA_ASSERT(offset + size <= allocationSize);
                memRange.size = VMA_MIN(
                    VmaAlignUp(size + (offset - memRange.offset), nonCoherentAtomSize),
                    allocationSize - memRange.offset);
            }
            break;

        case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
        {
            // 1. Still within this allocation: align the range as above, in
            // allocation-local coordinates.
            memRange.offset = VmaAlignDown(offset, nonCoherentAtomSize);
            if(size == VK_WHOLE_SIZE)
            {
                size = allocationSize - offset;
            }
            else
            {
                VMA_ASSERT(offset + size <= allocationSize);
            }
            memRange.size = VmaAlignUp(size + (offset - memRange.offset), nonCoherentAtomSize);

            // 2. Adjust to whole block: translate to block-relative offset and
            // clamp size so the range never exceeds the block.
            const VkDeviceSize allocationOffset = hAllocation->GetOffset();
            VMA_ASSERT(allocationOffset % nonCoherentAtomSize == 0);
            const VkDeviceSize blockSize = hAllocation->GetBlock()->m_pMetadata->GetSize();
            memRange.offset += allocationOffset;
            memRange.size = VMA_MIN(memRange.size, blockSize - memRange.offset);

            break;
        }

        default:
            VMA_ASSERT(0);
        }

        switch(op)
        {
        case VMA_CACHE_FLUSH:
            (*GetVulkanFunctions().vkFlushMappedMemoryRanges)(m_hDevice, 1, &memRange);
            break;
        case VMA_CACHE_INVALIDATE:
            (*GetVulkanFunctions().vkInvalidateMappedMemoryRanges)(m_hDevice, 1, &memRange);
            break;
        default:
            VMA_ASSERT(0);
        }
    }
    // else: Just ignore this call.
}
15579 
// Frees a dedicated (non-block) allocation: unregisters it from the
// per-memory-type dedicated-allocation list, then releases its VkDeviceMemory.
void VmaAllocator_T::FreeDedicatedMemory(VmaAllocation allocation)
{
    VMA_ASSERT(allocation && allocation->GetType() == VmaAllocation_T::ALLOCATION_TYPE_DEDICATED);

    const uint32_t memTypeIndex = allocation->GetMemoryTypeIndex();
    {
        // Scoped write lock: only the list removal needs the mutex, not the
        // (potentially slow) vkFreeMemory below.
        VmaMutexLockWrite lock(m_DedicatedAllocationsMutex[memTypeIndex], m_UseMutex);
        AllocationVectorType* const pDedicatedAllocations = m_pDedicatedAllocations[memTypeIndex];
        VMA_ASSERT(pDedicatedAllocations);
        bool success = VmaVectorRemoveSorted<VmaPointerLess>(*pDedicatedAllocations, allocation);
        VMA_ASSERT(success);
    }

    VkDeviceMemory hMemory = allocation->GetMemory();

    /*
    There is no need to call this, because Vulkan spec allows to skip vkUnmapMemory
    before vkFreeMemory.

    if(allocation->GetMappedData() != VMA_NULL)
    {
        (*m_VulkanFunctions.vkUnmapMemory)(m_hDevice, hMemory);
    }
    */

    FreeVulkanMemory(memTypeIndex, allocation->GetSize(), hMemory);

    VMA_DEBUG_LOG("    Freed DedicatedMemory MemoryTypeIndex=%u", memTypeIndex);
}
15609 
15610 uint32_t VmaAllocator_T::CalculateGpuDefragmentationMemoryTypeBits() const
15611 {
15612  VkBufferCreateInfo dummyBufCreateInfo;
15613  VmaFillGpuDefragmentationBufferCreateInfo(dummyBufCreateInfo);
15614 
15615  uint32_t memoryTypeBits = 0;
15616 
15617  // Create buffer.
15618  VkBuffer buf = VK_NULL_HANDLE;
15619  VkResult res = (*GetVulkanFunctions().vkCreateBuffer)(
15620  m_hDevice, &dummyBufCreateInfo, GetAllocationCallbacks(), &buf);
15621  if(res == VK_SUCCESS)
15622  {
15623  // Query for supported memory types.
15624  VkMemoryRequirements memReq;
15625  (*GetVulkanFunctions().vkGetBufferMemoryRequirements)(m_hDevice, buf, &memReq);
15626  memoryTypeBits = memReq.memoryTypeBits;
15627 
15628  // Destroy buffer.
15629  (*GetVulkanFunctions().vkDestroyBuffer)(m_hDevice, buf, GetAllocationCallbacks());
15630  }
15631 
15632  return memoryTypeBits;
15633 }
15634 
15635 void VmaAllocator_T::FillAllocation(const VmaAllocation hAllocation, uint8_t pattern)
15636 {
15637  if(VMA_DEBUG_INITIALIZE_ALLOCATIONS &&
15638  !hAllocation->CanBecomeLost() &&
15639  (m_MemProps.memoryTypes[hAllocation->GetMemoryTypeIndex()].propertyFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) != 0)
15640  {
15641  void* pData = VMA_NULL;
15642  VkResult res = Map(hAllocation, &pData);
15643  if(res == VK_SUCCESS)
15644  {
15645  memset(pData, (int)pattern, (size_t)hAllocation->GetSize());
15646  FlushOrInvalidateAllocation(hAllocation, 0, VK_WHOLE_SIZE, VMA_CACHE_FLUSH);
15647  Unmap(hAllocation);
15648  }
15649  else
15650  {
15651  VMA_ASSERT(0 && "VMA_DEBUG_INITIALIZE_ALLOCATIONS is enabled, but couldn't map memory to fill allocation.");
15652  }
15653  }
15654 }
15655 
15656 uint32_t VmaAllocator_T::GetGpuDefragmentationMemoryTypeBits()
15657 {
15658  uint32_t memoryTypeBits = m_GpuDefragmentationMemoryTypeBits.load();
15659  if(memoryTypeBits == UINT32_MAX)
15660  {
15661  memoryTypeBits = CalculateGpuDefragmentationMemoryTypeBits();
15662  m_GpuDefragmentationMemoryTypeBits.store(memoryTypeBits);
15663  }
15664  return memoryTypeBits;
15665 }
15666 
15667 #if VMA_STATS_STRING_ENABLED
15668 
// Writes a detailed JSON description of all memory owned by this allocator:
// dedicated allocations per memory type, default block vectors per memory
// type, and custom pools. Assumes `json` is positioned inside an open object.
void VmaAllocator_T::PrintDetailedMap(VmaJsonWriter& json)
{
    // "DedicatedAllocations" object is emitted lazily, only if at least one
    // memory type has dedicated allocations.
    bool dedicatedAllocationsStarted = false;
    for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
    {
        VmaMutexLockRead dedicatedAllocationsLock(m_DedicatedAllocationsMutex[memTypeIndex], m_UseMutex);
        AllocationVectorType* const pDedicatedAllocVector = m_pDedicatedAllocations[memTypeIndex];
        VMA_ASSERT(pDedicatedAllocVector);
        if(pDedicatedAllocVector->empty() == false)
        {
            if(dedicatedAllocationsStarted == false)
            {
                dedicatedAllocationsStarted = true;
                json.WriteString("DedicatedAllocations");
                json.BeginObject();
            }

            // Key: "Type <memTypeIndex>".
            json.BeginString("Type ");
            json.ContinueString(memTypeIndex);
            json.EndString();

            json.BeginArray();

            for(size_t i = 0; i < pDedicatedAllocVector->size(); ++i)
            {
                json.BeginObject(true);
                const VmaAllocation hAlloc = (*pDedicatedAllocVector)[i];
                hAlloc->PrintParameters(json);
                json.EndObject();
            }

            json.EndArray();
        }
    }
    if(dedicatedAllocationsStarted)
    {
        json.EndObject();
    }

    // "DefaultPools": block vectors of the non-custom (default) pools, again
    // emitted lazily only if any memory type has blocks.
    {
        bool allocationsStarted = false;
        for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
        {
            if(m_pBlockVectors[memTypeIndex]->IsEmpty() == false)
            {
                if(allocationsStarted == false)
                {
                    allocationsStarted = true;
                    json.WriteString("DefaultPools");
                    json.BeginObject();
                }

                json.BeginString("Type ");
                json.ContinueString(memTypeIndex);
                json.EndString();

                m_pBlockVectors[memTypeIndex]->PrintDetailedMap(json);
            }
        }
        if(allocationsStarted)
        {
            json.EndObject();
        }
    }

    // Custom pools, keyed by pool id.
    {
        VmaMutexLockRead lock(m_PoolsMutex, m_UseMutex);
        const size_t poolCount = m_Pools.size();
        if(poolCount > 0)
        {
            json.WriteString("Pools");
            json.BeginObject();
            for(size_t poolIndex = 0; poolIndex < poolCount; ++poolIndex)
            {
                json.BeginString();
                json.ContinueString(m_Pools[poolIndex]->GetId());
                json.EndString();

                m_Pools[poolIndex]->m_BlockVector.PrintDetailedMap(json);
            }
            json.EndObject();
        }
    }
}
15754 
15755 #endif // #if VMA_STATS_STRING_ENABLED
15756 
15758 // Public interface
15759 
15760 VkResult vmaCreateAllocator(
15761  const VmaAllocatorCreateInfo* pCreateInfo,
15762  VmaAllocator* pAllocator)
15763 {
15764  VMA_ASSERT(pCreateInfo && pAllocator);
15765  VMA_DEBUG_LOG("vmaCreateAllocator");
15766  *pAllocator = vma_new(pCreateInfo->pAllocationCallbacks, VmaAllocator_T)(pCreateInfo);
15767  return (*pAllocator)->Init(pCreateInfo);
15768 }
15769 
15770 void vmaDestroyAllocator(
15771  VmaAllocator allocator)
15772 {
15773  if(allocator != VK_NULL_HANDLE)
15774  {
15775  VMA_DEBUG_LOG("vmaDestroyAllocator");
15776  VkAllocationCallbacks allocationCallbacks = allocator->m_AllocationCallbacks;
15777  vma_delete(&allocationCallbacks, allocator);
15778  }
15779 }
15780 
15782  VmaAllocator allocator,
15783  const VkPhysicalDeviceProperties **ppPhysicalDeviceProperties)
15784 {
15785  VMA_ASSERT(allocator && ppPhysicalDeviceProperties);
15786  *ppPhysicalDeviceProperties = &allocator->m_PhysicalDeviceProperties;
15787 }
15788 
15790  VmaAllocator allocator,
15791  const VkPhysicalDeviceMemoryProperties** ppPhysicalDeviceMemoryProperties)
15792 {
15793  VMA_ASSERT(allocator && ppPhysicalDeviceMemoryProperties);
15794  *ppPhysicalDeviceMemoryProperties = &allocator->m_MemProps;
15795 }
15796 
15798  VmaAllocator allocator,
15799  uint32_t memoryTypeIndex,
15800  VkMemoryPropertyFlags* pFlags)
15801 {
15802  VMA_ASSERT(allocator && pFlags);
15803  VMA_ASSERT(memoryTypeIndex < allocator->GetMemoryTypeCount());
15804  *pFlags = allocator->m_MemProps.memoryTypes[memoryTypeIndex].propertyFlags;
15805 }
15806 
15808  VmaAllocator allocator,
15809  uint32_t frameIndex)
15810 {
15811  VMA_ASSERT(allocator);
15812  VMA_ASSERT(frameIndex != VMA_FRAME_INDEX_LOST);
15813 
15814  VMA_DEBUG_GLOBAL_MUTEX_LOCK
15815 
15816  allocator->SetCurrentFrameIndex(frameIndex);
15817 }
15818 
// Fills *pStats with statistics aggregated over all memory of this allocator
// (per memory type, per heap, and total).
void vmaCalculateStats(
    VmaAllocator allocator,
    VmaStats* pStats)
{
    VMA_ASSERT(allocator && pStats);
    VMA_DEBUG_GLOBAL_MUTEX_LOCK
    allocator->CalculateStats(pStats);
}
15827 
15828 #if VMA_STATS_STRING_ENABLED
15829 
// Builds a JSON string describing the allocator's current state and stores a
// newly allocated, NUL-terminated copy in *ppStatsString. The caller must
// release it with vmaFreeStatsString. When detailedMap is VK_TRUE the output
// additionally includes the full per-allocation map.
void vmaBuildStatsString(
    VmaAllocator allocator,
    char** ppStatsString,
    VkBool32 detailedMap)
{
    VMA_ASSERT(allocator && ppStatsString);
    VMA_DEBUG_GLOBAL_MUTEX_LOCK

    VmaStringBuilder sb(allocator);
    {
        // Scope ensures the JSON writer is finished before the string is copied.
        VmaJsonWriter json(allocator->GetAllocationCallbacks(), sb);
        json.BeginObject();

        VmaStats stats;
        allocator->CalculateStats(&stats);

        json.WriteString("Total");
        VmaPrintStatInfo(json, stats.total);

        // One object per memory heap: size, flags, stats, and nested objects
        // for each memory type belonging to that heap.
        for(uint32_t heapIndex = 0; heapIndex < allocator->GetMemoryHeapCount(); ++heapIndex)
        {
            json.BeginString("Heap ");
            json.ContinueString(heapIndex);
            json.EndString();
            json.BeginObject();

            json.WriteString("Size");
            json.WriteNumber(allocator->m_MemProps.memoryHeaps[heapIndex].size);

            json.WriteString("Flags");
            json.BeginArray(true);
            if((allocator->m_MemProps.memoryHeaps[heapIndex].flags & VK_MEMORY_HEAP_DEVICE_LOCAL_BIT) != 0)
            {
                json.WriteString("DEVICE_LOCAL");
            }
            json.EndArray();

            // Heap stats only when the heap actually has blocks.
            if(stats.memoryHeap[heapIndex].blockCount > 0)
            {
                json.WriteString("Stats");
                VmaPrintStatInfo(json, stats.memoryHeap[heapIndex]);
            }

            for(uint32_t typeIndex = 0; typeIndex < allocator->GetMemoryTypeCount(); ++typeIndex)
            {
                if(allocator->MemoryTypeIndexToHeapIndex(typeIndex) == heapIndex)
                {
                    json.BeginString("Type ");
                    json.ContinueString(typeIndex);
                    json.EndString();

                    json.BeginObject();

                    // Spell out each property flag as a readable string.
                    json.WriteString("Flags");
                    json.BeginArray(true);
                    VkMemoryPropertyFlags flags = allocator->m_MemProps.memoryTypes[typeIndex].propertyFlags;
                    if((flags & VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT) != 0)
                    {
                        json.WriteString("DEVICE_LOCAL");
                    }
                    if((flags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) != 0)
                    {
                        json.WriteString("HOST_VISIBLE");
                    }
                    if((flags & VK_MEMORY_PROPERTY_HOST_COHERENT_BIT) != 0)
                    {
                        json.WriteString("HOST_COHERENT");
                    }
                    if((flags & VK_MEMORY_PROPERTY_HOST_CACHED_BIT) != 0)
                    {
                        json.WriteString("HOST_CACHED");
                    }
                    if((flags & VK_MEMORY_PROPERTY_LAZILY_ALLOCATED_BIT) != 0)
                    {
                        json.WriteString("LAZILY_ALLOCATED");
                    }
                    json.EndArray();

                    if(stats.memoryType[typeIndex].blockCount > 0)
                    {
                        json.WriteString("Stats");
                        VmaPrintStatInfo(json, stats.memoryType[typeIndex]);
                    }

                    json.EndObject();
                }
            }

            json.EndObject();
        }
        if(detailedMap == VK_TRUE)
        {
            allocator->PrintDetailedMap(json);
        }

        json.EndObject();
    }

    // Copy the builder's contents into a caller-owned, NUL-terminated buffer.
    const size_t len = sb.GetLength();
    char* const pChars = vma_new_array(allocator, char, len + 1);
    if(len > 0)
    {
        memcpy(pChars, sb.GetData(), len);
    }
    pChars[len] = '\0';
    *ppStatsString = pChars;
}
15937 
15938 void vmaFreeStatsString(
15939  VmaAllocator allocator,
15940  char* pStatsString)
15941 {
15942  if(pStatsString != VMA_NULL)
15943  {
15944  VMA_ASSERT(allocator);
15945  size_t len = strlen(pStatsString);
15946  vma_delete_array(allocator, pStatsString, len + 1);
15947  }
15948 }
15949 
15950 #endif // #if VMA_STATS_STRING_ENABLED
15951 
15952 /*
15953 This function is not protected by any mutex because it just reads immutable data.
15954 */
15955 VkResult vmaFindMemoryTypeIndex(
15956  VmaAllocator allocator,
15957  uint32_t memoryTypeBits,
15958  const VmaAllocationCreateInfo* pAllocationCreateInfo,
15959  uint32_t* pMemoryTypeIndex)
15960 {
15961  VMA_ASSERT(allocator != VK_NULL_HANDLE);
15962  VMA_ASSERT(pAllocationCreateInfo != VMA_NULL);
15963  VMA_ASSERT(pMemoryTypeIndex != VMA_NULL);
15964 
15965  if(pAllocationCreateInfo->memoryTypeBits != 0)
15966  {
15967  memoryTypeBits &= pAllocationCreateInfo->memoryTypeBits;
15968  }
15969 
15970  uint32_t requiredFlags = pAllocationCreateInfo->requiredFlags;
15971  uint32_t preferredFlags = pAllocationCreateInfo->preferredFlags;
15972 
15973  // Convert usage to requiredFlags and preferredFlags.
15974  switch(pAllocationCreateInfo->usage)
15975  {
15977  break;
15979  if(!allocator->IsIntegratedGpu() || (preferredFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) == 0)
15980  {
15981  preferredFlags |= VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT;
15982  }
15983  break;
15985  requiredFlags |= VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_COHERENT_BIT;
15986  break;
15988  requiredFlags |= VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT;
15989  if(!allocator->IsIntegratedGpu() || (preferredFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) == 0)
15990  {
15991  preferredFlags |= VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT;
15992  }
15993  break;
15995  requiredFlags |= VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT;
15996  preferredFlags |= VK_MEMORY_PROPERTY_HOST_CACHED_BIT;
15997  break;
15998  default:
15999  break;
16000  }
16001 
16002  *pMemoryTypeIndex = UINT32_MAX;
16003  uint32_t minCost = UINT32_MAX;
16004  for(uint32_t memTypeIndex = 0, memTypeBit = 1;
16005  memTypeIndex < allocator->GetMemoryTypeCount();
16006  ++memTypeIndex, memTypeBit <<= 1)
16007  {
16008  // This memory type is acceptable according to memoryTypeBits bitmask.
16009  if((memTypeBit & memoryTypeBits) != 0)
16010  {
16011  const VkMemoryPropertyFlags currFlags =
16012  allocator->m_MemProps.memoryTypes[memTypeIndex].propertyFlags;
16013  // This memory type contains requiredFlags.
16014  if((requiredFlags & ~currFlags) == 0)
16015  {
16016  // Calculate cost as number of bits from preferredFlags not present in this memory type.
16017  uint32_t currCost = VmaCountBitsSet(preferredFlags & ~currFlags);
16018  // Remember memory type with lowest cost.
16019  if(currCost < minCost)
16020  {
16021  *pMemoryTypeIndex = memTypeIndex;
16022  if(currCost == 0)
16023  {
16024  return VK_SUCCESS;
16025  }
16026  minCost = currCost;
16027  }
16028  }
16029  }
16030  }
16031  return (*pMemoryTypeIndex != UINT32_MAX) ? VK_SUCCESS : VK_ERROR_FEATURE_NOT_PRESENT;
16032 }
16033 
16035  VmaAllocator allocator,
16036  const VkBufferCreateInfo* pBufferCreateInfo,
16037  const VmaAllocationCreateInfo* pAllocationCreateInfo,
16038  uint32_t* pMemoryTypeIndex)
16039 {
16040  VMA_ASSERT(allocator != VK_NULL_HANDLE);
16041  VMA_ASSERT(pBufferCreateInfo != VMA_NULL);
16042  VMA_ASSERT(pAllocationCreateInfo != VMA_NULL);
16043  VMA_ASSERT(pMemoryTypeIndex != VMA_NULL);
16044 
16045  const VkDevice hDev = allocator->m_hDevice;
16046  VkBuffer hBuffer = VK_NULL_HANDLE;
16047  VkResult res = allocator->GetVulkanFunctions().vkCreateBuffer(
16048  hDev, pBufferCreateInfo, allocator->GetAllocationCallbacks(), &hBuffer);
16049  if(res == VK_SUCCESS)
16050  {
16051  VkMemoryRequirements memReq = {};
16052  allocator->GetVulkanFunctions().vkGetBufferMemoryRequirements(
16053  hDev, hBuffer, &memReq);
16054 
16055  res = vmaFindMemoryTypeIndex(
16056  allocator,
16057  memReq.memoryTypeBits,
16058  pAllocationCreateInfo,
16059  pMemoryTypeIndex);
16060 
16061  allocator->GetVulkanFunctions().vkDestroyBuffer(
16062  hDev, hBuffer, allocator->GetAllocationCallbacks());
16063  }
16064  return res;
16065 }
16066 
16068  VmaAllocator allocator,
16069  const VkImageCreateInfo* pImageCreateInfo,
16070  const VmaAllocationCreateInfo* pAllocationCreateInfo,
16071  uint32_t* pMemoryTypeIndex)
16072 {
16073  VMA_ASSERT(allocator != VK_NULL_HANDLE);
16074  VMA_ASSERT(pImageCreateInfo != VMA_NULL);
16075  VMA_ASSERT(pAllocationCreateInfo != VMA_NULL);
16076  VMA_ASSERT(pMemoryTypeIndex != VMA_NULL);
16077 
16078  const VkDevice hDev = allocator->m_hDevice;
16079  VkImage hImage = VK_NULL_HANDLE;
16080  VkResult res = allocator->GetVulkanFunctions().vkCreateImage(
16081  hDev, pImageCreateInfo, allocator->GetAllocationCallbacks(), &hImage);
16082  if(res == VK_SUCCESS)
16083  {
16084  VkMemoryRequirements memReq = {};
16085  allocator->GetVulkanFunctions().vkGetImageMemoryRequirements(
16086  hDev, hImage, &memReq);
16087 
16088  res = vmaFindMemoryTypeIndex(
16089  allocator,
16090  memReq.memoryTypeBits,
16091  pAllocationCreateInfo,
16092  pMemoryTypeIndex);
16093 
16094  allocator->GetVulkanFunctions().vkDestroyImage(
16095  hDev, hImage, allocator->GetAllocationCallbacks());
16096  }
16097  return res;
16098 }
16099 
// Creates a custom memory pool described by pCreateInfo and returns its
// handle in *pPool. Recorded in the replay file when recording is enabled.
VkResult vmaCreatePool(
    VmaAllocator allocator,
    const VmaPoolCreateInfo* pCreateInfo,
    VmaPool* pPool)
{
    VMA_ASSERT(allocator && pCreateInfo && pPool);

    VMA_DEBUG_LOG("vmaCreatePool");

    VMA_DEBUG_GLOBAL_MUTEX_LOCK

    VkResult res = allocator->CreatePool(pCreateInfo, pPool);

#if VMA_RECORDING_ENABLED
    if(allocator->GetRecorder() != VMA_NULL)
    {
        allocator->GetRecorder()->RecordCreatePool(allocator->GetCurrentFrameIndex(), *pCreateInfo, *pPool);
    }
#endif

    return res;
}
16122 
// Destroys a custom pool created with vmaCreatePool. Null pool is a no-op.
void vmaDestroyPool(
    VmaAllocator allocator,
    VmaPool pool)
{
    VMA_ASSERT(allocator);

    if(pool == VK_NULL_HANDLE)
    {
        return;
    }

    VMA_DEBUG_LOG("vmaDestroyPool");

    VMA_DEBUG_GLOBAL_MUTEX_LOCK

#if VMA_RECORDING_ENABLED
    // Record before destruction while the pool handle is still valid.
    if(allocator->GetRecorder() != VMA_NULL)
    {
        allocator->GetRecorder()->RecordDestroyPool(allocator->GetCurrentFrameIndex(), pool);
    }
#endif

    allocator->DestroyPool(pool);
}
16147 
// Fills *pPoolStats with current statistics of the given custom pool.
void vmaGetPoolStats(
    VmaAllocator allocator,
    VmaPool pool,
    VmaPoolStats* pPoolStats)
{
    VMA_ASSERT(allocator && pool && pPoolStats);

    VMA_DEBUG_GLOBAL_MUTEX_LOCK

    allocator->GetPoolStats(pool, pPoolStats);
}
16159 
16161  VmaAllocator allocator,
16162  VmaPool pool,
16163  size_t* pLostAllocationCount)
16164 {
16165  VMA_ASSERT(allocator && pool);
16166 
16167  VMA_DEBUG_GLOBAL_MUTEX_LOCK
16168 
16169 #if VMA_RECORDING_ENABLED
16170  if(allocator->GetRecorder() != VMA_NULL)
16171  {
16172  allocator->GetRecorder()->RecordMakePoolAllocationsLost(allocator->GetCurrentFrameIndex(), pool);
16173  }
16174 #endif
16175 
16176  allocator->MakePoolAllocationsLost(pool, pLostAllocationCount);
16177 }
16178 
// Validates margin fill patterns of all allocations in the pool (only
// meaningful when corruption detection is enabled). Returns the result of the
// internal check.
VkResult vmaCheckPoolCorruption(VmaAllocator allocator, VmaPool pool)
{
    VMA_ASSERT(allocator && pool);

    VMA_DEBUG_GLOBAL_MUTEX_LOCK

    VMA_DEBUG_LOG("vmaCheckPoolCorruption");

    return allocator->CheckPoolCorruption(pool);
}
16189 
// Allocates memory described by generic VkMemoryRequirements, not tied to any
// particular buffer or image. On success optionally fills *pAllocationInfo.
VkResult vmaAllocateMemory(
    VmaAllocator allocator,
    const VkMemoryRequirements* pVkMemoryRequirements,
    const VmaAllocationCreateInfo* pCreateInfo,
    VmaAllocation* pAllocation,
    VmaAllocationInfo* pAllocationInfo)
{
    VMA_ASSERT(allocator && pVkMemoryRequirements && pCreateInfo && pAllocation);

    VMA_DEBUG_LOG("vmaAllocateMemory");

    VMA_DEBUG_GLOBAL_MUTEX_LOCK

    // No dedicated buffer/image hints: this is a generic allocation.
    VkResult result = allocator->AllocateMemory(
        *pVkMemoryRequirements,
        false, // requiresDedicatedAllocation
        false, // prefersDedicatedAllocation
        VK_NULL_HANDLE, // dedicatedBuffer
        VK_NULL_HANDLE, // dedicatedImage
        *pCreateInfo,
        VMA_SUBALLOCATION_TYPE_UNKNOWN,
        1, // allocationCount
        pAllocation);

#if VMA_RECORDING_ENABLED
    if(allocator->GetRecorder() != VMA_NULL)
    {
        allocator->GetRecorder()->RecordAllocateMemory(
            allocator->GetCurrentFrameIndex(),
            *pVkMemoryRequirements,
            *pCreateInfo,
            *pAllocation);
    }
#endif

    // pAllocationInfo is optional.
    if(pAllocationInfo != VMA_NULL && result == VK_SUCCESS)
    {
        allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
    }

    return result;
}
16232 
// Allocates allocationCount allocations in one call, all described by the same
// requirements and create info. pAllocationInfo, when non-null, must point to
// an array of allocationCount elements.
VkResult vmaAllocateMemoryPages(
    VmaAllocator allocator,
    const VkMemoryRequirements* pVkMemoryRequirements,
    const VmaAllocationCreateInfo* pCreateInfo,
    size_t allocationCount,
    VmaAllocation* pAllocations,
    VmaAllocationInfo* pAllocationInfo)
{
    // Zero allocations is trivially successful.
    if(allocationCount == 0)
    {
        return VK_SUCCESS;
    }

    VMA_ASSERT(allocator && pVkMemoryRequirements && pCreateInfo && pAllocations);

    VMA_DEBUG_LOG("vmaAllocateMemoryPages");

    VMA_DEBUG_GLOBAL_MUTEX_LOCK

    VkResult result = allocator->AllocateMemory(
        *pVkMemoryRequirements,
        false, // requiresDedicatedAllocation
        false, // prefersDedicatedAllocation
        VK_NULL_HANDLE, // dedicatedBuffer
        VK_NULL_HANDLE, // dedicatedImage
        *pCreateInfo,
        VMA_SUBALLOCATION_TYPE_UNKNOWN,
        allocationCount,
        pAllocations);

#if VMA_RECORDING_ENABLED
    if(allocator->GetRecorder() != VMA_NULL)
    {
        allocator->GetRecorder()->RecordAllocateMemoryPages(
            allocator->GetCurrentFrameIndex(),
            *pVkMemoryRequirements,
            *pCreateInfo,
            (uint64_t)allocationCount,
            pAllocations);
    }
#endif

    // Fill one info struct per allocation when requested.
    if(pAllocationInfo != VMA_NULL && result == VK_SUCCESS)
    {
        for(size_t i = 0; i < allocationCount; ++i)
        {
            allocator->GetAllocationInfo(pAllocations[i], pAllocationInfo + i);
        }
    }

    return result;
}
16285 
16287  VmaAllocator allocator,
16288  VkBuffer buffer,
16289  const VmaAllocationCreateInfo* pCreateInfo,
16290  VmaAllocation* pAllocation,
16291  VmaAllocationInfo* pAllocationInfo)
16292 {
16293  VMA_ASSERT(allocator && buffer != VK_NULL_HANDLE && pCreateInfo && pAllocation);
16294 
16295  VMA_DEBUG_LOG("vmaAllocateMemoryForBuffer");
16296 
16297  VMA_DEBUG_GLOBAL_MUTEX_LOCK
16298 
16299  VkMemoryRequirements vkMemReq = {};
16300  bool requiresDedicatedAllocation = false;
16301  bool prefersDedicatedAllocation = false;
16302  allocator->GetBufferMemoryRequirements(buffer, vkMemReq,
16303  requiresDedicatedAllocation,
16304  prefersDedicatedAllocation);
16305 
16306  VkResult result = allocator->AllocateMemory(
16307  vkMemReq,
16308  requiresDedicatedAllocation,
16309  prefersDedicatedAllocation,
16310  buffer, // dedicatedBuffer
16311  VK_NULL_HANDLE, // dedicatedImage
16312  *pCreateInfo,
16313  VMA_SUBALLOCATION_TYPE_BUFFER,
16314  1, // allocationCount
16315  pAllocation);
16316 
16317 #if VMA_RECORDING_ENABLED
16318  if(allocator->GetRecorder() != VMA_NULL)
16319  {
16320  allocator->GetRecorder()->RecordAllocateMemoryForBuffer(
16321  allocator->GetCurrentFrameIndex(),
16322  vkMemReq,
16323  requiresDedicatedAllocation,
16324  prefersDedicatedAllocation,
16325  *pCreateInfo,
16326  *pAllocation);
16327  }
16328 #endif
16329 
16330  if(pAllocationInfo && result == VK_SUCCESS)
16331  {
16332  allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
16333  }
16334 
16335  return result;
16336 }
16337 
// Allocates memory suitable for the given existing image (the caller still
// has to bind it, e.g. with vmaBindImageMemory).
VkResult vmaAllocateMemoryForImage(
    VmaAllocator allocator,
    VkImage image,
    const VmaAllocationCreateInfo* pCreateInfo,
    VmaAllocation* pAllocation,
    VmaAllocationInfo* pAllocationInfo)
{
    VMA_ASSERT(allocator && image != VK_NULL_HANDLE && pCreateInfo && pAllocation);

    VMA_DEBUG_LOG("vmaAllocateMemoryForImage");

    VMA_DEBUG_GLOBAL_MUTEX_LOCK

    // Query requirements plus the driver's dedicated-allocation preference.
    VkMemoryRequirements vkMemReq = {};
    bool requiresDedicatedAllocation = false;
    bool prefersDedicatedAllocation  = false;
    allocator->GetImageMemoryRequirements(image, vkMemReq,
        requiresDedicatedAllocation, prefersDedicatedAllocation);

    VkResult result = allocator->AllocateMemory(
        vkMemReq,
        requiresDedicatedAllocation,
        prefersDedicatedAllocation,
        VK_NULL_HANDLE, // dedicatedBuffer
        image, // dedicatedImage
        *pCreateInfo,
        VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN,
        1, // allocationCount
        pAllocation);

#if VMA_RECORDING_ENABLED
    if(allocator->GetRecorder() != VMA_NULL)
    {
        allocator->GetRecorder()->RecordAllocateMemoryForImage(
            allocator->GetCurrentFrameIndex(),
            vkMemReq,
            requiresDedicatedAllocation,
            prefersDedicatedAllocation,
            *pCreateInfo,
            *pAllocation);
    }
#endif

    // pAllocationInfo is optional.
    if(pAllocationInfo && result == VK_SUCCESS)
    {
        allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
    }

    return result;
}
16388 
16389 void vmaFreeMemory(
16390  VmaAllocator allocator,
16391  VmaAllocation allocation)
16392 {
16393  VMA_ASSERT(allocator);
16394 
16395  if(allocation == VK_NULL_HANDLE)
16396  {
16397  return;
16398  }
16399 
16400  VMA_DEBUG_LOG("vmaFreeMemory");
16401 
16402  VMA_DEBUG_GLOBAL_MUTEX_LOCK
16403 
16404 #if VMA_RECORDING_ENABLED
16405  if(allocator->GetRecorder() != VMA_NULL)
16406  {
16407  allocator->GetRecorder()->RecordFreeMemory(
16408  allocator->GetCurrentFrameIndex(),
16409  allocation);
16410  }
16411 #endif
16412 
16413  allocator->FreeMemory(
16414  1, // allocationCount
16415  &allocation);
16416 }
16417 
// Frees allocationCount allocations in one call. allocationCount == 0 is a
// valid no-op.
void vmaFreeMemoryPages(
    VmaAllocator allocator,
    size_t allocationCount,
    VmaAllocation* pAllocations)
{
    if(allocationCount == 0)
    {
        return;
    }

    VMA_ASSERT(allocator);

    VMA_DEBUG_LOG("vmaFreeMemoryPages");

    VMA_DEBUG_GLOBAL_MUTEX_LOCK

#if VMA_RECORDING_ENABLED
    // Record before the allocation handles become invalid.
    if(allocator->GetRecorder() != VMA_NULL)
    {
        allocator->GetRecorder()->RecordFreeMemoryPages(
            allocator->GetCurrentFrameIndex(),
            (uint64_t)allocationCount,
            pAllocations);
    }
#endif

    allocator->FreeMemory(allocationCount, pAllocations);
}
16446 
// Attempts to change the size of an existing allocation in place; returns the
// result of the internal resize operation.
VkResult vmaResizeAllocation(
    VmaAllocator allocator,
    VmaAllocation allocation,
    VkDeviceSize newSize)
{
    VMA_ASSERT(allocator && allocation);

    VMA_DEBUG_LOG("vmaResizeAllocation");

    VMA_DEBUG_GLOBAL_MUTEX_LOCK

    return allocator->ResizeAllocation(allocation, newSize);
}
16460 
16462  VmaAllocator allocator,
16463  VmaAllocation allocation,
16464  VmaAllocationInfo* pAllocationInfo)
16465 {
16466  VMA_ASSERT(allocator && allocation && pAllocationInfo);
16467 
16468  VMA_DEBUG_GLOBAL_MUTEX_LOCK
16469 
16470 #if VMA_RECORDING_ENABLED
16471  if(allocator->GetRecorder() != VMA_NULL)
16472  {
16473  allocator->GetRecorder()->RecordGetAllocationInfo(
16474  allocator->GetCurrentFrameIndex(),
16475  allocation);
16476  }
16477 #endif
16478 
16479  allocator->GetAllocationInfo(allocation, pAllocationInfo);
16480 }
16481 
// Returns VK_TRUE if the allocation is still valid (not lost) and marks it as
// used in the current frame.
VkBool32 vmaTouchAllocation(
    VmaAllocator allocator,
    VmaAllocation allocation)
{
    VMA_ASSERT(allocator && allocation);

    VMA_DEBUG_GLOBAL_MUTEX_LOCK

#if VMA_RECORDING_ENABLED
    if(allocator->GetRecorder() != VMA_NULL)
    {
        allocator->GetRecorder()->RecordTouchAllocation(
            allocator->GetCurrentFrameIndex(),
            allocation);
    }
#endif

    return allocator->TouchAllocation(allocation);
}
16501 
16503  VmaAllocator allocator,
16504  VmaAllocation allocation,
16505  void* pUserData)
16506 {
16507  VMA_ASSERT(allocator && allocation);
16508 
16509  VMA_DEBUG_GLOBAL_MUTEX_LOCK
16510 
16511  allocation->SetUserData(allocator, pUserData);
16512 
16513 #if VMA_RECORDING_ENABLED
16514  if(allocator->GetRecorder() != VMA_NULL)
16515  {
16516  allocator->GetRecorder()->RecordSetAllocationUserData(
16517  allocator->GetCurrentFrameIndex(),
16518  allocation,
16519  pUserData);
16520  }
16521 #endif
16522 }
16523 
16525  VmaAllocator allocator,
16526  VmaAllocation* pAllocation)
16527 {
16528  VMA_ASSERT(allocator && pAllocation);
16529 
16530  VMA_DEBUG_GLOBAL_MUTEX_LOCK;
16531 
16532  allocator->CreateLostAllocation(pAllocation);
16533 
16534 #if VMA_RECORDING_ENABLED
16535  if(allocator->GetRecorder() != VMA_NULL)
16536  {
16537  allocator->GetRecorder()->RecordCreateLostAllocation(
16538  allocator->GetCurrentFrameIndex(),
16539  *pAllocation);
16540  }
16541 #endif
16542 }
16543 
// Maps the allocation's memory and returns the pointer in *ppData.
// Must be balanced by vmaUnmapMemory.
VkResult vmaMapMemory(
    VmaAllocator allocator,
    VmaAllocation allocation,
    void** ppData)
{
    VMA_ASSERT(allocator && allocation && ppData);

    VMA_DEBUG_GLOBAL_MUTEX_LOCK

    VkResult res = allocator->Map(allocation, ppData);

#if VMA_RECORDING_ENABLED
    if(allocator->GetRecorder() != VMA_NULL)
    {
        allocator->GetRecorder()->RecordMapMemory(
            allocator->GetCurrentFrameIndex(),
            allocation);
    }
#endif

    return res;
}
16566 
16567 void vmaUnmapMemory(
16568  VmaAllocator allocator,
16569  VmaAllocation allocation)
16570 {
16571  VMA_ASSERT(allocator && allocation);
16572 
16573  VMA_DEBUG_GLOBAL_MUTEX_LOCK
16574 
16575 #if VMA_RECORDING_ENABLED
16576  if(allocator->GetRecorder() != VMA_NULL)
16577  {
16578  allocator->GetRecorder()->RecordUnmapMemory(
16579  allocator->GetCurrentFrameIndex(),
16580  allocation);
16581  }
16582 #endif
16583 
16584  allocator->Unmap(allocation);
16585 }
16586 
16587 void vmaFlushAllocation(VmaAllocator allocator, VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size)
16588 {
16589  VMA_ASSERT(allocator && allocation);
16590 
16591  VMA_DEBUG_LOG("vmaFlushAllocation");
16592 
16593  VMA_DEBUG_GLOBAL_MUTEX_LOCK
16594 
16595  allocator->FlushOrInvalidateAllocation(allocation, offset, size, VMA_CACHE_FLUSH);
16596 
16597 #if VMA_RECORDING_ENABLED
16598  if(allocator->GetRecorder() != VMA_NULL)
16599  {
16600  allocator->GetRecorder()->RecordFlushAllocation(
16601  allocator->GetCurrentFrameIndex(),
16602  allocation, offset, size);
16603  }
16604 #endif
16605 }
16606 
16607 void vmaInvalidateAllocation(VmaAllocator allocator, VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size)
16608 {
16609  VMA_ASSERT(allocator && allocation);
16610 
16611  VMA_DEBUG_LOG("vmaInvalidateAllocation");
16612 
16613  VMA_DEBUG_GLOBAL_MUTEX_LOCK
16614 
16615  allocator->FlushOrInvalidateAllocation(allocation, offset, size, VMA_CACHE_INVALIDATE);
16616 
16617 #if VMA_RECORDING_ENABLED
16618  if(allocator->GetRecorder() != VMA_NULL)
16619  {
16620  allocator->GetRecorder()->RecordInvalidateAllocation(
16621  allocator->GetCurrentFrameIndex(),
16622  allocation, offset, size);
16623  }
16624 #endif
16625 }
16626 
16627 VkResult vmaCheckCorruption(VmaAllocator allocator, uint32_t memoryTypeBits)
16628 {
16629  VMA_ASSERT(allocator);
16630 
16631  VMA_DEBUG_LOG("vmaCheckCorruption");
16632 
16633  VMA_DEBUG_GLOBAL_MUTEX_LOCK
16634 
16635  return allocator->CheckCorruption(memoryTypeBits);
16636 }
16637 
16638 VkResult vmaDefragment(
16639  VmaAllocator allocator,
16640  VmaAllocation* pAllocations,
16641  size_t allocationCount,
16642  VkBool32* pAllocationsChanged,
16643  const VmaDefragmentationInfo *pDefragmentationInfo,
16644  VmaDefragmentationStats* pDefragmentationStats)
16645 {
16646  // Deprecated interface, reimplemented using new one.
16647 
16648  VmaDefragmentationInfo2 info2 = {};
16649  info2.allocationCount = (uint32_t)allocationCount;
16650  info2.pAllocations = pAllocations;
16651  info2.pAllocationsChanged = pAllocationsChanged;
16652  if(pDefragmentationInfo != VMA_NULL)
16653  {
16654  info2.maxCpuAllocationsToMove = pDefragmentationInfo->maxAllocationsToMove;
16655  info2.maxCpuBytesToMove = pDefragmentationInfo->maxBytesToMove;
16656  }
16657  else
16658  {
16659  info2.maxCpuAllocationsToMove = UINT32_MAX;
16660  info2.maxCpuBytesToMove = VK_WHOLE_SIZE;
16661  }
16662  // info2.flags, maxGpuAllocationsToMove, maxGpuBytesToMove, commandBuffer deliberately left zero.
16663 
16665  VkResult res = vmaDefragmentationBegin(allocator, &info2, pDefragmentationStats, &ctx);
16666  if(res == VK_NOT_READY)
16667  {
16668  res = vmaDefragmentationEnd( allocator, ctx);
16669  }
16670  return res;
16671 }
16672 
16673 VkResult vmaDefragmentationBegin(
16674  VmaAllocator allocator,
16675  const VmaDefragmentationInfo2* pInfo,
16676  VmaDefragmentationStats* pStats,
16677  VmaDefragmentationContext *pContext)
16678 {
16679  VMA_ASSERT(allocator && pInfo && pContext);
16680 
16681  // Degenerate case: Nothing to defragment.
16682  if(pInfo->allocationCount == 0 && pInfo->poolCount == 0)
16683  {
16684  return VK_SUCCESS;
16685  }
16686 
16687  VMA_ASSERT(pInfo->allocationCount == 0 || pInfo->pAllocations != VMA_NULL);
16688  VMA_ASSERT(pInfo->poolCount == 0 || pInfo->pPools != VMA_NULL);
16689  VMA_HEAVY_ASSERT(VmaValidatePointerArray(pInfo->allocationCount, pInfo->pAllocations));
16690  VMA_HEAVY_ASSERT(VmaValidatePointerArray(pInfo->poolCount, pInfo->pPools));
16691 
16692  VMA_DEBUG_LOG("vmaDefragmentationBegin");
16693 
16694  VMA_DEBUG_GLOBAL_MUTEX_LOCK
16695 
16696  VkResult res = allocator->DefragmentationBegin(*pInfo, pStats, pContext);
16697 
16698 #if VMA_RECORDING_ENABLED
16699  if(allocator->GetRecorder() != VMA_NULL)
16700  {
16701  allocator->GetRecorder()->RecordDefragmentationBegin(
16702  allocator->GetCurrentFrameIndex(), *pInfo, *pContext);
16703  }
16704 #endif
16705 
16706  return res;
16707 }
16708 
16709 VkResult vmaDefragmentationEnd(
16710  VmaAllocator allocator,
16711  VmaDefragmentationContext context)
16712 {
16713  VMA_ASSERT(allocator);
16714 
16715  VMA_DEBUG_LOG("vmaDefragmentationEnd");
16716 
16717  if(context != VK_NULL_HANDLE)
16718  {
16719  VMA_DEBUG_GLOBAL_MUTEX_LOCK
16720 
16721 #if VMA_RECORDING_ENABLED
16722  if(allocator->GetRecorder() != VMA_NULL)
16723  {
16724  allocator->GetRecorder()->RecordDefragmentationEnd(
16725  allocator->GetCurrentFrameIndex(), context);
16726  }
16727 #endif
16728 
16729  return allocator->DefragmentationEnd(context);
16730  }
16731  else
16732  {
16733  return VK_SUCCESS;
16734  }
16735 }
16736 
16737 VkResult vmaBindBufferMemory(
16738  VmaAllocator allocator,
16739  VmaAllocation allocation,
16740  VkBuffer buffer)
16741 {
16742  VMA_ASSERT(allocator && allocation && buffer);
16743 
16744  VMA_DEBUG_LOG("vmaBindBufferMemory");
16745 
16746  VMA_DEBUG_GLOBAL_MUTEX_LOCK
16747 
16748  return allocator->BindBufferMemory(allocation, 0, buffer, VMA_NULL);
16749 }
16750 
16751 VkResult vmaBindBufferMemory2(
16752  VmaAllocator allocator,
16753  VmaAllocation allocation,
16754  VkDeviceSize allocationLocalOffset,
16755  VkBuffer buffer,
16756  const void* pNext)
16757 {
16758  VMA_ASSERT(allocator && allocation && buffer);
16759 
16760  VMA_DEBUG_LOG("vmaBindBufferMemory2");
16761 
16762  VMA_DEBUG_GLOBAL_MUTEX_LOCK
16763 
16764  return allocator->BindBufferMemory(allocation, allocationLocalOffset, buffer, pNext);
16765 }
16766 
16767 VkResult vmaBindImageMemory(
16768  VmaAllocator allocator,
16769  VmaAllocation allocation,
16770  VkImage image)
16771 {
16772  VMA_ASSERT(allocator && allocation && image);
16773 
16774  VMA_DEBUG_LOG("vmaBindImageMemory");
16775 
16776  VMA_DEBUG_GLOBAL_MUTEX_LOCK
16777 
16778  return allocator->BindImageMemory(allocation, 0, image, VMA_NULL);
16779 }
16780 
16781 VkResult vmaBindImageMemory2(
16782  VmaAllocator allocator,
16783  VmaAllocation allocation,
16784  VkDeviceSize allocationLocalOffset,
16785  VkImage image,
16786  const void* pNext)
16787 {
16788  VMA_ASSERT(allocator && allocation && image);
16789 
16790  VMA_DEBUG_LOG("vmaBindImageMemory2");
16791 
16792  VMA_DEBUG_GLOBAL_MUTEX_LOCK
16793 
16794  return allocator->BindImageMemory(allocation, allocationLocalOffset, image, pNext);
16795 }
16796 
// Creates a VkBuffer, allocates memory for it, and binds them together.
// On success *pBuffer and *pAllocation are set and pAllocationInfo (if not null)
// is filled. On any failure both out-handles are reset to VK_NULL_HANDLE and
// every partially-created object is destroyed before returning.
VkResult vmaCreateBuffer(
    VmaAllocator allocator,
    const VkBufferCreateInfo* pBufferCreateInfo,
    const VmaAllocationCreateInfo* pAllocationCreateInfo,
    VkBuffer* pBuffer,
    VmaAllocation* pAllocation,
    VmaAllocationInfo* pAllocationInfo)
{
    VMA_ASSERT(allocator && pBufferCreateInfo && pAllocationCreateInfo && pBuffer && pAllocation);

    // Reject zero-size buffers up front instead of forwarding invalid input to Vulkan.
    if(pBufferCreateInfo->size == 0)
    {
        return VK_ERROR_VALIDATION_FAILED_EXT;
    }

    VMA_DEBUG_LOG("vmaCreateBuffer");

    VMA_DEBUG_GLOBAL_MUTEX_LOCK

    // Ensure the out-handles are null so failure paths leave a consistent state.
    *pBuffer = VK_NULL_HANDLE;
    *pAllocation = VK_NULL_HANDLE;

    // 1. Create VkBuffer.
    VkResult res = (*allocator->GetVulkanFunctions().vkCreateBuffer)(
        allocator->m_hDevice,
        pBufferCreateInfo,
        allocator->GetAllocationCallbacks(),
        pBuffer);
    if(res >= 0)
    {
        // 2. vkGetBufferMemoryRequirements.
        VkMemoryRequirements vkMemReq = {};
        bool requiresDedicatedAllocation = false;
        bool prefersDedicatedAllocation = false;
        allocator->GetBufferMemoryRequirements(*pBuffer, vkMemReq,
            requiresDedicatedAllocation, prefersDedicatedAllocation);

        // Make sure alignment requirements for specific buffer usages reported
        // in Physical Device Properties are included in alignment reported by memory requirements.
        if((pBufferCreateInfo->usage & VK_BUFFER_USAGE_UNIFORM_TEXEL_BUFFER_BIT) != 0)
        {
            VMA_ASSERT(vkMemReq.alignment %
                allocator->m_PhysicalDeviceProperties.limits.minTexelBufferOffsetAlignment == 0);
        }
        if((pBufferCreateInfo->usage & VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT) != 0)
        {
            VMA_ASSERT(vkMemReq.alignment %
                allocator->m_PhysicalDeviceProperties.limits.minUniformBufferOffsetAlignment == 0);
        }
        if((pBufferCreateInfo->usage & VK_BUFFER_USAGE_STORAGE_BUFFER_BIT) != 0)
        {
            VMA_ASSERT(vkMemReq.alignment %
                allocator->m_PhysicalDeviceProperties.limits.minStorageBufferOffsetAlignment == 0);
        }

        // 3. Allocate memory using allocator.
        res = allocator->AllocateMemory(
            vkMemReq,
            requiresDedicatedAllocation,
            prefersDedicatedAllocation,
            *pBuffer, // dedicatedBuffer
            VK_NULL_HANDLE, // dedicatedImage
            *pAllocationCreateInfo,
            VMA_SUBALLOCATION_TYPE_BUFFER,
            1, // allocationCount
            pAllocation);

#if VMA_RECORDING_ENABLED
        // Record the call regardless of allocation success; *pAllocation may be null here.
        if(allocator->GetRecorder() != VMA_NULL)
        {
            allocator->GetRecorder()->RecordCreateBuffer(
                allocator->GetCurrentFrameIndex(),
                *pBufferCreateInfo,
                *pAllocationCreateInfo,
                *pAllocation);
        }
#endif

        if(res >= 0)
        {
            // 4. Bind buffer with memory (unless the caller asked to skip binding).
            if((pAllocationCreateInfo->flags & VMA_ALLOCATION_CREATE_DONT_BIND_BIT) == 0)
            {
                res = allocator->BindBufferMemory(*pAllocation, 0, *pBuffer, VMA_NULL);
            }
            if(res >= 0)
            {
                // All steps succeeded.
                #if VMA_STATS_STRING_ENABLED
                (*pAllocation)->InitBufferImageUsage(pBufferCreateInfo->usage);
                #endif
                if(pAllocationInfo != VMA_NULL)
                {
                    allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
                }

                return VK_SUCCESS;
            }
            // Bind failed: roll back the allocation and the buffer, in that order.
            allocator->FreeMemory(
                1, // allocationCount
                pAllocation);
            *pAllocation = VK_NULL_HANDLE;
            (*allocator->GetVulkanFunctions().vkDestroyBuffer)(allocator->m_hDevice, *pBuffer, allocator->GetAllocationCallbacks());
            *pBuffer = VK_NULL_HANDLE;
            return res;
        }
        // Allocation failed: destroy the buffer created in step 1.
        (*allocator->GetVulkanFunctions().vkDestroyBuffer)(allocator->m_hDevice, *pBuffer, allocator->GetAllocationCallbacks());
        *pBuffer = VK_NULL_HANDLE;
        return res;
    }
    // vkCreateBuffer itself failed; nothing to clean up.
    return res;
}
16909 
16910 void vmaDestroyBuffer(
16911  VmaAllocator allocator,
16912  VkBuffer buffer,
16913  VmaAllocation allocation)
16914 {
16915  VMA_ASSERT(allocator);
16916 
16917  if(buffer == VK_NULL_HANDLE && allocation == VK_NULL_HANDLE)
16918  {
16919  return;
16920  }
16921 
16922  VMA_DEBUG_LOG("vmaDestroyBuffer");
16923 
16924  VMA_DEBUG_GLOBAL_MUTEX_LOCK
16925 
16926 #if VMA_RECORDING_ENABLED
16927  if(allocator->GetRecorder() != VMA_NULL)
16928  {
16929  allocator->GetRecorder()->RecordDestroyBuffer(
16930  allocator->GetCurrentFrameIndex(),
16931  allocation);
16932  }
16933 #endif
16934 
16935  if(buffer != VK_NULL_HANDLE)
16936  {
16937  (*allocator->GetVulkanFunctions().vkDestroyBuffer)(allocator->m_hDevice, buffer, allocator->GetAllocationCallbacks());
16938  }
16939 
16940  if(allocation != VK_NULL_HANDLE)
16941  {
16942  allocator->FreeMemory(
16943  1, // allocationCount
16944  &allocation);
16945  }
16946 }
16947 
// Creates a VkImage, allocates memory for it, and binds them together.
// On success *pImage and *pAllocation are set and pAllocationInfo (if not null)
// is filled. On any failure both out-handles are reset to VK_NULL_HANDLE and
// every partially-created object is destroyed before returning.
VkResult vmaCreateImage(
    VmaAllocator allocator,
    const VkImageCreateInfo* pImageCreateInfo,
    const VmaAllocationCreateInfo* pAllocationCreateInfo,
    VkImage* pImage,
    VmaAllocation* pAllocation,
    VmaAllocationInfo* pAllocationInfo)
{
    VMA_ASSERT(allocator && pImageCreateInfo && pAllocationCreateInfo && pImage && pAllocation);

    // Reject degenerate images (any zero dimension, mip, or layer count) up front.
    if(pImageCreateInfo->extent.width == 0 ||
        pImageCreateInfo->extent.height == 0 ||
        pImageCreateInfo->extent.depth == 0 ||
        pImageCreateInfo->mipLevels == 0 ||
        pImageCreateInfo->arrayLayers == 0)
    {
        return VK_ERROR_VALIDATION_FAILED_EXT;
    }

    VMA_DEBUG_LOG("vmaCreateImage");

    VMA_DEBUG_GLOBAL_MUTEX_LOCK

    // Ensure the out-handles are null so failure paths leave a consistent state.
    *pImage = VK_NULL_HANDLE;
    *pAllocation = VK_NULL_HANDLE;

    // 1. Create VkImage.
    VkResult res = (*allocator->GetVulkanFunctions().vkCreateImage)(
        allocator->m_hDevice,
        pImageCreateInfo,
        allocator->GetAllocationCallbacks(),
        pImage);
    if(res >= 0)
    {
        // Linear and optimal-tiling images use distinct suballocation types so VMA
        // can keep them apart (relevant for bufferImageGranularity handling).
        VmaSuballocationType suballocType = pImageCreateInfo->tiling == VK_IMAGE_TILING_OPTIMAL ?
            VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL :
            VMA_SUBALLOCATION_TYPE_IMAGE_LINEAR;

        // 2. Allocate memory using allocator.
        VkMemoryRequirements vkMemReq = {};
        bool requiresDedicatedAllocation = false;
        bool prefersDedicatedAllocation = false;
        allocator->GetImageMemoryRequirements(*pImage, vkMemReq,
            requiresDedicatedAllocation, prefersDedicatedAllocation);

        res = allocator->AllocateMemory(
            vkMemReq,
            requiresDedicatedAllocation,
            prefersDedicatedAllocation,
            VK_NULL_HANDLE, // dedicatedBuffer
            *pImage, // dedicatedImage
            *pAllocationCreateInfo,
            suballocType,
            1, // allocationCount
            pAllocation);

#if VMA_RECORDING_ENABLED
        // Record the call regardless of allocation success; *pAllocation may be null here.
        if(allocator->GetRecorder() != VMA_NULL)
        {
            allocator->GetRecorder()->RecordCreateImage(
                allocator->GetCurrentFrameIndex(),
                *pImageCreateInfo,
                *pAllocationCreateInfo,
                *pAllocation);
        }
#endif

        if(res >= 0)
        {
            // 3. Bind image with memory (unless the caller asked to skip binding).
            if((pAllocationCreateInfo->flags & VMA_ALLOCATION_CREATE_DONT_BIND_BIT) == 0)
            {
                res = allocator->BindImageMemory(*pAllocation, 0, *pImage, VMA_NULL);
            }
            if(res >= 0)
            {
                // All steps succeeded.
                #if VMA_STATS_STRING_ENABLED
                (*pAllocation)->InitBufferImageUsage(pImageCreateInfo->usage);
                #endif
                if(pAllocationInfo != VMA_NULL)
                {
                    allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
                }

                return VK_SUCCESS;
            }
            // Bind failed: roll back the allocation and the image, in that order.
            allocator->FreeMemory(
                1, // allocationCount
                pAllocation);
            *pAllocation = VK_NULL_HANDLE;
            (*allocator->GetVulkanFunctions().vkDestroyImage)(allocator->m_hDevice, *pImage, allocator->GetAllocationCallbacks());
            *pImage = VK_NULL_HANDLE;
            return res;
        }
        // Allocation failed: destroy the image created in step 1.
        (*allocator->GetVulkanFunctions().vkDestroyImage)(allocator->m_hDevice, *pImage, allocator->GetAllocationCallbacks());
        *pImage = VK_NULL_HANDLE;
        return res;
    }
    // vkCreateImage itself failed; nothing to clean up.
    return res;
}
17049 
17050 void vmaDestroyImage(
17051  VmaAllocator allocator,
17052  VkImage image,
17053  VmaAllocation allocation)
17054 {
17055  VMA_ASSERT(allocator);
17056 
17057  if(image == VK_NULL_HANDLE && allocation == VK_NULL_HANDLE)
17058  {
17059  return;
17060  }
17061 
17062  VMA_DEBUG_LOG("vmaDestroyImage");
17063 
17064  VMA_DEBUG_GLOBAL_MUTEX_LOCK
17065 
17066 #if VMA_RECORDING_ENABLED
17067  if(allocator->GetRecorder() != VMA_NULL)
17068  {
17069  allocator->GetRecorder()->RecordDestroyImage(
17070  allocator->GetCurrentFrameIndex(),
17071  allocation);
17072  }
17073 #endif
17074 
17075  if(image != VK_NULL_HANDLE)
17076  {
17077  (*allocator->GetVulkanFunctions().vkDestroyImage)(allocator->m_hDevice, image, allocator->GetAllocationCallbacks());
17078  }
17079  if(allocation != VK_NULL_HANDLE)
17080  {
17081  allocator->FreeMemory(
17082  1, // allocationCount
17083  &allocation);
17084  }
17085 }
17086 
17087 #endif // #ifdef VMA_IMPLEMENTATION
VmaStats
struct VmaStats VmaStats
General statistics from current state of Allocator.
VmaRecordSettings
Parameters for recording calls to VMA functions. To be used in VmaAllocatorCreateInfo::pRecordSetting...
Definition: vk_mem_alloc.h:1851
VmaVulkanFunctions::vkAllocateMemory
PFN_vkAllocateMemory vkAllocateMemory
Definition: vk_mem_alloc.h:1812
VmaDeviceMemoryCallbacks::pfnFree
PFN_vmaFreeDeviceMemoryFunction pfnFree
Optional, can be null.
Definition: vk_mem_alloc.h:1756
VMA_RECORD_FLAG_BITS_MAX_ENUM
@ VMA_RECORD_FLAG_BITS_MAX_ENUM
Definition: vk_mem_alloc.h:1846
VmaVulkanFunctions::vkGetPhysicalDeviceProperties
PFN_vkGetPhysicalDeviceProperties vkGetPhysicalDeviceProperties
Definition: vk_mem_alloc.h:1810
vmaFreeMemory
void vmaFreeMemory(VmaAllocator allocator, VmaAllocation allocation)
Frees memory previously allocated using vmaAllocateMemory(), vmaAllocateMemoryForBuffer(),...
PFN_vmaAllocateDeviceMemoryFunction
void(VKAPI_PTR * PFN_vmaAllocateDeviceMemoryFunction)(VmaAllocator allocator, uint32_t memoryType, VkDeviceMemory memory, VkDeviceSize size)
Callback function called after successful vkAllocateMemory.
Definition: vk_mem_alloc.h:1733
value
GLfloat value
Definition: qgl_win.c:63
VmaAllocatorCreateInfo::physicalDevice
VkPhysicalDevice physicalDevice
Vulkan physical device.
Definition: vk_mem_alloc.h:1872
pointer
GLenum GLsizei const GLvoid * pointer
Definition: qgl_win.c:114
VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT
@ VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT
Enables alternative, linear allocation algorithm in this pool.
Definition: vk_mem_alloc.h:2351
VmaDefragmentationInfo2::allocationCount
uint32_t allocationCount
Number of allocations in pAllocations array.
Definition: vk_mem_alloc.h:2867
VmaAllocatorCreateInfo::frameInUseCount
uint32_t frameInUseCount
Maximum number of additional frames that are in use at the same time as current frame.
Definition: vk_mem_alloc.h:1898
vmaInvalidateAllocation
void vmaInvalidateAllocation(VmaAllocator allocator, VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size)
Invalidates memory of given allocation.
VmaDefragmentationInfo
struct VmaDefragmentationInfo VmaDefragmentationInfo
Deprecated.
VmaPoolStats
Describes parameter of existing VmaPool.
Definition: vk_mem_alloc.h:2423
first
GLint first
Definition: qgl_win.c:128
VMA_ALLOCATION_CREATE_STRATEGY_BEST_FIT_BIT
@ VMA_ALLOCATION_CREATE_STRATEGY_BEST_FIT_BIT
Allocation strategy that chooses smallest possible free range for the allocation.
Definition: vk_mem_alloc.h:2180
VMA_ALLOCATOR_CREATE_EXTERNALLY_SYNCHRONIZED_BIT
@ VMA_ALLOCATOR_CREATE_EXTERNALLY_SYNCHRONIZED_BIT
Allocator and all objects created from it will not be synchronized internally, so you must guarantee ...
Definition: vk_mem_alloc.h:1765
VmaPoolStats::unusedSize
VkDeviceSize unusedSize
Total number of bytes in the pool not used by any VmaAllocation.
Definition: vk_mem_alloc.h:2429
VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT
@ VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT
Set this flag to treat VmaAllocationCreateInfo::pUserData as pointer to a null-terminated string.
Definition: vk_mem_alloc.h:2164
VmaRecordFlagBits
VmaRecordFlagBits
Flags to be used in VmaRecordSettings::flags.
Definition: vk_mem_alloc.h:1838
VmaDeviceMemoryCallbacks
Set of callbacks that the library will call for vkAllocateMemory and vkFreeMemory.
Definition: vk_mem_alloc.h:1752
T
#define T(a, b, c, d, k, s)
Definition: md4.c:22
vmaTouchAllocation
VkBool32 vmaTouchAllocation(VmaAllocator allocator, VmaAllocation allocation)
Returns VK_TRUE if allocation is not lost and atomically marks it as used in current frame.
VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT
@ VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT
Allocation created with this flag can become lost as a result of another allocation with VMA_ALLOCATI...
Definition: vk_mem_alloc.h:2151
VmaAllocatorCreateInfo::preferredLargeHeapBlockSize
VkDeviceSize preferredLargeHeapBlockSize
Preferred size of a single VkDeviceMemory block to be allocated from large heaps > 1 GiB....
Definition: vk_mem_alloc.h:1878
VMA_RECORD_FLUSH_AFTER_CALL_BIT
@ VMA_RECORD_FLUSH_AFTER_CALL_BIT
Enables flush after recording every function call.
Definition: vk_mem_alloc.h:1844
VmaAllocationCreateInfo
struct VmaAllocationCreateInfo VmaAllocationCreateInfo
v
GLdouble v
Definition: qgl_win.c:143
vmaResizeAllocation
VkResult vmaResizeAllocation(VmaAllocator allocator, VmaAllocation allocation, VkDeviceSize newSize)
Deprecated.
VmaVulkanFunctions::vkUnmapMemory
PFN_vkUnmapMemory vkUnmapMemory
Definition: vk_mem_alloc.h:1815
VmaAllocationInfo::deviceMemory
VkDeviceMemory deviceMemory
Handle to Vulkan memory object.
Definition: vk_mem_alloc.h:2545
VmaStatInfo::unusedRangeCount
uint32_t unusedRangeCount
Number of free ranges of memory between allocations.
Definition: vk_mem_alloc.h:2002
VmaAllocationCreateInfo::pUserData
void * pUserData
Custom general-purpose pointer that will be stored in VmaAllocation, can be read as VmaAllocationInfo...
Definition: vk_mem_alloc.h:2254
VmaStatInfo::unusedRangeSizeMax
VkDeviceSize unusedRangeSizeMax
Definition: vk_mem_alloc.h:2008
VmaVulkanFunctions::vkMapMemory
PFN_vkMapMemory vkMapMemory
Definition: vk_mem_alloc.h:1814
VMA_RECORDING_ENABLED
#define VMA_RECORDING_ENABLED
Definition: vk_mem_alloc.h:1690
x
GLint GLenum GLint x
Definition: qgl_win.c:116
VMA_ALLOCATION_CREATE_STRATEGY_FIRST_FIT_BIT
@ VMA_ALLOCATION_CREATE_STRATEGY_FIRST_FIT_BIT
Allocation strategy that chooses first suitable free range for the allocation.
Definition: vk_mem_alloc.h:2191
vmaUnmapMemory
void vmaUnmapMemory(VmaAllocator allocator, VmaAllocation allocation)
Unmaps memory represented by given allocation, mapped previously using vmaMapMemory().
i
int i
Definition: q_shared.c:305
device
static ma_device device
Definition: snd_miniaudio.c:47
VmaAllocator
Represents main object of this library initialized.
VmaVulkanFunctions::vkCmdCopyBuffer
PFN_vkCmdCopyBuffer vkCmdCopyBuffer
Definition: vk_mem_alloc.h:1826
VmaAllocatorCreateInfo
Description of an Allocator to be created.
Definition: vk_mem_alloc.h:1866
VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT
@ VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT
Set this flag to only try to allocate from existing VkDeviceMemory blocks and never create new such b...
Definition: vk_mem_alloc.h:2125
buffer
GLenum GLfloat * buffer
Definition: qgl_win.c:151
VMA_DEFRAGMENTATION_FLAG_BITS_MAX_ENUM
@ VMA_DEFRAGMENTATION_FLAG_BITS_MAX_ENUM
Definition: vk_mem_alloc.h:2853
VmaPoolStats::unusedRangeSizeMax
VkDeviceSize unusedRangeSizeMax
Size of the largest continuous free memory region available for new allocation.
Definition: vk_mem_alloc.h:2442
VMA_ALLOCATION_CREATE_STRATEGY_WORST_FIT_BIT
@ VMA_ALLOCATION_CREATE_STRATEGY_WORST_FIT_BIT
Allocation strategy that chooses biggest possible free range for the allocation.
Definition: vk_mem_alloc.h:2184
VMA_ALLOCATOR_CREATE_KHR_DEDICATED_ALLOCATION_BIT
@ VMA_ALLOCATOR_CREATE_KHR_DEDICATED_ALLOCATION_BIT
Enables usage of VK_KHR_dedicated_allocation extension.
Definition: vk_mem_alloc.h:1787
vmaSetCurrentFrameIndex
void vmaSetCurrentFrameIndex(VmaAllocator allocator, uint32_t frameIndex)
Sets index of the current frame.
VmaDefragmentationInfo::maxAllocationsToMove
uint32_t maxAllocationsToMove
Maximum number of allocations that can be moved to different place.
Definition: vk_mem_alloc.h:2947
VmaMemoryUsage
VmaMemoryUsage
Definition: vk_mem_alloc.h:2054
vmaGetMemoryTypeProperties
void vmaGetMemoryTypeProperties(VmaAllocator allocator, uint32_t memoryTypeIndex, VkMemoryPropertyFlags *pFlags)
Given Memory Type Index, returns Property Flags of this memory type.
VmaStatInfo::blockCount
uint32_t blockCount
Number of VkDeviceMemory Vulkan memory blocks allocated.
Definition: vk_mem_alloc.h:1998
VmaPoolCreateInfo::memoryTypeIndex
uint32_t memoryTypeIndex
Vulkan memory type index to allocate this pool from.
Definition: vk_mem_alloc.h:2379
VmaPoolCreateInfo::blockSize
VkDeviceSize blockSize
Size of a single VkDeviceMemory block to be allocated as part of this pool, in bytes.
Definition: vk_mem_alloc.h:2391
VmaDefragmentationInfo2::poolCount
uint32_t poolCount
Number of pools in pPools array.
Definition: vk_mem_alloc.h:2885
type
GLenum type
Definition: qgl_win.c:72
vmaBuildStatsString
void vmaBuildStatsString(VmaAllocator allocator, char **ppStatsString, VkBool32 detailedMap)
Builds and returns statistics as string in JSON format.
vmaGetAllocationInfo
void vmaGetAllocationInfo(VmaAllocator allocator, VmaAllocation allocation, VmaAllocationInfo *pAllocationInfo)
Returns current information about specified allocation and atomically marks it as used in current fra...
VmaPoolStats::allocationCount
size_t allocationCount
Number of VmaAllocation objects created from this pool that were not destroyed or lost.
Definition: vk_mem_alloc.h:2432
VmaAllocatorCreateFlags
VkFlags VmaAllocatorCreateFlags
Definition: vk_mem_alloc.h:1803
vmaFreeStatsString
void vmaFreeStatsString(VmaAllocator allocator, char *pStatsString)
vmaAllocateMemoryForBuffer
VkResult vmaAllocateMemoryForBuffer(VmaAllocator allocator, VkBuffer buffer, const VmaAllocationCreateInfo *pCreateInfo, VmaAllocation *pAllocation, VmaAllocationInfo *pAllocationInfo)
VmaVulkanFunctions
struct VmaVulkanFunctions VmaVulkanFunctions
Pointers to some Vulkan functions - a subset used by the library.
VMA_ALLOCATOR_CREATE_FLAG_BITS_MAX_ENUM
@ VMA_ALLOCATOR_CREATE_FLAG_BITS_MAX_ENUM
Definition: vk_mem_alloc.h:1801
VmaDefragmentationFlagBits
VmaDefragmentationFlagBits
Flags to be used in vmaDefragmentationBegin(). None at the moment. Reserved for future use.
Definition: vk_mem_alloc.h:2852
VmaAllocationInfo::offset
VkDeviceSize offset
Offset into deviceMemory object to the beginning of this allocation, in bytes.
Definition: vk_mem_alloc.h:2550
VmaAllocationCreateFlagBits
VmaAllocationCreateFlagBits
Flags to be passed as VmaAllocationCreateInfo::flags.
Definition: vk_mem_alloc.h:2107
j
GLint j
Definition: qgl_win.c:150
VmaVulkanFunctions::vkGetPhysicalDeviceMemoryProperties
PFN_vkGetPhysicalDeviceMemoryProperties vkGetPhysicalDeviceMemoryProperties
Definition: vk_mem_alloc.h:1811
VmaPoolCreateFlags
VkFlags VmaPoolCreateFlags
Definition: vk_mem_alloc.h:2372
vmaCreateLostAllocation
void vmaCreateLostAllocation(VmaAllocator allocator, VmaAllocation *pAllocation)
Creates new allocation that is in lost state from the beginning.
VmaDeviceMemoryCallbacks
struct VmaDeviceMemoryCallbacks VmaDeviceMemoryCallbacks
Set of callbacks that the library will call for vkAllocateMemory and vkFreeMemory.
vmaGetPhysicalDeviceProperties
void vmaGetPhysicalDeviceProperties(VmaAllocator allocator, const VkPhysicalDeviceProperties **ppPhysicalDeviceProperties)
PhysicalDeviceProperties are fetched from physicalDevice by the allocator.
VmaAllocationCreateInfo::pool
VmaPool pool
Pool that this allocation should be created in.
Definition: vk_mem_alloc.h:2247
vmaGetMemoryProperties
void vmaGetMemoryProperties(VmaAllocator allocator, const VkPhysicalDeviceMemoryProperties **ppPhysicalDeviceMemoryProperties)
PhysicalDeviceMemoryProperties are fetched from physicalDevice by the allocator.
u
static int u
Definition: r_part.c:472
VmaStats::total
VmaStatInfo total
Definition: vk_mem_alloc.h:2016
VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT
@ VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT
Set this flag if the allocation should have its own memory block.
Definition: vk_mem_alloc.h:2114
vmaDefragmentationEnd
VkResult vmaDefragmentationEnd(VmaAllocator allocator, VmaDefragmentationContext context)
Ends defragmentation process.
VMA_ALLOCATOR_CREATE_KHR_BIND_MEMORY2_BIT
@ VMA_ALLOCATOR_CREATE_KHR_BIND_MEMORY2_BIT
Enables usage of VK_KHR_bind_memory2 extension.
Definition: vk_mem_alloc.h:1799
VmaDefragmentationInfo2::flags
VmaDefragmentationFlags flags
Reserved for future use.
Definition: vk_mem_alloc.h:2864
VmaVulkanFunctions::vkBindImageMemory
PFN_vkBindImageMemory vkBindImageMemory
Definition: vk_mem_alloc.h:1819
VmaDefragmentationInfo2::maxGpuBytesToMove
VkDeviceSize maxGpuBytesToMove
Maximum total numbers of bytes that can be copied while moving allocations to different places using ...
Definition: vk_mem_alloc.h:2916
VmaDefragmentationStats
Statistics returned by function vmaDefragment().
Definition: vk_mem_alloc.h:2951
vmaDestroyPool
void vmaDestroyPool(VmaAllocator allocator, VmaPool pool)
Destroys VmaPool object and frees Vulkan device memory.
VmaPoolStats::size
VkDeviceSize size
Total amount of VkDeviceMemory allocated from Vulkan for this pool, in bytes.
Definition: vk_mem_alloc.h:2426
VmaVulkanFunctions::vkFreeMemory
PFN_vkFreeMemory vkFreeMemory
Definition: vk_mem_alloc.h:1813
VmaRecordFlags
VkFlags VmaRecordFlags
Definition: vk_mem_alloc.h:1848
VMA_MEMORY_USAGE_CPU_ONLY
@ VMA_MEMORY_USAGE_CPU_ONLY
Memory will be mappable on host.
Definition: vk_mem_alloc.h:2086
VmaDefragmentationInfo2::pPools
VmaPool * pPools
Either null or pointer to array of pools to be defragmented.
Definition: vk_mem_alloc.h:2901
VmaAllocation
Represents single memory allocation.
vmaSetAllocationUserData
void vmaSetAllocationUserData(VmaAllocator allocator, VmaAllocation allocation, void *pUserData)
Sets pUserData in given allocation to new value.
VmaAllocatorCreateInfo::pRecordSettings
const VmaRecordSettings * pRecordSettings
Parameters for recording of VMA calls.
Definition: vk_mem_alloc.h:1942
VmaVulkanFunctions::vkBindBufferMemory
PFN_vkBindBufferMemory vkBindBufferMemory
Definition: vk_mem_alloc.h:1818
VmaVulkanFunctions::vkGetBufferMemoryRequirements
PFN_vkGetBufferMemoryRequirements vkGetBufferMemoryRequirements
Definition: vk_mem_alloc.h:1820
VmaDefragmentationInfo2::commandBuffer
VkCommandBuffer commandBuffer
Optional.
Definition: vk_mem_alloc.h:2930
PFN_vmaFreeDeviceMemoryFunction
void(VKAPI_PTR * PFN_vmaFreeDeviceMemoryFunction)(VmaAllocator allocator, uint32_t memoryType, VkDeviceMemory memory, VkDeviceSize size)
Callback function called before vkFreeMemory.
Definition: vk_mem_alloc.h:1739
VmaStats
General statistics from current state of Allocator.
Definition: vk_mem_alloc.h:2012
VmaPoolCreateInfo::minBlockCount
size_t minBlockCount
Minimum number of blocks to be always allocated in this pool, even if they stay empty.
Definition: vk_mem_alloc.h:2396
VmaStatInfo
Calculated statistics of memory usage in entire allocator.
Definition: vk_mem_alloc.h:1995
VmaDefragmentationStats::bytesFreed
VkDeviceSize bytesFreed
Total number of bytes that have been released to the system by freeing empty VkDeviceMemory objects.
Definition: vk_mem_alloc.h:2955
true
@ true
Definition: q_shared.h:63
vmaFreeMemoryPages
void vmaFreeMemoryPages(VmaAllocator allocator, size_t allocationCount, VmaAllocation *pAllocations)
Frees memory and destroys multiple allocations.
VMA_MEMORY_USAGE_GPU_ONLY
@ VMA_MEMORY_USAGE_GPU_ONLY
Memory will be used on device only, so fast access from the device is preferred.
Definition: vk_mem_alloc.h:2076
vmaFindMemoryTypeIndex
VkResult vmaFindMemoryTypeIndex(VmaAllocator allocator, uint32_t memoryTypeBits, const VmaAllocationCreateInfo *pAllocationCreateInfo, uint32_t *pMemoryTypeIndex)
Helps to find memoryTypeIndex, given memoryTypeBits and VmaAllocationCreateInfo.
vmaCreatePool
VkResult vmaCreatePool(VmaAllocator allocator, const VmaPoolCreateInfo *pCreateInfo, VmaPool *pPool)
Allocates Vulkan device memory and creates VmaPool object.
false
@ false
Definition: q_shared.h:63
VmaStatInfo::unusedBytes
VkDeviceSize unusedBytes
Total number of bytes occupied by unused ranges.
Definition: vk_mem_alloc.h:2006
vmaAllocateMemoryPages
VkResult vmaAllocateMemoryPages(VmaAllocator allocator, const VkMemoryRequirements *pVkMemoryRequirements, const VmaAllocationCreateInfo *pCreateInfo, size_t allocationCount, VmaAllocation *pAllocations, VmaAllocationInfo *pAllocationInfo)
General purpose memory allocation for multiple allocation objects at once.
VmaStatInfo::usedBytes
VkDeviceSize usedBytes
Total number of bytes occupied by all allocations.
Definition: vk_mem_alloc.h:2004
VmaAllocatorCreateInfo::pAllocationCallbacks
const VkAllocationCallbacks * pAllocationCallbacks
Custom CPU memory allocation callbacks. Optional.
Definition: vk_mem_alloc.h:1881
VmaAllocatorCreateFlagBits
VmaAllocatorCreateFlagBits
Flags for created VmaAllocator.
Definition: vk_mem_alloc.h:1760
vmaAllocateMemoryForImage
VkResult vmaAllocateMemoryForImage(VmaAllocator allocator, VkImage image, const VmaAllocationCreateInfo *pCreateInfo, VmaAllocation *pAllocation, VmaAllocationInfo *pAllocationInfo)
Function similar to vmaAllocateMemoryForBuffer().
VmaPoolCreateInfo::maxBlockCount
size_t maxBlockCount
Maximum number of blocks that can be allocated in this pool.
Definition: vk_mem_alloc.h:2404
VmaPoolCreateInfo
Describes parameter of created VmaPool.
Definition: vk_mem_alloc.h:2376
VmaDeviceMemoryCallbacks::pfnAllocate
PFN_vmaAllocateDeviceMemoryFunction pfnAllocate
Optional, can be null.
Definition: vk_mem_alloc.h:1754
VmaPool
Represents custom memory pool.
VMA_MEMORY_USAGE_GPU_TO_CPU
@ VMA_MEMORY_USAGE_GPU_TO_CPU
Memory mappable on host (guarantees to be HOST_VISIBLE) and cached.
Definition: vk_mem_alloc.h:2102
VMA_ALLOCATION_CREATE_CAN_MAKE_OTHER_LOST_BIT
@ VMA_ALLOCATION_CREATE_CAN_MAKE_OTHER_LOST_BIT
While creating allocation using this flag, other allocations that were created with flag VMA_ALLOCATI...
Definition: vk_mem_alloc.h:2158
VmaPoolCreateInfo::flags
VmaPoolCreateFlags flags
Use combination of VmaPoolCreateFlagBits.
Definition: vk_mem_alloc.h:2382
VMA_MEMORY_USAGE_MAX_ENUM
@ VMA_MEMORY_USAGE_MAX_ENUM
Definition: vk_mem_alloc.h:2103
VmaStatInfo::allocationCount
uint32_t allocationCount
Number of VmaAllocation allocation objects allocated.
Definition: vk_mem_alloc.h:2000
VmaVulkanFunctions::vkInvalidateMappedMemoryRanges
PFN_vkInvalidateMappedMemoryRanges vkInvalidateMappedMemoryRanges
Definition: vk_mem_alloc.h:1817
vmaAllocateMemory
VkResult vmaAllocateMemory(VmaAllocator allocator, const VkMemoryRequirements *pVkMemoryRequirements, const VmaAllocationCreateInfo *pCreateInfo, VmaAllocation *pAllocation, VmaAllocationInfo *pAllocationInfo)
General purpose memory allocation.
VmaDefragmentationInfo2
Parameters for defragmentation.
Definition: vk_mem_alloc.h:2861
VmaDefragmentationInfo::maxBytesToMove
VkDeviceSize maxBytesToMove
Maximum total numbers of bytes that can be copied while moving allocations to different places.
Definition: vk_mem_alloc.h:2942
VMA_POOL_CREATE_FLAG_BITS_MAX_ENUM
@ VMA_POOL_CREATE_FLAG_BITS_MAX_ENUM
Definition: vk_mem_alloc.h:2370
VmaAllocationCreateInfo::requiredFlags
VkMemoryPropertyFlags requiredFlags
Flags that must be set in a Memory Type chosen for an allocation.
Definition: vk_mem_alloc.h:2228
VMA_ALLOCATION_CREATE_STRATEGY_MIN_FRAGMENTATION_BIT
@ VMA_ALLOCATION_CREATE_STRATEGY_MIN_FRAGMENTATION_BIT
Allocation strategy that tries to minimize memory fragmentation.
Definition: vk_mem_alloc.h:2201
VmaStatInfo
struct VmaStatInfo VmaStatInfo
Calculated statistics of memory usage in entire allocator.
s
static fixed16_t s
Definition: r_scan.c:30
VmaStatInfo::allocationSizeAvg
VkDeviceSize allocationSizeAvg
Definition: vk_mem_alloc.h:2007
vmaDestroyAllocator
void vmaDestroyAllocator(VmaAllocator allocator)
Destroys allocator object.
y
GLint y
Definition: qgl_win.c:115
VmaAllocatorCreateInfo::pDeviceMemoryCallbacks
const VmaDeviceMemoryCallbacks * pDeviceMemoryCallbacks
Informative callbacks for vkAllocateMemory, vkFreeMemory. Optional.
Definition: vk_mem_alloc.h:1884
VMA_ALLOCATION_CREATE_STRATEGY_MASK
@ VMA_ALLOCATION_CREATE_STRATEGY_MASK
A bit mask to extract only STRATEGY bits from entire set of flags.
Definition: vk_mem_alloc.h:2205
VmaAllocatorCreateInfo::device
VkDevice device
Vulkan device.
Definition: vk_mem_alloc.h:1875
vmaFindMemoryTypeIndexForImageInfo
VkResult vmaFindMemoryTypeIndexForImageInfo(VmaAllocator allocator, const VkImageCreateInfo *pImageCreateInfo, const VmaAllocationCreateInfo *pAllocationCreateInfo, uint32_t *pMemoryTypeIndex)
Helps to find memoryTypeIndex, given VkImageCreateInfo and VmaAllocationCreateInfo.
vmaMapMemory
VkResult vmaMapMemory(VmaAllocator allocator, VmaAllocation allocation, void **ppData)
Maps memory represented by given allocation and returns pointer to it.
vmaBindBufferMemory
VkResult vmaBindBufferMemory(VmaAllocator allocator, VmaAllocation allocation, VkBuffer buffer)
Binds buffer to allocation.
VmaAllocatorCreateInfo::pHeapSizeLimit
const VkDeviceSize * pHeapSizeLimit
Either null or a pointer to an array of limits on maximum number of bytes that can be allocated out o...
Definition: vk_mem_alloc.h:1923
vmaCreateImage
VkResult vmaCreateImage(VmaAllocator allocator, const VkImageCreateInfo *pImageCreateInfo, const VmaAllocationCreateInfo *pAllocationCreateInfo, VkImage *pImage, VmaAllocation *pAllocation, VmaAllocationInfo *pAllocationInfo)
Function similar to vmaCreateBuffer().
vmaFindMemoryTypeIndexForBufferInfo
VkResult vmaFindMemoryTypeIndexForBufferInfo(VmaAllocator allocator, const VkBufferCreateInfo *pBufferCreateInfo, const VmaAllocationCreateInfo *pAllocationCreateInfo, uint32_t *pMemoryTypeIndex)
Helps to find memoryTypeIndex, given VkBufferCreateInfo and VmaAllocationCreateInfo.
VmaPoolStats
struct VmaPoolStats VmaPoolStats
Describes parameter of existing VmaPool.
VmaVulkanFunctions
Pointers to some Vulkan functions - a subset used by the library.
Definition: vk_mem_alloc.h:1809
VmaAllocationInfo::pMappedData
void * pMappedData
Pointer to the beginning of this allocation as mapped data.
Definition: vk_mem_alloc.h:2564
VmaAllocatorCreateInfo::flags
VmaAllocatorCreateFlags flags
Flags for created allocator. Use VmaAllocatorCreateFlagBits enum.
Definition: vk_mem_alloc.h:1869
VmaDefragmentationFlags
VkFlags VmaDefragmentationFlags
Definition: vk_mem_alloc.h:2855
vmaGetPoolStats
void vmaGetPoolStats(VmaAllocator allocator, VmaPool pool, VmaPoolStats *pPoolStats)
Retrieves statistics of existing VmaPool object.
VmaVulkanFunctions::vkCreateImage
PFN_vkCreateImage vkCreateImage
Definition: vk_mem_alloc.h:1824
up
static vec3_t up
Definition: p_view.c:29
VmaRecordSettings
struct VmaRecordSettings VmaRecordSettings
Parameters for recording calls to VMA functions. To be used in VmaAllocatorCreateInfo::pRecordSetting...
VmaStatInfo::unusedRangeSizeAvg
VkDeviceSize unusedRangeSizeAvg
Definition: vk_mem_alloc.h:2008
VMA_MEMORY_USAGE_CPU_TO_GPU
@ VMA_MEMORY_USAGE_CPU_TO_GPU
Memory that is both mappable on host (guarantees to be HOST_VISIBLE) and preferably fast to access by...
Definition: vk_mem_alloc.h:2093
VMA_ALLOCATION_CREATE_STRATEGY_MIN_TIME_BIT
@ VMA_ALLOCATION_CREATE_STRATEGY_MIN_TIME_BIT
Allocation strategy that tries to minimize allocation time.
Definition: vk_mem_alloc.h:2198
VMA_ALLOCATION_CREATE_STRATEGY_MIN_MEMORY_BIT
@ VMA_ALLOCATION_CREATE_STRATEGY_MIN_MEMORY_BIT
Allocation strategy that tries to minimize memory usage.
Definition: vk_mem_alloc.h:2195
VmaDefragmentationStats
struct VmaDefragmentationStats VmaDefragmentationStats
Statistics returned by function vmaDefragment().
level
GLint level
Definition: qgl_win.c:116
VmaAllocationCreateInfo::usage
VmaMemoryUsage usage
Intended usage of memory.
Definition: vk_mem_alloc.h:2223
VmaStatInfo::allocationSizeMin
VkDeviceSize allocationSizeMin
Definition: vk_mem_alloc.h:2007
vmaBindBufferMemory2
VkResult vmaBindBufferMemory2(VmaAllocator allocator, VmaAllocation allocation, VkDeviceSize allocationLocalOffset, VkBuffer buffer, const void *pNext)
Binds buffer to allocation with additional parameters.
VmaAllocationInfo::size
VkDeviceSize size
Size of this allocation, in bytes.
Definition: vk_mem_alloc.h:2555
VmaRecordSettings::flags
VmaRecordFlags flags
Flags for recording. Use VmaRecordFlagBits enum.
Definition: vk_mem_alloc.h:1854
VmaVulkanFunctions::vkFlushMappedMemoryRanges
PFN_vkFlushMappedMemoryRanges vkFlushMappedMemoryRanges
Definition: vk_mem_alloc.h:1816
VmaAllocationInfo::pUserData
void * pUserData
Custom general-purpose pointer that was passed as VmaAllocationCreateInfo::pUserData or set using vma...
Definition: vk_mem_alloc.h:2569
vmaMakePoolAllocationsLost
void vmaMakePoolAllocationsLost(VmaAllocator allocator, VmaPool pool, size_t *pLostAllocationCount)
Marks all allocations in given pool as lost if they are not used in current frame or VmaPoolCreateInf...
VMA_POOL_CREATE_IGNORE_BUFFER_IMAGE_GRANULARITY_BIT
@ VMA_POOL_CREATE_IGNORE_BUFFER_IMAGE_GRANULARITY_BIT
Use this flag if you always allocate only buffers and linear images or only optimal images out of thi...
Definition: vk_mem_alloc.h:2334
vmaCreateBuffer
VkResult vmaCreateBuffer(VmaAllocator allocator, const VkBufferCreateInfo *pBufferCreateInfo, const VmaAllocationCreateInfo *pAllocationCreateInfo, VkBuffer *pBuffer, VmaAllocation *pAllocation, VmaAllocationInfo *pAllocationInfo)
VmaStats::memoryHeap
VmaStatInfo memoryHeap[VK_MAX_MEMORY_HEAPS]
Definition: vk_mem_alloc.h:2015
VmaAllocatorCreateInfo::pVulkanFunctions
const VmaVulkanFunctions * pVulkanFunctions
Pointers to Vulkan functions.
Definition: vk_mem_alloc.h:1935
VmaAllocatorCreateInfo
struct VmaAllocatorCreateInfo VmaAllocatorCreateInfo
Description of a Allocator to be created.
VmaPoolStats::blockCount
size_t blockCount
Number of VkDeviceMemory blocks allocated for this pool.
Definition: vk_mem_alloc.h:2445
vmaCreateAllocator
VkResult vmaCreateAllocator(const VmaAllocatorCreateInfo *pCreateInfo, VmaAllocator *pAllocator)
Creates Allocator object.
vmaDefragment
VkResult vmaDefragment(VmaAllocator allocator, VmaAllocation *pAllocations, size_t allocationCount, VkBool32 *pAllocationsChanged, const VmaDefragmentationInfo *pDefragmentationInfo, VmaDefragmentationStats *pDefragmentationStats)
Deprecated.
vmaCheckCorruption
VkResult vmaCheckCorruption(VmaAllocator allocator, uint32_t memoryTypeBits)
Checks magic number in margins around all allocations in given memory types (in both default and cust...
VmaAllocationCreateFlags
VkFlags VmaAllocationCreateFlags
Definition: vk_mem_alloc.h:2212
VmaStats::memoryType
VmaStatInfo memoryType[VK_MAX_MEMORY_TYPES]
Definition: vk_mem_alloc.h:2014
vmaFlushAllocation
void vmaFlushAllocation(VmaAllocator allocator, VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size)
Flushes memory of given allocation.
VMA_MEMORY_USAGE_UNKNOWN
@ VMA_MEMORY_USAGE_UNKNOWN
No intended memory usage specified.
Definition: vk_mem_alloc.h:2059
VmaDefragmentationInfo2::maxGpuAllocationsToMove
uint32_t maxGpuAllocationsToMove
Maximum number of allocations that can be moved to a different place using transfers on GPU side,...
Definition: vk_mem_alloc.h:2921
VmaVulkanFunctions::vkDestroyBuffer
PFN_vkDestroyBuffer vkDestroyBuffer
Definition: vk_mem_alloc.h:1823
VmaPoolCreateInfo::frameInUseCount
uint32_t frameInUseCount
Maximum number of additional frames that are in use at the same time as current frame.
Definition: vk_mem_alloc.h:2418
VmaVulkanFunctions::vkDestroyImage
PFN_vkDestroyImage vkDestroyImage
Definition: vk_mem_alloc.h:1825
VmaDefragmentationInfo2::maxCpuBytesToMove
VkDeviceSize maxCpuBytesToMove
Maximum total numbers of bytes that can be copied while moving allocations to different places using ...
Definition: vk_mem_alloc.h:2906
VmaPoolCreateInfo
struct VmaPoolCreateInfo VmaPoolCreateInfo
Describes parameter of created VmaPool.
VmaAllocationInfo::memoryType
uint32_t memoryType
Memory type index that this allocation was allocated from.
Definition: vk_mem_alloc.h:2536
vmaDestroyImage
void vmaDestroyImage(VmaAllocator allocator, VkImage image, VmaAllocation allocation)
Destroys Vulkan image and frees allocated memory.
VMA_ALLOCATION_CREATE_MAPPED_BIT
@ VMA_ALLOCATION_CREATE_MAPPED_BIT
Set this flag to use a memory that will be persistently mapped and retrieve pointer to it.
Definition: vk_mem_alloc.h:2138
vmaCalculateStats
void vmaCalculateStats(VmaAllocator allocator, VmaStats *pStats)
Retrieves statistics from current state of the Allocator.
vmaDestroyBuffer
void vmaDestroyBuffer(VmaAllocator allocator, VkBuffer buffer, VmaAllocation allocation)
Destroys Vulkan buffer and frees allocated memory.
VmaVulkanFunctions::vkCreateBuffer
PFN_vkCreateBuffer vkCreateBuffer
Definition: vk_mem_alloc.h:1822
VmaPoolStats::unusedRangeCount
size_t unusedRangeCount
Number of continuous memory ranges in the pool not used by any VmaAllocation.
Definition: vk_mem_alloc.h:2435
VmaPoolCreateFlagBits
VmaPoolCreateFlagBits
Flags to be passed as VmaPoolCreateInfo::flags.
Definition: vk_mem_alloc.h:2316
VmaAllocationInfo
struct VmaAllocationInfo VmaAllocationInfo
Parameters of VmaAllocation objects, that can be retrieved using function vmaGetAllocationInfo().
VmaDefragmentationStats::bytesMoved
VkDeviceSize bytesMoved
Total number of bytes that have been copied while moving allocations to different places.
Definition: vk_mem_alloc.h:2953
VmaStatInfo::unusedRangeSizeMin
VkDeviceSize unusedRangeSizeMin
Definition: vk_mem_alloc.h:2008
VMA_ALLOCATION_CREATE_UPPER_ADDRESS_BIT
@ VMA_ALLOCATION_CREATE_UPPER_ADDRESS_BIT
Allocation will be created from upper stack in a double stack pool.
Definition: vk_mem_alloc.h:2169
vmaCheckPoolCorruption
VkResult vmaCheckPoolCorruption(VmaAllocator allocator, VmaPool pool)
Checks magic number in margins around all allocations in given memory pool in search for corruptions.
pattern
GLushort pattern
Definition: qgl_win.c:217
vmaBindImageMemory
VkResult vmaBindImageMemory(VmaAllocator allocator, VmaAllocation allocation, VkImage image)
Binds image to allocation.
VmaAllocationCreateInfo::flags
VmaAllocationCreateFlags flags
Use VmaAllocationCreateFlagBits enum.
Definition: vk_mem_alloc.h:2217
VmaVulkanFunctions::vkGetImageMemoryRequirements
PFN_vkGetImageMemoryRequirements vkGetImageMemoryRequirements
Definition: vk_mem_alloc.h:1821
VmaAllocationCreateInfo
Definition: vk_mem_alloc.h:2214
VmaAllocationCreateInfo::preferredFlags
VkMemoryPropertyFlags preferredFlags
Flags that preferably should be set in a memory type chosen for an allocation.
Definition: vk_mem_alloc.h:2233
vmaDefragmentationBegin
VkResult vmaDefragmentationBegin(VmaAllocator allocator, const VmaDefragmentationInfo2 *pInfo, VmaDefragmentationStats *pStats, VmaDefragmentationContext *pContext)
Begins defragmentation process.
vmaBindImageMemory2
VkResult vmaBindImageMemory2(VmaAllocator allocator, VmaAllocation allocation, VkDeviceSize allocationLocalOffset, VkImage image, const void *pNext)
Binds image to allocation with additional parameters.
VmaDefragmentationInfo2::pAllocationsChanged
VkBool32 * pAllocationsChanged
Optional, output.
Definition: vk_mem_alloc.h:2882
VmaDefragmentationStats::allocationsMoved
uint32_t allocationsMoved
Number of allocations that have been moved to different places.
Definition: vk_mem_alloc.h:2957
VmaAllocationCreateInfo::memoryTypeBits
uint32_t memoryTypeBits
Bitmask containing one bit set for every memory type acceptable for this allocation.
Definition: vk_mem_alloc.h:2241
VmaDefragmentationStats::deviceMemoryBlocksFreed
uint32_t deviceMemoryBlocksFreed
Number of empty VkDeviceMemory objects that have been released to the system.
Definition: vk_mem_alloc.h:2959
VmaRecordSettings::pFilePath
const char * pFilePath
Path to the file that should be written by the recording.
Definition: vk_mem_alloc.h:1862
VmaStatInfo::allocationSizeMax
VkDeviceSize allocationSizeMax
Definition: vk_mem_alloc.h:2007
VmaAllocationInfo
Parameters of VmaAllocation objects, that can be retrieved using function vmaGetAllocationInfo().
Definition: vk_mem_alloc.h:2531
void
void(APIENTRY *qglAccum)(GLenum op
VMA_POOL_CREATE_BUDDY_ALGORITHM_BIT
@ VMA_POOL_CREATE_BUDDY_ALGORITHM_BIT
Enables alternative, buddy allocation algorithm in this pool.
Definition: vk_mem_alloc.h:2362
count
GLint GLsizei count
Definition: qgl_win.c:128
VMA_ALLOCATION_CREATE_FLAG_BITS_MAX_ENUM
@ VMA_ALLOCATION_CREATE_FLAG_BITS_MAX_ENUM
Definition: vk_mem_alloc.h:2210
VmaDefragmentationContext
Represents Opaque object that represents started defragmentation process.
VmaDefragmentationInfo2::pAllocations
VmaAllocation * pAllocations
Pointer to array of allocations that can be defragmented.
Definition: vk_mem_alloc.h:2876
VMA_POOL_CREATE_ALGORITHM_MASK
@ VMA_POOL_CREATE_ALGORITHM_MASK
Bit mask to extract only ALGORITHM bits from entire set of flags.
Definition: vk_mem_alloc.h:2366
VmaDefragmentationInfo2::maxCpuAllocationsToMove
uint32_t maxCpuAllocationsToMove
Maximum number of allocations that can be moved to a different place using transfers on CPU side,...
Definition: vk_mem_alloc.h:2911
VmaDefragmentationInfo
Deprecated.
Definition: vk_mem_alloc.h:2937
VMA_ALLOCATION_CREATE_DONT_BIND_BIT
@ VMA_ALLOCATION_CREATE_DONT_BIND_BIT
Create both buffer/image and allocation, but don't bind them together.
Definition: vk_mem_alloc.h:2175
VmaDefragmentationInfo2
struct VmaDefragmentationInfo2 VmaDefragmentationInfo2
Parameters for defragmentation.