From 2accc9a95206c4f1ba0f93de9393678ee2e27060 Mon Sep 17 00:00:00 2001
From: Noah Metz
Date: Wed, 9 Oct 2024 10:54:19 -0600
Subject: [PATCH] Initial commit of C Vulkan code

---
 client/Makefile               |    55 +
 client/include/render.h       |    91 +
 client/include/vk_mem_alloc.h | 19109 ++++++++++++++++++++++++++++++++
 client/main.go                |   221 -
 client/src/main.c             |    34 +
 client/src/main.o             |   Bin 0 -> 24944 bytes
 client/src/render.c           |  1050 ++
 client/src/render.o           |   Bin 0 -> 188296 bytes
 client/src/vma.cpp            |     2 +
 client/src/vma.o              |   Bin 0 -> 915456 bytes
 client/src/vulkan.c           |     0
 11 files changed, 20341 insertions(+), 221 deletions(-)
 create mode 100644 client/Makefile
 create mode 100644 client/include/render.h
 create mode 100644 client/include/vk_mem_alloc.h
 delete mode 100644 client/main.go
 create mode 100644 client/src/main.c
 create mode 100644 client/src/main.o
 create mode 100644 client/src/render.c
 create mode 100644 client/src/render.o
 create mode 100644 client/src/vma.cpp
 create mode 100644 client/src/vma.o
 create mode 100644 client/src/vulkan.c

diff --git a/client/Makefile b/client/Makefile
new file mode 100644
index 0000000..51ac202
--- /dev/null
+++ b/client/Makefile
@@ -0,0 +1,55 @@
+ROOT_DIR:=$(shell dirname $(realpath $(firstword $(MAKEFILE_LIST))))
+CFLAGS = -I $(ROOT_DIR)/include -I/usr/local/include -O0 -g -Wall -Wextra
+LDFLAGS = -L/opt/homebrew/opt/llvm/lib -L/opt/homebrew/opt/llvm/lib/c++ -L/opt/homebrew/lib -lglfw -lvulkan -ldl -Xlinker -rpath -Xlinker /opt/homebrew/lib
+CC = /opt/homebrew/opt/llvm/bin/clang
+CPP = /opt/homebrew/opt/llvm/bin/clang++
+DSYM = /opt/homebrew/opt/llvm/bin/dsymutil
+GDB = /opt/homebrew/opt/llvm/bin/lldb
+
+SOURCES = src/main.c src/render.c src/vma.cpp
+OBJECTS = $(addsuffix .o, $(basename $(SOURCES)))
+VERT_SPV = $(addsuffix .vert.spv, $(basename $(wildcard shader_src/*.vert)))
+FRAG_SPV = $(addsuffix .frag.spv, $(basename $(wildcard shader_src/*.frag)))
+
+export MVK_CONFIG_USE_METAL_ARGUMENT_BUFFERS=1
+
+.PHONY: all
+all: roleplay $(VERT_SPV) $(FRAG_SPV)
+
+roleplay: $(OBJECTS)
+	$(CPP) $(CFLAGS) $(LDFLAGS) -o $@ $^
+
+%.o: %.cpp
+	$(CPP) $(CFLAGS) -c -o $@ $<
+
+%.o: %.c
+	$(CC) $(CFLAGS) -c -o $@ $<
+
+.PHONY: clean clean_compdb
+
+clean:
+	rm -f $(FRAG_SPV)
+	rm -f $(VERT_SPV)
+	rm -f $(OBJECTS)
+	rm -f roleplay
+	rm -rf roleplay.dSYM
+
+clean_compdb:
+	rm -rf .compdb
+	rm compile_commands.json
+
+run: roleplay
+	./roleplay
+
+roleplay.dSYM: roleplay
+	$(DSYM) roleplay
+
+debug: roleplay roleplay.dSYM
+	$(GDB) roleplay
+
+
+%.vert.spv: %.vert
+	glslangValidator -V -o $@ $<
+
+%.frag.spv: %.frag
+	glslangValidator -V -o $@ $<
diff --git a/client/include/render.h b/client/include/render.h
new file mode 100644
index 0000000..edaedea
--- /dev/null
+++ b/client/include/render.h
@@ -0,0 +1,91 @@
+#ifndef RENDER_H
+#define RENDER_H
+
+#define VK_USE_PLATFORM_MACOS_MVK
+#include "vulkan/vulkan_core.h"
+#include "vulkan/vk_enum_string_helper.h"
+
+#include "vk_mem_alloc.h"
+
+#define GLFW_INCLUDE_VULKAN
+#include <GLFW/glfw3.h>
+#define GLFW_EXPOSE_NATIVE_COCOA
+#include <GLFW/glfw3native.h>
+
+#define GLM_FORCE_RADIANS
+#define GLM_FORCE_DEPTH_ZERO_TO_ONE
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+
+typedef struct QueueStruct {
+  VkQueue handle;
+  uint32_t family;
+  uint32_t index;
+} Queue;
+
+typedef struct SwapchainDetailsStruct {
+  VkSurfaceCapabilitiesKHR capabilities;
+
+  VkSurfaceFormatKHR* formats;
+  uint32_t formats_count;
+
+  VkPresentModeKHR* present_modes;
+  uint32_t present_modes_count;
+} SwapchainDetails;
+
+typedef struct RenderContextStruct {
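+  /* All of the long-lived Vulkan state owned by the renderer: instance and
+   * device handles, queues, the swapchain and its per-image resources, the
+   * depth buffer, command pools, render pass, synchronization objects, and
+   * the UI pipelines. Filled in by init_vulkan(), declared at the end of
+   * this header. */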
+  VkInstance instance;
+  VkDebugUtilsMessengerEXT debug_messenger;
+  VkPhysicalDevice physical_device;
+  VkPhysicalDeviceMemoryProperties memories;
+  VkSurfaceKHR surface;
+  Queue graphics_queue;
+  Queue present_queue;
+  Queue transfer_queue;
+  VkDevice device;
+  VmaAllocator allocator;
+
+  SwapchainDetails swapchain_details;
+  VkSurfaceFormatKHR swapchain_format;
+  VkPresentModeKHR swapchain_present_mode;
+  VkExtent2D swapchain_extent;
+  VkSwapchainKHR swapchain;
+
+  uint32_t swapchain_image_count;
+  VkImage* swapchain_images;
+  VkImageView* swapchain_image_views;
+  VkFramebuffer* swapchain_framebuffers;
+
+  VkFormat depth_format;
+  VkImageView depth_image_view;
+  VkImage depth_image;
+  VmaAllocation depth_image_memory;
+
+  VkCommandPool extra_graphics_pool;
+  VkCommandPool graphics_pool;
+  VkCommandPool transfer_pool;
+
+  VkRenderPass render_pass;
+
+  VkCommandBuffer* swapchain_command_buffers;
+
+  VkSemaphore* image_available_semaphores;
+  VkSemaphore* render_finished_semaphores;
+
+  VkFence* in_flight_fences;
+
+  VkPipeline ui_pipeline_rect;
+  VkPipeline ui_pipeline_text;
+} RenderContext;
+
+GLFWwindow* init_window();
+VkResult init_vulkan(GLFWwindow* window, RenderContext* context);
+
+#endif
diff --git a/client/include/vk_mem_alloc.h b/client/include/vk_mem_alloc.h
new file mode 100644
index 0000000..47494fd
--- /dev/null
+++ b/client/include/vk_mem_alloc.h
@@ -0,0 +1,19109 @@
+//
+// Copyright (c) 2017-2024 Advanced Micro Devices, Inc. All rights reserved.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+//
+
+#ifndef AMD_VULKAN_MEMORY_ALLOCATOR_H
+#define AMD_VULKAN_MEMORY_ALLOCATOR_H
+
+/** \mainpage Vulkan Memory Allocator
+
+Version 3.1.0
+
+Copyright (c) 2017-2024 Advanced Micro Devices, Inc. All rights reserved.
\n +License: MIT \n +See also: [product page on GPUOpen](https://gpuopen.com/gaming-product/vulkan-memory-allocator/), +[repository on GitHub](https://github.com/GPUOpen-LibrariesAndSDKs/VulkanMemoryAllocator) + + +API documentation divided into groups: [Topics](topics.html) + +General documentation chapters: + +- User guide + - \subpage quick_start + - [Project setup](@ref quick_start_project_setup) + - [Initialization](@ref quick_start_initialization) + - [Resource allocation](@ref quick_start_resource_allocation) + - \subpage choosing_memory_type + - [Usage](@ref choosing_memory_type_usage) + - [Required and preferred flags](@ref choosing_memory_type_required_preferred_flags) + - [Explicit memory types](@ref choosing_memory_type_explicit_memory_types) + - [Custom memory pools](@ref choosing_memory_type_custom_memory_pools) + - [Dedicated allocations](@ref choosing_memory_type_dedicated_allocations) + - \subpage memory_mapping + - [Copy functions](@ref memory_mapping_copy_functions) + - [Mapping functions](@ref memory_mapping_mapping_functions) + - [Persistently mapped memory](@ref memory_mapping_persistently_mapped_memory) + - [Cache flush and invalidate](@ref memory_mapping_cache_control) + - \subpage staying_within_budget + - [Querying for budget](@ref staying_within_budget_querying_for_budget) + - [Controlling memory usage](@ref staying_within_budget_controlling_memory_usage) + - \subpage resource_aliasing + - \subpage custom_memory_pools + - [Choosing memory type index](@ref custom_memory_pools_MemTypeIndex) + - [When not to use custom pools](@ref custom_memory_pools_when_not_use) + - [Linear allocation algorithm](@ref linear_algorithm) + - [Free-at-once](@ref linear_algorithm_free_at_once) + - [Stack](@ref linear_algorithm_stack) + - [Double stack](@ref linear_algorithm_double_stack) + - [Ring buffer](@ref linear_algorithm_ring_buffer) + - \subpage defragmentation + - \subpage statistics + - [Numeric statistics](@ref statistics_numeric_statistics) + - [JSON dump](@ref statistics_json_dump) + - \subpage allocation_annotation + - [Allocation user data](@ref allocation_user_data) + - [Allocation names](@ref allocation_names) + - \subpage virtual_allocator + - \subpage debugging_memory_usage + - [Memory initialization](@ref debugging_memory_usage_initialization) + - [Margins](@ref debugging_memory_usage_margins) + - [Corruption detection](@ref debugging_memory_usage_corruption_detection) + - [Leak detection features](@ref debugging_memory_usage_leak_detection) + - \subpage other_api_interop +- \subpage usage_patterns + - [GPU-only resource](@ref usage_patterns_gpu_only) + - [Staging copy for upload](@ref usage_patterns_staging_copy_upload) + - [Readback](@ref usage_patterns_readback) + - [Advanced data uploading](@ref usage_patterns_advanced_data_uploading) + - [Other use cases](@ref usage_patterns_other_use_cases) +- \subpage configuration + - [Pointers to Vulkan functions](@ref config_Vulkan_functions) + - [Custom host memory allocator](@ref custom_memory_allocator) + - [Device memory allocation callbacks](@ref allocation_callbacks) + - [Device heap memory limit](@ref heap_memory_limit) +- Extension support + - \subpage vk_khr_dedicated_allocation + - \subpage enabling_buffer_device_address + - \subpage vk_ext_memory_priority + - \subpage vk_amd_device_coherent_memory + - \subpage vk_khr_external_memory_win32 +- \subpage general_considerations + - [Thread safety](@ref general_considerations_thread_safety) + - [Versioning and compatibility](@ref 
general_considerations_versioning_and_compatibility) + - [Validation layer warnings](@ref general_considerations_validation_layer_warnings) + - [Allocation algorithm](@ref general_considerations_allocation_algorithm) + - [Features not supported](@ref general_considerations_features_not_supported) + +\defgroup group_init Library initialization + +\brief API elements related to the initialization and management of the entire library, especially #VmaAllocator object. + +\defgroup group_alloc Memory allocation + +\brief API elements related to the allocation, deallocation, and management of Vulkan memory, buffers, images. +Most basic ones being: vmaCreateBuffer(), vmaCreateImage(). + +\defgroup group_virtual Virtual allocator + +\brief API elements related to the mechanism of \ref virtual_allocator - using the core allocation algorithm +for user-defined purpose without allocating any real GPU memory. + +\defgroup group_stats Statistics + +\brief API elements that query current status of the allocator, from memory usage, budget, to full dump of the internal state in JSON format. +See documentation chapter: \ref statistics. +*/ + + +#ifdef __cplusplus +extern "C" { +#endif + +#if !defined(VULKAN_H_) +#include +#endif + +#if !defined(VMA_VULKAN_VERSION) + #if defined(VK_VERSION_1_3) + #define VMA_VULKAN_VERSION 1003000 + #elif defined(VK_VERSION_1_2) + #define VMA_VULKAN_VERSION 1002000 + #elif defined(VK_VERSION_1_1) + #define VMA_VULKAN_VERSION 1001000 + #else + #define VMA_VULKAN_VERSION 1000000 + #endif +#endif + +#if defined(__ANDROID__) && defined(VK_NO_PROTOTYPES) && VMA_STATIC_VULKAN_FUNCTIONS + extern PFN_vkGetInstanceProcAddr vkGetInstanceProcAddr; + extern PFN_vkGetDeviceProcAddr vkGetDeviceProcAddr; + extern PFN_vkGetPhysicalDeviceProperties vkGetPhysicalDeviceProperties; + extern PFN_vkGetPhysicalDeviceMemoryProperties vkGetPhysicalDeviceMemoryProperties; + extern PFN_vkAllocateMemory vkAllocateMemory; + extern PFN_vkFreeMemory vkFreeMemory; + extern PFN_vkMapMemory vkMapMemory; + extern PFN_vkUnmapMemory vkUnmapMemory; + extern PFN_vkFlushMappedMemoryRanges vkFlushMappedMemoryRanges; + extern PFN_vkInvalidateMappedMemoryRanges vkInvalidateMappedMemoryRanges; + extern PFN_vkBindBufferMemory vkBindBufferMemory; + extern PFN_vkBindImageMemory vkBindImageMemory; + extern PFN_vkGetBufferMemoryRequirements vkGetBufferMemoryRequirements; + extern PFN_vkGetImageMemoryRequirements vkGetImageMemoryRequirements; + extern PFN_vkCreateBuffer vkCreateBuffer; + extern PFN_vkDestroyBuffer vkDestroyBuffer; + extern PFN_vkCreateImage vkCreateImage; + extern PFN_vkDestroyImage vkDestroyImage; + extern PFN_vkCmdCopyBuffer vkCmdCopyBuffer; + #if VMA_VULKAN_VERSION >= 1001000 + extern PFN_vkGetBufferMemoryRequirements2 vkGetBufferMemoryRequirements2; + extern PFN_vkGetImageMemoryRequirements2 vkGetImageMemoryRequirements2; + extern PFN_vkBindBufferMemory2 vkBindBufferMemory2; + extern PFN_vkBindImageMemory2 vkBindImageMemory2; + extern PFN_vkGetPhysicalDeviceMemoryProperties2 vkGetPhysicalDeviceMemoryProperties2; + #endif // #if VMA_VULKAN_VERSION >= 1001000 +#endif // #if defined(__ANDROID__) && VMA_STATIC_VULKAN_FUNCTIONS && VK_NO_PROTOTYPES + +#if !defined(VMA_DEDICATED_ALLOCATION) + #if VK_KHR_get_memory_requirements2 && VK_KHR_dedicated_allocation + #define VMA_DEDICATED_ALLOCATION 1 + #else + #define VMA_DEDICATED_ALLOCATION 0 + #endif +#endif + +#if !defined(VMA_BIND_MEMORY2) + #if VK_KHR_bind_memory2 + #define VMA_BIND_MEMORY2 1 + #else + #define VMA_BIND_MEMORY2 0 + #endif +#endif + +#if 
!defined(VMA_MEMORY_BUDGET) + #if VK_EXT_memory_budget && (VK_KHR_get_physical_device_properties2 || VMA_VULKAN_VERSION >= 1001000) + #define VMA_MEMORY_BUDGET 1 + #else + #define VMA_MEMORY_BUDGET 0 + #endif +#endif + +// Defined to 1 when VK_KHR_buffer_device_address device extension or equivalent core Vulkan 1.2 feature is defined in its headers. +#if !defined(VMA_BUFFER_DEVICE_ADDRESS) + #if VK_KHR_buffer_device_address || VMA_VULKAN_VERSION >= 1002000 + #define VMA_BUFFER_DEVICE_ADDRESS 1 + #else + #define VMA_BUFFER_DEVICE_ADDRESS 0 + #endif +#endif + +// Defined to 1 when VK_EXT_memory_priority device extension is defined in Vulkan headers. +#if !defined(VMA_MEMORY_PRIORITY) + #if VK_EXT_memory_priority + #define VMA_MEMORY_PRIORITY 1 + #else + #define VMA_MEMORY_PRIORITY 0 + #endif +#endif + +// Defined to 1 when VK_KHR_maintenance4 device extension is defined in Vulkan headers. +#if !defined(VMA_KHR_MAINTENANCE4) + #if VK_KHR_maintenance4 + #define VMA_KHR_MAINTENANCE4 1 + #else + #define VMA_KHR_MAINTENANCE4 0 + #endif +#endif + +// Defined to 1 when VK_KHR_maintenance5 device extension is defined in Vulkan headers. +#if !defined(VMA_KHR_MAINTENANCE5) + #if VK_KHR_maintenance5 + #define VMA_KHR_MAINTENANCE5 1 + #else + #define VMA_KHR_MAINTENANCE5 0 + #endif +#endif + + +// Defined to 1 when VK_KHR_external_memory device extension is defined in Vulkan headers. +#if !defined(VMA_EXTERNAL_MEMORY) + #if VK_KHR_external_memory + #define VMA_EXTERNAL_MEMORY 1 + #else + #define VMA_EXTERNAL_MEMORY 0 + #endif +#endif + +// Defined to 1 when VK_KHR_external_memory_win32 device extension is defined in Vulkan headers. +#if !defined(VMA_EXTERNAL_MEMORY_WIN32) + #if VK_KHR_external_memory_win32 + #define VMA_EXTERNAL_MEMORY_WIN32 1 + #else + #define VMA_EXTERNAL_MEMORY_WIN32 0 + #endif +#endif + +// Define these macros to decorate all public functions with additional code, +// before and after returned type, appropriately. This may be useful for +// exporting the functions when compiling VMA as a separate library. Example: +// #define VMA_CALL_PRE __declspec(dllexport) +// #define VMA_CALL_POST __cdecl +#ifndef VMA_CALL_PRE + #define VMA_CALL_PRE +#endif +#ifndef VMA_CALL_POST + #define VMA_CALL_POST +#endif + +// Define this macro to decorate pNext pointers with an attribute specifying the Vulkan +// structure that will be extended via the pNext chain. +#ifndef VMA_EXTENDS_VK_STRUCT + #define VMA_EXTENDS_VK_STRUCT(vkStruct) +#endif + +// Define this macro to decorate pointers with an attribute specifying the +// length of the array they point to if they are not null. +// +// The length may be one of +// - The name of another parameter in the argument list where the pointer is declared +// - The name of another member in the struct where the pointer is declared +// - The name of a member of a struct type, meaning the value of that member in +// the context of the call. For example +// VMA_LEN_IF_NOT_NULL("VkPhysicalDeviceMemoryProperties::memoryHeapCount"), +// this means the number of memory heaps available in the device associated +// with the VmaAllocator being dealt with. +#ifndef VMA_LEN_IF_NOT_NULL + #define VMA_LEN_IF_NOT_NULL(len) +#endif + +// The VMA_NULLABLE macro is defined to be _Nullable when compiling with Clang. 
+// see: https://clang.llvm.org/docs/AttributeReference.html#nullable +#ifndef VMA_NULLABLE + #ifdef __clang__ + #define VMA_NULLABLE _Nullable + #else + #define VMA_NULLABLE + #endif +#endif + +// The VMA_NOT_NULL macro is defined to be _Nonnull when compiling with Clang. +// see: https://clang.llvm.org/docs/AttributeReference.html#nonnull +#ifndef VMA_NOT_NULL + #ifdef __clang__ + #define VMA_NOT_NULL _Nonnull + #else + #define VMA_NOT_NULL + #endif +#endif + +// If non-dispatchable handles are represented as pointers then we can give +// then nullability annotations +#ifndef VMA_NOT_NULL_NON_DISPATCHABLE + #if defined(__LP64__) || defined(_WIN64) || (defined(__x86_64__) && !defined(__ILP32__) ) || defined(_M_X64) || defined(__ia64) || defined (_M_IA64) || defined(__aarch64__) || defined(__powerpc64__) + #define VMA_NOT_NULL_NON_DISPATCHABLE VMA_NOT_NULL + #else + #define VMA_NOT_NULL_NON_DISPATCHABLE + #endif +#endif + +#ifndef VMA_NULLABLE_NON_DISPATCHABLE + #if defined(__LP64__) || defined(_WIN64) || (defined(__x86_64__) && !defined(__ILP32__) ) || defined(_M_X64) || defined(__ia64) || defined (_M_IA64) || defined(__aarch64__) || defined(__powerpc64__) + #define VMA_NULLABLE_NON_DISPATCHABLE VMA_NULLABLE + #else + #define VMA_NULLABLE_NON_DISPATCHABLE + #endif +#endif + +#ifndef VMA_STATS_STRING_ENABLED + #define VMA_STATS_STRING_ENABLED 1 +#endif + +//////////////////////////////////////////////////////////////////////////////// +//////////////////////////////////////////////////////////////////////////////// +// +// INTERFACE +// +//////////////////////////////////////////////////////////////////////////////// +//////////////////////////////////////////////////////////////////////////////// + +// Sections for managing code placement in file, only for development purposes e.g. for convenient folding inside an IDE. +#ifndef _VMA_ENUM_DECLARATIONS + +/** +\addtogroup group_init +@{ +*/ + +/// Flags for created #VmaAllocator. +typedef enum VmaAllocatorCreateFlagBits +{ + /** \brief Allocator and all objects created from it will not be synchronized internally, so you must guarantee they are used from only one thread at a time or synchronized externally by you. + + Using this flag may increase performance because internal mutexes are not used. + */ + VMA_ALLOCATOR_CREATE_EXTERNALLY_SYNCHRONIZED_BIT = 0x00000001, + /** \brief Enables usage of VK_KHR_dedicated_allocation extension. + + The flag works only if VmaAllocatorCreateInfo::vulkanApiVersion `== VK_API_VERSION_1_0`. + When it is `VK_API_VERSION_1_1`, the flag is ignored because the extension has been promoted to Vulkan 1.1. + + Using this extension will automatically allocate dedicated blocks of memory for + some buffers and images instead of suballocating place for them out of bigger + memory blocks (as if you explicitly used #VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT + flag) when it is recommended by the driver. It may improve performance on some + GPUs. + + You may set this flag only if you found out that following device extensions are + supported, you enabled them while creating Vulkan device passed as + VmaAllocatorCreateInfo::device, and you want them to be used internally by this + library: + + - VK_KHR_get_memory_requirements2 (device extension) + - VK_KHR_dedicated_allocation (device extension) + + When this flag is set, you can experience following warnings reported by Vulkan + validation layer. You can ignore them. 
+ + > vkBindBufferMemory(): Binding memory to buffer 0x2d but vkGetBufferMemoryRequirements() has not been called on that buffer. + */ + VMA_ALLOCATOR_CREATE_KHR_DEDICATED_ALLOCATION_BIT = 0x00000002, + /** + Enables usage of VK_KHR_bind_memory2 extension. + + The flag works only if VmaAllocatorCreateInfo::vulkanApiVersion `== VK_API_VERSION_1_0`. + When it is `VK_API_VERSION_1_1`, the flag is ignored because the extension has been promoted to Vulkan 1.1. + + You may set this flag only if you found out that this device extension is supported, + you enabled it while creating Vulkan device passed as VmaAllocatorCreateInfo::device, + and you want it to be used internally by this library. + + The extension provides functions `vkBindBufferMemory2KHR` and `vkBindImageMemory2KHR`, + which allow to pass a chain of `pNext` structures while binding. + This flag is required if you use `pNext` parameter in vmaBindBufferMemory2() or vmaBindImageMemory2(). + */ + VMA_ALLOCATOR_CREATE_KHR_BIND_MEMORY2_BIT = 0x00000004, + /** + Enables usage of VK_EXT_memory_budget extension. + + You may set this flag only if you found out that this device extension is supported, + you enabled it while creating Vulkan device passed as VmaAllocatorCreateInfo::device, + and you want it to be used internally by this library, along with another instance extension + VK_KHR_get_physical_device_properties2, which is required by it (or Vulkan 1.1, where this extension is promoted). + + The extension provides query for current memory usage and budget, which will probably + be more accurate than an estimation used by the library otherwise. + */ + VMA_ALLOCATOR_CREATE_EXT_MEMORY_BUDGET_BIT = 0x00000008, + /** + Enables usage of VK_AMD_device_coherent_memory extension. + + You may set this flag only if you: + + - found out that this device extension is supported and enabled it while creating Vulkan device passed as VmaAllocatorCreateInfo::device, + - checked that `VkPhysicalDeviceCoherentMemoryFeaturesAMD::deviceCoherentMemory` is true and set it while creating the Vulkan device, + - want it to be used internally by this library. + + The extension and accompanying device feature provide access to memory types with + `VK_MEMORY_PROPERTY_DEVICE_COHERENT_BIT_AMD` and `VK_MEMORY_PROPERTY_DEVICE_UNCACHED_BIT_AMD` flags. + They are useful mostly for writing breadcrumb markers - a common method for debugging GPU crash/hang/TDR. + + When the extension is not enabled, such memory types are still enumerated, but their usage is illegal. + To protect from this error, if you don't create the allocator with this flag, it will refuse to allocate any memory or create a custom pool in such memory type, + returning `VK_ERROR_FEATURE_NOT_PRESENT`. + */ + VMA_ALLOCATOR_CREATE_AMD_DEVICE_COHERENT_MEMORY_BIT = 0x00000010, + /** + Enables usage of "buffer device address" feature, which allows you to use function + `vkGetBufferDeviceAddress*` to get raw GPU pointer to a buffer and pass it for usage inside a shader. + + You may set this flag only if you: + + 1. (For Vulkan version < 1.2) Found as available and enabled device extension + VK_KHR_buffer_device_address. + This extension is promoted to core Vulkan 1.2. + 2. Found as available and enabled device feature `VkPhysicalDeviceBufferDeviceAddressFeatures::bufferDeviceAddress`. + + When this flag is set, you can create buffers with `VK_BUFFER_USAGE_SHADER_DEVICE_ADDRESS_BIT` using VMA. 
+ The library automatically adds `VK_MEMORY_ALLOCATE_DEVICE_ADDRESS_BIT` to + allocated memory blocks wherever it might be needed. + + For more information, see documentation chapter \ref enabling_buffer_device_address. + */ + VMA_ALLOCATOR_CREATE_BUFFER_DEVICE_ADDRESS_BIT = 0x00000020, + /** + Enables usage of VK_EXT_memory_priority extension in the library. + + You may set this flag only if you found available and enabled this device extension, + along with `VkPhysicalDeviceMemoryPriorityFeaturesEXT::memoryPriority == VK_TRUE`, + while creating Vulkan device passed as VmaAllocatorCreateInfo::device. + + When this flag is used, VmaAllocationCreateInfo::priority and VmaPoolCreateInfo::priority + are used to set priorities of allocated Vulkan memory. Without it, these variables are ignored. + + A priority must be a floating-point value between 0 and 1, indicating the priority of the allocation relative to other memory allocations. + Larger values are higher priority. The granularity of the priorities is implementation-dependent. + It is automatically passed to every call to `vkAllocateMemory` done by the library using structure `VkMemoryPriorityAllocateInfoEXT`. + The value to be used for default priority is 0.5. + For more details, see the documentation of the VK_EXT_memory_priority extension. + */ + VMA_ALLOCATOR_CREATE_EXT_MEMORY_PRIORITY_BIT = 0x00000040, + /** + Enables usage of VK_KHR_maintenance4 extension in the library. + + You may set this flag only if you found available and enabled this device extension, + while creating Vulkan device passed as VmaAllocatorCreateInfo::device. + */ + VMA_ALLOCATOR_CREATE_KHR_MAINTENANCE4_BIT = 0x00000080, + /** + Enables usage of VK_KHR_maintenance5 extension in the library. + + You should set this flag if you found available and enabled this device extension, + while creating Vulkan device passed as VmaAllocatorCreateInfo::device. + */ + VMA_ALLOCATOR_CREATE_KHR_MAINTENANCE5_BIT = 0x00000100, + + /** + Enables usage of VK_KHR_external_memory_win32 extension in the library. + + You should set this flag if you found available and enabled this device extension, + while creating Vulkan device passed as VmaAllocatorCreateInfo::device. + For more information, see \ref vk_khr_external_memory_win32. + */ + VMA_ALLOCATOR_CREATE_KHR_EXTERNAL_MEMORY_WIN32_BIT = 0x00000200, + + VMA_ALLOCATOR_CREATE_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF +} VmaAllocatorCreateFlagBits; +/// See #VmaAllocatorCreateFlagBits. +typedef VkFlags VmaAllocatorCreateFlags; + +/** @} */ + +/** +\addtogroup group_alloc +@{ +*/ + +/// \brief Intended usage of the allocated memory. +typedef enum VmaMemoryUsage +{ + /** No intended memory usage specified. + Use other members of VmaAllocationCreateInfo to specify your requirements. + */ + VMA_MEMORY_USAGE_UNKNOWN = 0, + /** + \deprecated Obsolete, preserved for backward compatibility. + Prefers `VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT`. + */ + VMA_MEMORY_USAGE_GPU_ONLY = 1, + /** + \deprecated Obsolete, preserved for backward compatibility. + Guarantees `VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT` and `VK_MEMORY_PROPERTY_HOST_COHERENT_BIT`. + */ + VMA_MEMORY_USAGE_CPU_ONLY = 2, + /** + \deprecated Obsolete, preserved for backward compatibility. + Guarantees `VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT`, prefers `VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT`. + */ + VMA_MEMORY_USAGE_CPU_TO_GPU = 3, + /** + \deprecated Obsolete, preserved for backward compatibility. + Guarantees `VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT`, prefers `VK_MEMORY_PROPERTY_HOST_CACHED_BIT`. 
+ */ + VMA_MEMORY_USAGE_GPU_TO_CPU = 4, + /** + \deprecated Obsolete, preserved for backward compatibility. + Prefers not `VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT`. + */ + VMA_MEMORY_USAGE_CPU_COPY = 5, + /** + Lazily allocated GPU memory having `VK_MEMORY_PROPERTY_LAZILY_ALLOCATED_BIT`. + Exists mostly on mobile platforms. Using it on desktop PC or other GPUs with no such memory type present will fail the allocation. + + Usage: Memory for transient attachment images (color attachments, depth attachments etc.), created with `VK_IMAGE_USAGE_TRANSIENT_ATTACHMENT_BIT`. + + Allocations with this usage are always created as dedicated - it implies #VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT. + */ + VMA_MEMORY_USAGE_GPU_LAZILY_ALLOCATED = 6, + /** + Selects best memory type automatically. + This flag is recommended for most common use cases. + + When using this flag, if you want to map the allocation (using vmaMapMemory() or #VMA_ALLOCATION_CREATE_MAPPED_BIT), + you must pass one of the flags: #VMA_ALLOCATION_CREATE_HOST_ACCESS_SEQUENTIAL_WRITE_BIT or #VMA_ALLOCATION_CREATE_HOST_ACCESS_RANDOM_BIT + in VmaAllocationCreateInfo::flags. + + It can be used only with functions that let the library know `VkBufferCreateInfo` or `VkImageCreateInfo`, e.g. + vmaCreateBuffer(), vmaCreateImage(), vmaFindMemoryTypeIndexForBufferInfo(), vmaFindMemoryTypeIndexForImageInfo() + and not with generic memory allocation functions. + */ + VMA_MEMORY_USAGE_AUTO = 7, + /** + Selects best memory type automatically with preference for GPU (device) memory. + + When using this flag, if you want to map the allocation (using vmaMapMemory() or #VMA_ALLOCATION_CREATE_MAPPED_BIT), + you must pass one of the flags: #VMA_ALLOCATION_CREATE_HOST_ACCESS_SEQUENTIAL_WRITE_BIT or #VMA_ALLOCATION_CREATE_HOST_ACCESS_RANDOM_BIT + in VmaAllocationCreateInfo::flags. + + It can be used only with functions that let the library know `VkBufferCreateInfo` or `VkImageCreateInfo`, e.g. + vmaCreateBuffer(), vmaCreateImage(), vmaFindMemoryTypeIndexForBufferInfo(), vmaFindMemoryTypeIndexForImageInfo() + and not with generic memory allocation functions. + */ + VMA_MEMORY_USAGE_AUTO_PREFER_DEVICE = 8, + /** + Selects best memory type automatically with preference for CPU (host) memory. + + When using this flag, if you want to map the allocation (using vmaMapMemory() or #VMA_ALLOCATION_CREATE_MAPPED_BIT), + you must pass one of the flags: #VMA_ALLOCATION_CREATE_HOST_ACCESS_SEQUENTIAL_WRITE_BIT or #VMA_ALLOCATION_CREATE_HOST_ACCESS_RANDOM_BIT + in VmaAllocationCreateInfo::flags. + + It can be used only with functions that let the library know `VkBufferCreateInfo` or `VkImageCreateInfo`, e.g. + vmaCreateBuffer(), vmaCreateImage(), vmaFindMemoryTypeIndexForBufferInfo(), vmaFindMemoryTypeIndexForImageInfo() + and not with generic memory allocation functions. + */ + VMA_MEMORY_USAGE_AUTO_PREFER_HOST = 9, + + VMA_MEMORY_USAGE_MAX_ENUM = 0x7FFFFFFF +} VmaMemoryUsage; + +/// Flags to be passed as VmaAllocationCreateInfo::flags. +typedef enum VmaAllocationCreateFlagBits +{ + /** \brief Set this flag if the allocation should have its own memory block. + + Use it for special, big resources, like fullscreen images used as attachments. + + If you use this flag while creating a buffer or an image, `VkMemoryDedicatedAllocateInfo` + structure is applied if possible. + */ + VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT = 0x00000001, + + /** \brief Set this flag to only try to allocate from existing `VkDeviceMemory` blocks and never create new such block. 
+ + If new allocation cannot be placed in any of the existing blocks, allocation + fails with `VK_ERROR_OUT_OF_DEVICE_MEMORY` error. + + You should not use #VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT and + #VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT at the same time. It makes no sense. + */ + VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT = 0x00000002, + /** \brief Set this flag to use a memory that will be persistently mapped and retrieve pointer to it. + + Pointer to mapped memory will be returned through VmaAllocationInfo::pMappedData. + + It is valid to use this flag for allocation made from memory type that is not + `HOST_VISIBLE`. This flag is then ignored and memory is not mapped. This is + useful if you need an allocation that is efficient to use on GPU + (`DEVICE_LOCAL`) and still want to map it directly if possible on platforms that + support it (e.g. Intel GPU). + */ + VMA_ALLOCATION_CREATE_MAPPED_BIT = 0x00000004, + /** \deprecated Preserved for backward compatibility. Consider using vmaSetAllocationName() instead. + + Set this flag to treat VmaAllocationCreateInfo::pUserData as pointer to a + null-terminated string. Instead of copying pointer value, a local copy of the + string is made and stored in allocation's `pName`. The string is automatically + freed together with the allocation. It is also used in vmaBuildStatsString(). + */ + VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT = 0x00000020, + /** Allocation will be created from upper stack in a double stack pool. + + This flag is only allowed for custom pools created with #VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT flag. + */ + VMA_ALLOCATION_CREATE_UPPER_ADDRESS_BIT = 0x00000040, + /** Create both buffer/image and allocation, but don't bind them together. + It is useful when you want to bind yourself to do some more advanced binding, e.g. using some extensions. + The flag is meaningful only with functions that bind by default: vmaCreateBuffer(), vmaCreateImage(). + Otherwise it is ignored. + + If you want to make sure the new buffer/image is not tied to the new memory allocation + through `VkMemoryDedicatedAllocateInfoKHR` structure in case the allocation ends up in its own memory block, + use also flag #VMA_ALLOCATION_CREATE_CAN_ALIAS_BIT. + */ + VMA_ALLOCATION_CREATE_DONT_BIND_BIT = 0x00000080, + /** Create allocation only if additional device memory required for it, if any, won't exceed + memory budget. Otherwise return `VK_ERROR_OUT_OF_DEVICE_MEMORY`. + */ + VMA_ALLOCATION_CREATE_WITHIN_BUDGET_BIT = 0x00000100, + /** \brief Set this flag if the allocated memory will have aliasing resources. + + Usage of this flag prevents supplying `VkMemoryDedicatedAllocateInfoKHR` when #VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT is specified. + Otherwise created dedicated memory will not be suitable for aliasing resources, resulting in Vulkan Validation Layer errors. + */ + VMA_ALLOCATION_CREATE_CAN_ALIAS_BIT = 0x00000200, + /** + Requests possibility to map the allocation (using vmaMapMemory() or #VMA_ALLOCATION_CREATE_MAPPED_BIT). + + - If you use #VMA_MEMORY_USAGE_AUTO or other `VMA_MEMORY_USAGE_AUTO*` value, + you must use this flag to be able to map the allocation. Otherwise, mapping is incorrect. + - If you use other value of #VmaMemoryUsage, this flag is ignored and mapping is always possible in memory types that are `HOST_VISIBLE`. + This includes allocations created in \ref custom_memory_pools. + + Declares that mapped memory will only be written sequentially, e.g. 
using `memcpy()` or a loop writing number-by-number, + never read or accessed randomly, so a memory type can be selected that is uncached and write-combined. + + \warning Violating this declaration may work correctly, but will likely be very slow. + Watch out for implicit reads introduced by doing e.g. `pMappedData[i] += x;` + Better prepare your data in a local variable and `memcpy()` it to the mapped pointer all at once. + */ + VMA_ALLOCATION_CREATE_HOST_ACCESS_SEQUENTIAL_WRITE_BIT = 0x00000400, + /** + Requests possibility to map the allocation (using vmaMapMemory() or #VMA_ALLOCATION_CREATE_MAPPED_BIT). + + - If you use #VMA_MEMORY_USAGE_AUTO or other `VMA_MEMORY_USAGE_AUTO*` value, + you must use this flag to be able to map the allocation. Otherwise, mapping is incorrect. + - If you use other value of #VmaMemoryUsage, this flag is ignored and mapping is always possible in memory types that are `HOST_VISIBLE`. + This includes allocations created in \ref custom_memory_pools. + + Declares that mapped memory can be read, written, and accessed in random order, + so a `HOST_CACHED` memory type is preferred. + */ + VMA_ALLOCATION_CREATE_HOST_ACCESS_RANDOM_BIT = 0x00000800, + /** + Together with #VMA_ALLOCATION_CREATE_HOST_ACCESS_SEQUENTIAL_WRITE_BIT or #VMA_ALLOCATION_CREATE_HOST_ACCESS_RANDOM_BIT, + it says that despite request for host access, a not-`HOST_VISIBLE` memory type can be selected + if it may improve performance. + + By using this flag, you declare that you will check if the allocation ended up in a `HOST_VISIBLE` memory type + (e.g. using vmaGetAllocationMemoryProperties()) and if not, you will create some "staging" buffer and + issue an explicit transfer to write/read your data. + To prepare for this possibility, don't forget to add appropriate flags like + `VK_BUFFER_USAGE_TRANSFER_DST_BIT`, `VK_BUFFER_USAGE_TRANSFER_SRC_BIT` to the parameters of created buffer or image. + */ + VMA_ALLOCATION_CREATE_HOST_ACCESS_ALLOW_TRANSFER_INSTEAD_BIT = 0x00001000, + /** Allocation strategy that chooses smallest possible free range for the allocation + to minimize memory usage and fragmentation, possibly at the expense of allocation time. + */ + VMA_ALLOCATION_CREATE_STRATEGY_MIN_MEMORY_BIT = 0x00010000, + /** Allocation strategy that chooses first suitable free range for the allocation - + not necessarily in terms of the smallest offset but the one that is easiest and fastest to find + to minimize allocation time, possibly at the expense of allocation quality. + */ + VMA_ALLOCATION_CREATE_STRATEGY_MIN_TIME_BIT = 0x00020000, + /** Allocation strategy that chooses always the lowest offset in available space. + This is not the most efficient strategy but achieves highly packed data. + Used internally by defragmentation, not recommended in typical usage. + */ + VMA_ALLOCATION_CREATE_STRATEGY_MIN_OFFSET_BIT = 0x00040000, + /** Alias to #VMA_ALLOCATION_CREATE_STRATEGY_MIN_MEMORY_BIT. + */ + VMA_ALLOCATION_CREATE_STRATEGY_BEST_FIT_BIT = VMA_ALLOCATION_CREATE_STRATEGY_MIN_MEMORY_BIT, + /** Alias to #VMA_ALLOCATION_CREATE_STRATEGY_MIN_TIME_BIT. + */ + VMA_ALLOCATION_CREATE_STRATEGY_FIRST_FIT_BIT = VMA_ALLOCATION_CREATE_STRATEGY_MIN_TIME_BIT, + /** A bit mask to extract only `STRATEGY` bits from entire set of flags. 
+ */ + VMA_ALLOCATION_CREATE_STRATEGY_MASK = + VMA_ALLOCATION_CREATE_STRATEGY_MIN_MEMORY_BIT | + VMA_ALLOCATION_CREATE_STRATEGY_MIN_TIME_BIT | + VMA_ALLOCATION_CREATE_STRATEGY_MIN_OFFSET_BIT, + + VMA_ALLOCATION_CREATE_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF +} VmaAllocationCreateFlagBits; +/// See #VmaAllocationCreateFlagBits. +typedef VkFlags VmaAllocationCreateFlags; + +/// Flags to be passed as VmaPoolCreateInfo::flags. +typedef enum VmaPoolCreateFlagBits +{ + /** \brief Use this flag if you always allocate only buffers and linear images or only optimal images out of this pool and so Buffer-Image Granularity can be ignored. + + This is an optional optimization flag. + + If you always allocate using vmaCreateBuffer(), vmaCreateImage(), + vmaAllocateMemoryForBuffer(), then you don't need to use it because allocator + knows exact type of your allocations so it can handle Buffer-Image Granularity + in the optimal way. + + If you also allocate using vmaAllocateMemoryForImage() or vmaAllocateMemory(), + exact type of such allocations is not known, so allocator must be conservative + in handling Buffer-Image Granularity, which can lead to suboptimal allocation + (wasted memory). In that case, if you can make sure you always allocate only + buffers and linear images or only optimal images out of this pool, use this flag + to make allocator disregard Buffer-Image Granularity and so make allocations + faster and more optimal. + */ + VMA_POOL_CREATE_IGNORE_BUFFER_IMAGE_GRANULARITY_BIT = 0x00000002, + + /** \brief Enables alternative, linear allocation algorithm in this pool. + + Specify this flag to enable linear allocation algorithm, which always creates + new allocations after last one and doesn't reuse space from allocations freed in + between. It trades memory consumption for simplified algorithm and data + structure, which has better performance and uses less memory for metadata. + + By using this flag, you can achieve behavior of free-at-once, stack, + ring buffer, and double stack. + For details, see documentation chapter \ref linear_algorithm. + */ + VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT = 0x00000004, + + /** Bit mask to extract only `ALGORITHM` bits from entire set of flags. + */ + VMA_POOL_CREATE_ALGORITHM_MASK = + VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT, + + VMA_POOL_CREATE_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF +} VmaPoolCreateFlagBits; +/// Flags to be passed as VmaPoolCreateInfo::flags. See #VmaPoolCreateFlagBits. +typedef VkFlags VmaPoolCreateFlags; + +/// Flags to be passed as VmaDefragmentationInfo::flags. +typedef enum VmaDefragmentationFlagBits +{ + /* \brief Use simple but fast algorithm for defragmentation. + May not achieve best results but will require least time to compute and least allocations to copy. + */ + VMA_DEFRAGMENTATION_FLAG_ALGORITHM_FAST_BIT = 0x1, + /* \brief Default defragmentation algorithm, applied also when no `ALGORITHM` flag is specified. + Offers a balance between defragmentation quality and the amount of allocations and bytes that need to be moved. + */ + VMA_DEFRAGMENTATION_FLAG_ALGORITHM_BALANCED_BIT = 0x2, + /* \brief Perform full defragmentation of memory. + Can result in notably more time to compute and allocations to copy, but will achieve best memory packing. + */ + VMA_DEFRAGMENTATION_FLAG_ALGORITHM_FULL_BIT = 0x4, + /** \brief Use the most roboust algorithm at the cost of time to compute and number of copies to make. + Only available when bufferImageGranularity is greater than 1, since it aims to reduce + alignment issues between different types of resources. 
+ Otherwise falls back to same behavior as #VMA_DEFRAGMENTATION_FLAG_ALGORITHM_FULL_BIT. + */ + VMA_DEFRAGMENTATION_FLAG_ALGORITHM_EXTENSIVE_BIT = 0x8, + + /// A bit mask to extract only `ALGORITHM` bits from entire set of flags. + VMA_DEFRAGMENTATION_FLAG_ALGORITHM_MASK = + VMA_DEFRAGMENTATION_FLAG_ALGORITHM_FAST_BIT | + VMA_DEFRAGMENTATION_FLAG_ALGORITHM_BALANCED_BIT | + VMA_DEFRAGMENTATION_FLAG_ALGORITHM_FULL_BIT | + VMA_DEFRAGMENTATION_FLAG_ALGORITHM_EXTENSIVE_BIT, + + VMA_DEFRAGMENTATION_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF +} VmaDefragmentationFlagBits; +/// See #VmaDefragmentationFlagBits. +typedef VkFlags VmaDefragmentationFlags; + +/// Operation performed on single defragmentation move. See structure #VmaDefragmentationMove. +typedef enum VmaDefragmentationMoveOperation +{ + /// Buffer/image has been recreated at `dstTmpAllocation`, data has been copied, old buffer/image has been destroyed. `srcAllocation` should be changed to point to the new place. This is the default value set by vmaBeginDefragmentationPass(). + VMA_DEFRAGMENTATION_MOVE_OPERATION_COPY = 0, + /// Set this value if you cannot move the allocation. New place reserved at `dstTmpAllocation` will be freed. `srcAllocation` will remain unchanged. + VMA_DEFRAGMENTATION_MOVE_OPERATION_IGNORE = 1, + /// Set this value if you decide to abandon the allocation and you destroyed the buffer/image. New place reserved at `dstTmpAllocation` will be freed, along with `srcAllocation`, which will be destroyed. + VMA_DEFRAGMENTATION_MOVE_OPERATION_DESTROY = 2, +} VmaDefragmentationMoveOperation; + +/** @} */ + +/** +\addtogroup group_virtual +@{ +*/ + +/// Flags to be passed as VmaVirtualBlockCreateInfo::flags. +typedef enum VmaVirtualBlockCreateFlagBits +{ + /** \brief Enables alternative, linear allocation algorithm in this virtual block. + + Specify this flag to enable linear allocation algorithm, which always creates + new allocations after last one and doesn't reuse space from allocations freed in + between. It trades memory consumption for simplified algorithm and data + structure, which has better performance and uses less memory for metadata. + + By using this flag, you can achieve behavior of free-at-once, stack, + ring buffer, and double stack. + For details, see documentation chapter \ref linear_algorithm. + */ + VMA_VIRTUAL_BLOCK_CREATE_LINEAR_ALGORITHM_BIT = 0x00000001, + + /** \brief Bit mask to extract only `ALGORITHM` bits from entire set of flags. + */ + VMA_VIRTUAL_BLOCK_CREATE_ALGORITHM_MASK = + VMA_VIRTUAL_BLOCK_CREATE_LINEAR_ALGORITHM_BIT, + + VMA_VIRTUAL_BLOCK_CREATE_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF +} VmaVirtualBlockCreateFlagBits; +/// Flags to be passed as VmaVirtualBlockCreateInfo::flags. See #VmaVirtualBlockCreateFlagBits. +typedef VkFlags VmaVirtualBlockCreateFlags; + +/// Flags to be passed as VmaVirtualAllocationCreateInfo::flags. +typedef enum VmaVirtualAllocationCreateFlagBits +{ + /** \brief Allocation will be created from upper stack in a double stack pool. + + This flag is only allowed for virtual blocks created with #VMA_VIRTUAL_BLOCK_CREATE_LINEAR_ALGORITHM_BIT flag. + */ + VMA_VIRTUAL_ALLOCATION_CREATE_UPPER_ADDRESS_BIT = VMA_ALLOCATION_CREATE_UPPER_ADDRESS_BIT, + /** \brief Allocation strategy that tries to minimize memory usage. + */ + VMA_VIRTUAL_ALLOCATION_CREATE_STRATEGY_MIN_MEMORY_BIT = VMA_ALLOCATION_CREATE_STRATEGY_MIN_MEMORY_BIT, + /** \brief Allocation strategy that tries to minimize allocation time. 
+ */ + VMA_VIRTUAL_ALLOCATION_CREATE_STRATEGY_MIN_TIME_BIT = VMA_ALLOCATION_CREATE_STRATEGY_MIN_TIME_BIT, + /** Allocation strategy that chooses always the lowest offset in available space. + This is not the most efficient strategy but achieves highly packed data. + */ + VMA_VIRTUAL_ALLOCATION_CREATE_STRATEGY_MIN_OFFSET_BIT = VMA_ALLOCATION_CREATE_STRATEGY_MIN_OFFSET_BIT, + /** \brief A bit mask to extract only `STRATEGY` bits from entire set of flags. + + These strategy flags are binary compatible with equivalent flags in #VmaAllocationCreateFlagBits. + */ + VMA_VIRTUAL_ALLOCATION_CREATE_STRATEGY_MASK = VMA_ALLOCATION_CREATE_STRATEGY_MASK, + + VMA_VIRTUAL_ALLOCATION_CREATE_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF +} VmaVirtualAllocationCreateFlagBits; +/// Flags to be passed as VmaVirtualAllocationCreateInfo::flags. See #VmaVirtualAllocationCreateFlagBits. +typedef VkFlags VmaVirtualAllocationCreateFlags; + +/** @} */ + +#endif // _VMA_ENUM_DECLARATIONS + +#ifndef _VMA_DATA_TYPES_DECLARATIONS + +/** +\addtogroup group_init +@{ */ + +/** \struct VmaAllocator +\brief Represents main object of this library initialized. + +Fill structure #VmaAllocatorCreateInfo and call function vmaCreateAllocator() to create it. +Call function vmaDestroyAllocator() to destroy it. + +It is recommended to create just one object of this type per `VkDevice` object, +right after Vulkan is initialized and keep it alive until before Vulkan device is destroyed. +*/ +VK_DEFINE_HANDLE(VmaAllocator) + +/** @} */ + +/** +\addtogroup group_alloc +@{ +*/ + +/** \struct VmaPool +\brief Represents custom memory pool + +Fill structure VmaPoolCreateInfo and call function vmaCreatePool() to create it. +Call function vmaDestroyPool() to destroy it. + +For more information see [Custom memory pools](@ref choosing_memory_type_custom_memory_pools). +*/ +VK_DEFINE_HANDLE(VmaPool) + +/** \struct VmaAllocation +\brief Represents single memory allocation. + +It may be either dedicated block of `VkDeviceMemory` or a specific region of a bigger block of this type +plus unique offset. + +There are multiple ways to create such object. +You need to fill structure VmaAllocationCreateInfo. +For more information see [Choosing memory type](@ref choosing_memory_type). + +Although the library provides convenience functions that create Vulkan buffer or image, +allocate memory for it and bind them together, +binding of the allocation to a buffer or an image is out of scope of the allocation itself. +Allocation object can exist without buffer/image bound, +binding can be done manually by the user, and destruction of it can be done +independently of destruction of the allocation. + +The object also remembers its size and some other information. +To retrieve this information, use function vmaGetAllocationInfo() and inspect +returned structure VmaAllocationInfo. +*/ +VK_DEFINE_HANDLE(VmaAllocation) + +/** \struct VmaDefragmentationContext +\brief An opaque object that represents started defragmentation process. + +Fill structure #VmaDefragmentationInfo and call function vmaBeginDefragmentation() to create it. +Call function vmaEndDefragmentation() to destroy it. +*/ +VK_DEFINE_HANDLE(VmaDefragmentationContext) + +/** @} */ + +/** +\addtogroup group_virtual +@{ +*/ + +/** \struct VmaVirtualAllocation +\brief Represents single memory allocation done inside VmaVirtualBlock. + +Use it as a unique identifier to virtual allocation within the single block. + +Use value `VK_NULL_HANDLE` to represent a null/invalid allocation. 
+*/ +VK_DEFINE_NON_DISPATCHABLE_HANDLE(VmaVirtualAllocation) + +/** @} */ + +/** +\addtogroup group_virtual +@{ +*/ + +/** \struct VmaVirtualBlock +\brief Handle to a virtual block object that allows to use core allocation algorithm without allocating any real GPU memory. + +Fill in #VmaVirtualBlockCreateInfo structure and use vmaCreateVirtualBlock() to create it. Use vmaDestroyVirtualBlock() to destroy it. +For more information, see documentation chapter \ref virtual_allocator. + +This object is not thread-safe - should not be used from multiple threads simultaneously, must be synchronized externally. +*/ +VK_DEFINE_HANDLE(VmaVirtualBlock) + +/** @} */ + +/** +\addtogroup group_init +@{ +*/ + +/// Callback function called after successful vkAllocateMemory. +typedef void (VKAPI_PTR* PFN_vmaAllocateDeviceMemoryFunction)( + VmaAllocator VMA_NOT_NULL allocator, + uint32_t memoryType, + VkDeviceMemory VMA_NOT_NULL_NON_DISPATCHABLE memory, + VkDeviceSize size, + void* VMA_NULLABLE pUserData); + +/// Callback function called before vkFreeMemory. +typedef void (VKAPI_PTR* PFN_vmaFreeDeviceMemoryFunction)( + VmaAllocator VMA_NOT_NULL allocator, + uint32_t memoryType, + VkDeviceMemory VMA_NOT_NULL_NON_DISPATCHABLE memory, + VkDeviceSize size, + void* VMA_NULLABLE pUserData); + +/** \brief Set of callbacks that the library will call for `vkAllocateMemory` and `vkFreeMemory`. + +Provided for informative purpose, e.g. to gather statistics about number of +allocations or total amount of memory allocated in Vulkan. + +Used in VmaAllocatorCreateInfo::pDeviceMemoryCallbacks. +*/ +typedef struct VmaDeviceMemoryCallbacks +{ + /// Optional, can be null. + PFN_vmaAllocateDeviceMemoryFunction VMA_NULLABLE pfnAllocate; + /// Optional, can be null. + PFN_vmaFreeDeviceMemoryFunction VMA_NULLABLE pfnFree; + /// Optional, can be null. + void* VMA_NULLABLE pUserData; +} VmaDeviceMemoryCallbacks; + +/** \brief Pointers to some Vulkan functions - a subset used by the library. + +Used in VmaAllocatorCreateInfo::pVulkanFunctions. +*/ +typedef struct VmaVulkanFunctions +{ + /// Required when using VMA_DYNAMIC_VULKAN_FUNCTIONS. + PFN_vkGetInstanceProcAddr VMA_NULLABLE vkGetInstanceProcAddr; + /// Required when using VMA_DYNAMIC_VULKAN_FUNCTIONS. 
+ PFN_vkGetDeviceProcAddr VMA_NULLABLE vkGetDeviceProcAddr; + PFN_vkGetPhysicalDeviceProperties VMA_NULLABLE vkGetPhysicalDeviceProperties; + PFN_vkGetPhysicalDeviceMemoryProperties VMA_NULLABLE vkGetPhysicalDeviceMemoryProperties; + PFN_vkAllocateMemory VMA_NULLABLE vkAllocateMemory; + PFN_vkFreeMemory VMA_NULLABLE vkFreeMemory; + PFN_vkMapMemory VMA_NULLABLE vkMapMemory; + PFN_vkUnmapMemory VMA_NULLABLE vkUnmapMemory; + PFN_vkFlushMappedMemoryRanges VMA_NULLABLE vkFlushMappedMemoryRanges; + PFN_vkInvalidateMappedMemoryRanges VMA_NULLABLE vkInvalidateMappedMemoryRanges; + PFN_vkBindBufferMemory VMA_NULLABLE vkBindBufferMemory; + PFN_vkBindImageMemory VMA_NULLABLE vkBindImageMemory; + PFN_vkGetBufferMemoryRequirements VMA_NULLABLE vkGetBufferMemoryRequirements; + PFN_vkGetImageMemoryRequirements VMA_NULLABLE vkGetImageMemoryRequirements; + PFN_vkCreateBuffer VMA_NULLABLE vkCreateBuffer; + PFN_vkDestroyBuffer VMA_NULLABLE vkDestroyBuffer; + PFN_vkCreateImage VMA_NULLABLE vkCreateImage; + PFN_vkDestroyImage VMA_NULLABLE vkDestroyImage; + PFN_vkCmdCopyBuffer VMA_NULLABLE vkCmdCopyBuffer; +#if VMA_DEDICATED_ALLOCATION || VMA_VULKAN_VERSION >= 1001000 + /// Fetch "vkGetBufferMemoryRequirements2" on Vulkan >= 1.1, fetch "vkGetBufferMemoryRequirements2KHR" when using VK_KHR_dedicated_allocation extension. + PFN_vkGetBufferMemoryRequirements2KHR VMA_NULLABLE vkGetBufferMemoryRequirements2KHR; + /// Fetch "vkGetImageMemoryRequirements2" on Vulkan >= 1.1, fetch "vkGetImageMemoryRequirements2KHR" when using VK_KHR_dedicated_allocation extension. + PFN_vkGetImageMemoryRequirements2KHR VMA_NULLABLE vkGetImageMemoryRequirements2KHR; +#endif +#if VMA_BIND_MEMORY2 || VMA_VULKAN_VERSION >= 1001000 + /// Fetch "vkBindBufferMemory2" on Vulkan >= 1.1, fetch "vkBindBufferMemory2KHR" when using VK_KHR_bind_memory2 extension. + PFN_vkBindBufferMemory2KHR VMA_NULLABLE vkBindBufferMemory2KHR; + /// Fetch "vkBindImageMemory2" on Vulkan >= 1.1, fetch "vkBindImageMemory2KHR" when using VK_KHR_bind_memory2 extension. + PFN_vkBindImageMemory2KHR VMA_NULLABLE vkBindImageMemory2KHR; +#endif +#if VMA_MEMORY_BUDGET || VMA_VULKAN_VERSION >= 1001000 + /// Fetch from "vkGetPhysicalDeviceMemoryProperties2" on Vulkan >= 1.1, but you can also fetch it from "vkGetPhysicalDeviceMemoryProperties2KHR" if you enabled extension VK_KHR_get_physical_device_properties2. + PFN_vkGetPhysicalDeviceMemoryProperties2KHR VMA_NULLABLE vkGetPhysicalDeviceMemoryProperties2KHR; +#endif +#if VMA_KHR_MAINTENANCE4 || VMA_VULKAN_VERSION >= 1003000 + /// Fetch from "vkGetDeviceBufferMemoryRequirements" on Vulkan >= 1.3, but you can also fetch it from "vkGetDeviceBufferMemoryRequirementsKHR" if you enabled extension VK_KHR_maintenance4. + PFN_vkGetDeviceBufferMemoryRequirementsKHR VMA_NULLABLE vkGetDeviceBufferMemoryRequirements; + /// Fetch from "vkGetDeviceImageMemoryRequirements" on Vulkan >= 1.3, but you can also fetch it from "vkGetDeviceImageMemoryRequirementsKHR" if you enabled extension VK_KHR_maintenance4. + PFN_vkGetDeviceImageMemoryRequirementsKHR VMA_NULLABLE vkGetDeviceImageMemoryRequirements; +#endif +#if VMA_EXTERNAL_MEMORY_WIN32 + PFN_vkGetMemoryWin32HandleKHR VMA_NULLABLE vkGetMemoryWin32HandleKHR; +#else + void* VMA_NULLABLE vkGetMemoryWin32HandleKHR; +#endif +} VmaVulkanFunctions; + +/// Description of a Allocator to be created. +typedef struct VmaAllocatorCreateInfo +{ + /// Flags for created allocator. Use #VmaAllocatorCreateFlagBits enum. + VmaAllocatorCreateFlags flags; + /// Vulkan physical device. 
+ /** It must be valid throughout whole lifetime of created allocator. */ + VkPhysicalDevice VMA_NOT_NULL physicalDevice; + /// Vulkan device. + /** It must be valid throughout whole lifetime of created allocator. */ + VkDevice VMA_NOT_NULL device; + /// Preferred size of a single `VkDeviceMemory` block to be allocated from large heaps > 1 GiB. Optional. + /** Set to 0 to use default, which is currently 256 MiB. */ + VkDeviceSize preferredLargeHeapBlockSize; + /// Custom CPU memory allocation callbacks. Optional. + /** Optional, can be null. When specified, will also be used for all CPU-side memory allocations. */ + const VkAllocationCallbacks* VMA_NULLABLE pAllocationCallbacks; + /// Informative callbacks for `vkAllocateMemory`, `vkFreeMemory`. Optional. + /** Optional, can be null. */ + const VmaDeviceMemoryCallbacks* VMA_NULLABLE pDeviceMemoryCallbacks; + /** \brief Either null or a pointer to an array of limits on maximum number of bytes that can be allocated out of particular Vulkan memory heap. + + If not NULL, it must be a pointer to an array of + `VkPhysicalDeviceMemoryProperties::memoryHeapCount` elements, defining limit on + maximum number of bytes that can be allocated out of particular Vulkan memory + heap. + + Any of the elements may be equal to `VK_WHOLE_SIZE`, which means no limit on that + heap. This is also the default in case of `pHeapSizeLimit` = NULL. + + If there is a limit defined for a heap: + + - If user tries to allocate more memory from that heap using this allocator, + the allocation fails with `VK_ERROR_OUT_OF_DEVICE_MEMORY`. + - If the limit is smaller than heap size reported in `VkMemoryHeap::size`, the + value of this limit will be reported instead when using vmaGetMemoryProperties(). + + Warning! Using this feature may not be equivalent to installing a GPU with + smaller amount of memory, because graphics driver doesn't necessary fail new + allocations with `VK_ERROR_OUT_OF_DEVICE_MEMORY` result when memory capacity is + exceeded. It may return success and just silently migrate some device memory + blocks to system RAM. This driver behavior can also be controlled using + VK_AMD_memory_overallocation_behavior extension. + */ + const VkDeviceSize* VMA_NULLABLE VMA_LEN_IF_NOT_NULL("VkPhysicalDeviceMemoryProperties::memoryHeapCount") pHeapSizeLimit; + + /** \brief Pointers to Vulkan functions. Can be null. + + For details see [Pointers to Vulkan functions](@ref config_Vulkan_functions). + */ + const VmaVulkanFunctions* VMA_NULLABLE pVulkanFunctions; + /** \brief Handle to Vulkan instance object. + + Starting from version 3.0.0 this member is no longer optional, it must be set! + */ + VkInstance VMA_NOT_NULL instance; + /** \brief Optional. Vulkan version that the application uses. + + It must be a value in the format as created by macro `VK_MAKE_VERSION` or a constant like: `VK_API_VERSION_1_1`, `VK_API_VERSION_1_0`. + The patch version number specified is ignored. Only the major and minor versions are considered. + Only versions 1.0, 1.1, 1.2, 1.3 are supported by the current implementation. + Leaving it initialized to zero is equivalent to `VK_API_VERSION_1_0`. + It must match the Vulkan version used by the application and supported on the selected physical device, + so it must be no higher than `VkApplicationInfo::apiVersion` passed to `vkCreateInstance` + and no higher than `VkPhysicalDeviceProperties::apiVersion` found on the physical device used. 
+ */ + uint32_t vulkanApiVersion; +#if VMA_EXTERNAL_MEMORY + /** \brief Either null or a pointer to an array of external memory handle types for each Vulkan memory type. + + If not NULL, it must be a pointer to an array of `VkPhysicalDeviceMemoryProperties::memoryTypeCount` + elements, defining external memory handle types of particular Vulkan memory type, + to be passed using `VkExportMemoryAllocateInfoKHR`. + + Any of the elements may be equal to 0, which means not to use `VkExportMemoryAllocateInfoKHR` on this memory type. + This is also the default in case of `pTypeExternalMemoryHandleTypes` = NULL. + */ + const VkExternalMemoryHandleTypeFlagsKHR* VMA_NULLABLE VMA_LEN_IF_NOT_NULL("VkPhysicalDeviceMemoryProperties::memoryTypeCount") pTypeExternalMemoryHandleTypes; +#endif // #if VMA_EXTERNAL_MEMORY +} VmaAllocatorCreateInfo; + +/// Information about existing #VmaAllocator object. +typedef struct VmaAllocatorInfo +{ + /** \brief Handle to Vulkan instance object. + + This is the same value as has been passed through VmaAllocatorCreateInfo::instance. + */ + VkInstance VMA_NOT_NULL instance; + /** \brief Handle to Vulkan physical device object. + + This is the same value as has been passed through VmaAllocatorCreateInfo::physicalDevice. + */ + VkPhysicalDevice VMA_NOT_NULL physicalDevice; + /** \brief Handle to Vulkan device object. + + This is the same value as has been passed through VmaAllocatorCreateInfo::device. + */ + VkDevice VMA_NOT_NULL device; +} VmaAllocatorInfo; + +/** @} */ + +/** +\addtogroup group_stats +@{ +*/ + +/** \brief Calculated statistics of memory usage e.g. in a specific memory type, heap, custom pool, or total. + +These are fast to calculate. +See functions: vmaGetHeapBudgets(), vmaGetPoolStatistics(). +*/ +typedef struct VmaStatistics +{ + /** \brief Number of `VkDeviceMemory` objects - Vulkan memory blocks allocated. + */ + uint32_t blockCount; + /** \brief Number of #VmaAllocation objects allocated. + + Dedicated allocations have their own blocks, so each one adds 1 to `allocationCount` as well as `blockCount`. + */ + uint32_t allocationCount; + /** \brief Number of bytes allocated in `VkDeviceMemory` blocks. + + \note To avoid confusion, please be aware that what Vulkan calls an "allocation" - a whole `VkDeviceMemory` object + (e.g. as in `VkPhysicalDeviceLimits::maxMemoryAllocationCount`) is called a "block" in VMA, while VMA calls + "allocation" a #VmaAllocation object that represents a memory region sub-allocated from such block, usually for a single buffer or image. + */ + VkDeviceSize blockBytes; + /** \brief Total number of bytes occupied by all #VmaAllocation objects. + + Always less or equal than `blockBytes`. + Difference `(blockBytes - allocationBytes)` is the amount of memory allocated from Vulkan + but unused by any #VmaAllocation. + */ + VkDeviceSize allocationBytes; +} VmaStatistics; + +/** \brief More detailed statistics than #VmaStatistics. + +These are slower to calculate. Use for debugging purposes. +See functions: vmaCalculateStatistics(), vmaCalculatePoolStatistics(). 
+ +Previous version of the statistics API provided averages, but they have been removed +because they can be easily calculated as: + +\code +VkDeviceSize allocationSizeAvg = detailedStats.statistics.allocationBytes / detailedStats.statistics.allocationCount; +VkDeviceSize unusedBytes = detailedStats.statistics.blockBytes - detailedStats.statistics.allocationBytes; +VkDeviceSize unusedRangeSizeAvg = unusedBytes / detailedStats.unusedRangeCount; +\endcode +*/ +typedef struct VmaDetailedStatistics +{ + /// Basic statistics. + VmaStatistics statistics; + /// Number of free ranges of memory between allocations. + uint32_t unusedRangeCount; + /// Smallest allocation size. `VK_WHOLE_SIZE` if there are 0 allocations. + VkDeviceSize allocationSizeMin; + /// Largest allocation size. 0 if there are 0 allocations. + VkDeviceSize allocationSizeMax; + /// Smallest empty range size. `VK_WHOLE_SIZE` if there are 0 empty ranges. + VkDeviceSize unusedRangeSizeMin; + /// Largest empty range size. 0 if there are 0 empty ranges. + VkDeviceSize unusedRangeSizeMax; +} VmaDetailedStatistics; + +/** \brief General statistics from current state of the Allocator - +total memory usage across all memory heaps and types. + +These are slower to calculate. Use for debugging purposes. +See function vmaCalculateStatistics(). +*/ +typedef struct VmaTotalStatistics +{ + VmaDetailedStatistics memoryType[VK_MAX_MEMORY_TYPES]; + VmaDetailedStatistics memoryHeap[VK_MAX_MEMORY_HEAPS]; + VmaDetailedStatistics total; +} VmaTotalStatistics; + +/** \brief Statistics of current memory usage and available budget for a specific memory heap. + +These are fast to calculate. +See function vmaGetHeapBudgets(). +*/ +typedef struct VmaBudget +{ + /** \brief Statistics fetched from the library. + */ + VmaStatistics statistics; + /** \brief Estimated current memory usage of the program, in bytes. + + Fetched from system using VK_EXT_memory_budget extension if enabled. + + It might be different than `statistics.blockBytes` (usually higher) due to additional implicit objects + also occupying the memory, like swapchain, pipelines, descriptor heaps, command buffers, or + `VkDeviceMemory` blocks allocated outside of this library, if any. + */ + VkDeviceSize usage; + /** \brief Estimated amount of memory available to the program, in bytes. + + Fetched from system using VK_EXT_memory_budget extension if enabled. + + It might be different (most probably smaller) than `VkMemoryHeap::size[heapIndex]` due to factors + external to the program, decided by the operating system. + Difference `budget - usage` is the amount of additional memory that can probably + be allocated without problems. Exceeding the budget may result in various problems. + */ + VkDeviceSize budget; +} VmaBudget; + +/** @} */ + +/** +\addtogroup group_alloc +@{ +*/ + +/** \brief Parameters of new #VmaAllocation. + +To be used with functions like vmaCreateBuffer(), vmaCreateImage(), and many others. +*/ +typedef struct VmaAllocationCreateInfo +{ + /// Use #VmaAllocationCreateFlagBits enum. + VmaAllocationCreateFlags flags; + /** \brief Intended usage of memory. + + You can leave #VMA_MEMORY_USAGE_UNKNOWN if you specify memory requirements in other way. \n + If `pool` is not null, this member is ignored. + */ + VmaMemoryUsage usage; + /** \brief Flags that must be set in a Memory Type chosen for an allocation. + + Leave 0 if you specify memory requirements in other way. 
\n + If `pool` is not null, this member is ignored.*/ + VkMemoryPropertyFlags requiredFlags; + /** \brief Flags that preferably should be set in a memory type chosen for an allocation. + + Set to 0 if no additional flags are preferred. \n + If `pool` is not null, this member is ignored. */ + VkMemoryPropertyFlags preferredFlags; + /** \brief Bitmask containing one bit set for every memory type acceptable for this allocation. + + Value 0 is equivalent to `UINT32_MAX` - it means any memory type is accepted if + it meets other requirements specified by this structure, with no further + restrictions on memory type index. \n + If `pool` is not null, this member is ignored. + */ + uint32_t memoryTypeBits; + /** \brief Pool that this allocation should be created in. + + Leave `VK_NULL_HANDLE` to allocate from default pool. If not null, members: + `usage`, `requiredFlags`, `preferredFlags`, `memoryTypeBits` are ignored. + */ + VmaPool VMA_NULLABLE pool; + /** \brief Custom general-purpose pointer that will be stored in #VmaAllocation, can be read as VmaAllocationInfo::pUserData and changed using vmaSetAllocationUserData(). + + If #VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT is used, it must be either + null or pointer to a null-terminated string. The string will be then copied to + internal buffer, so it doesn't need to be valid after allocation call. + */ + void* VMA_NULLABLE pUserData; + /** \brief A floating-point value between 0 and 1, indicating the priority of the allocation relative to other memory allocations. + + It is used only when #VMA_ALLOCATOR_CREATE_EXT_MEMORY_PRIORITY_BIT flag was used during creation of the #VmaAllocator object + and this allocation ends up as dedicated or is explicitly forced as dedicated using #VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT. + Otherwise, it has the priority of a memory block where it is placed and this variable is ignored. + */ + float priority; +} VmaAllocationCreateInfo; + +/// Describes parameter of created #VmaPool. +typedef struct VmaPoolCreateInfo +{ + /** \brief Vulkan memory type index to allocate this pool from. + */ + uint32_t memoryTypeIndex; + /** \brief Use combination of #VmaPoolCreateFlagBits. + */ + VmaPoolCreateFlags flags; + /** \brief Size of a single `VkDeviceMemory` block to be allocated as part of this pool, in bytes. Optional. + + Specify nonzero to set explicit, constant size of memory blocks used by this + pool. + + Leave 0 to use default and let the library manage block sizes automatically. + Sizes of particular blocks may vary. + In this case, the pool will also support dedicated allocations. + */ + VkDeviceSize blockSize; + /** \brief Minimum number of blocks to be always allocated in this pool, even if they stay empty. + + Set to 0 to have no preallocated blocks and allow the pool be completely empty. + */ + size_t minBlockCount; + /** \brief Maximum number of blocks that can be allocated in this pool. Optional. + + Set to 0 to use default, which is `SIZE_MAX`, which means no limit. + + Set to same value as VmaPoolCreateInfo::minBlockCount to have fixed amount of memory allocated + throughout whole lifetime of this pool. + */ + size_t maxBlockCount; + /** \brief A floating-point value between 0 and 1, indicating the priority of the allocations in this pool relative to other memory allocations. + + It is used only when #VMA_ALLOCATOR_CREATE_EXT_MEMORY_PRIORITY_BIT flag was used during creation of the #VmaAllocator object. + Otherwise, this variable is ignored. 
+ */ + float priority; + /** \brief Additional minimum alignment to be used for all allocations created from this pool. Can be 0. + + Leave 0 (default) not to impose any additional alignment. If not 0, it must be a power of two. + It can be useful in cases where alignment returned by Vulkan by functions like `vkGetBufferMemoryRequirements` is not enough, + e.g. when doing interop with OpenGL. + */ + VkDeviceSize minAllocationAlignment; + /** \brief Additional `pNext` chain to be attached to `VkMemoryAllocateInfo` used for every allocation made by this pool. Optional. + + Optional, can be null. If not null, it must point to a `pNext` chain of structures that can be attached to `VkMemoryAllocateInfo`. + It can be useful for special needs such as adding `VkExportMemoryAllocateInfoKHR`. + Structures pointed by this member must remain alive and unchanged for the whole lifetime of the custom pool. + + Please note that some structures, e.g. `VkMemoryPriorityAllocateInfoEXT`, `VkMemoryDedicatedAllocateInfoKHR`, + can be attached automatically by this library when using other, more convenient of its features. + */ + void* VMA_NULLABLE VMA_EXTENDS_VK_STRUCT(VkMemoryAllocateInfo) pMemoryAllocateNext; +} VmaPoolCreateInfo; + +/** @} */ + +/** +\addtogroup group_alloc +@{ +*/ + +/** +Parameters of #VmaAllocation objects, that can be retrieved using function vmaGetAllocationInfo(). + +There is also an extended version of this structure that carries additional parameters: #VmaAllocationInfo2. +*/ +typedef struct VmaAllocationInfo +{ + /** \brief Memory type index that this allocation was allocated from. + + It never changes. + */ + uint32_t memoryType; + /** \brief Handle to Vulkan memory object. + + Same memory object can be shared by multiple allocations. + + It can change after the allocation is moved during \ref defragmentation. + */ + VkDeviceMemory VMA_NULLABLE_NON_DISPATCHABLE deviceMemory; + /** \brief Offset in `VkDeviceMemory` object to the beginning of this allocation, in bytes. `(deviceMemory, offset)` pair is unique to this allocation. + + You usually don't need to use this offset. If you create a buffer or an image together with the allocation using e.g. function + vmaCreateBuffer(), vmaCreateImage(), functions that operate on these resources refer to the beginning of the buffer or image, + not entire device memory block. Functions like vmaMapMemory(), vmaBindBufferMemory() also refer to the beginning of the allocation + and apply this offset automatically. + + It can change after the allocation is moved during \ref defragmentation. + */ + VkDeviceSize offset; + /** \brief Size of this allocation, in bytes. + + It never changes. + + \note Allocation size returned in this variable may be greater than the size + requested for the resource e.g. as `VkBufferCreateInfo::size`. Whole size of the + allocation is accessible for operations on memory e.g. using a pointer after + mapping with vmaMapMemory(), but operations on the resource e.g. using + `vkCmdCopyBuffer` must be limited to the size of the resource. + */ + VkDeviceSize size; + /** \brief Pointer to the beginning of this allocation as mapped data. + + If the allocation hasn't been mapped using vmaMapMemory() and hasn't been + created with #VMA_ALLOCATION_CREATE_MAPPED_BIT flag, this value is null. + + It can change after call to vmaMapMemory(), vmaUnmapMemory(). + It can also change after the allocation is moved during \ref defragmentation. 
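+
+ For illustration, a minimal sketch of a persistently mapped buffer (assuming `allocator`
+ is a valid #VmaAllocator; `myData` and `myDataSize` are placeholder names for application data):
+
+ \code
+ VkBufferCreateInfo bufCreateInfo = { VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO };
+ bufCreateInfo.size = 65536;
+ bufCreateInfo.usage = VK_BUFFER_USAGE_TRANSFER_SRC_BIT;
+
+ VmaAllocationCreateInfo allocCreateInfo = {0};
+ allocCreateInfo.usage = VMA_MEMORY_USAGE_AUTO;
+ allocCreateInfo.flags = VMA_ALLOCATION_CREATE_HOST_ACCESS_SEQUENTIAL_WRITE_BIT |
+     VMA_ALLOCATION_CREATE_MAPPED_BIT;
+
+ VkBuffer buf;
+ VmaAllocation alloc;
+ VmaAllocationInfo allocInfo;
+ vmaCreateBuffer(allocator, &bufCreateInfo, &allocCreateInfo, &buf, &alloc, &allocInfo);
+
+ // The memory stays mapped for the lifetime of the allocation.
+ memcpy(allocInfo.pMappedData, myData, myDataSize);
+ // Flush in case the chosen memory type is not HOST_COHERENT.
+ vmaFlushAllocation(allocator, alloc, 0, VK_WHOLE_SIZE);
+ \endcode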
+ */ + void* VMA_NULLABLE pMappedData; + /** \brief Custom general-purpose pointer that was passed as VmaAllocationCreateInfo::pUserData or set using vmaSetAllocationUserData(). + + It can change after call to vmaSetAllocationUserData() for this allocation. + */ + void* VMA_NULLABLE pUserData; + /** \brief Custom allocation name that was set with vmaSetAllocationName(). + + It can change after call to vmaSetAllocationName() for this allocation. + + Another way to set custom name is to pass it in VmaAllocationCreateInfo::pUserData with + additional flag #VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT set [DEPRECATED]. + */ + const char* VMA_NULLABLE pName; +} VmaAllocationInfo; + +/// Extended parameters of a #VmaAllocation object that can be retrieved using function vmaGetAllocationInfo2(). +typedef struct VmaAllocationInfo2 +{ + /** \brief Basic parameters of the allocation. + + If you need only these, you can use function vmaGetAllocationInfo() and structure #VmaAllocationInfo instead. + */ + VmaAllocationInfo allocationInfo; + /** \brief Size of the `VkDeviceMemory` block that the allocation belongs to. + + In case of an allocation with dedicated memory, it will be equal to `allocationInfo.size`. + */ + VkDeviceSize blockSize; + /** \brief `VK_TRUE` if the allocation has dedicated memory, `VK_FALSE` if it was placed as part of a larger memory block. + + When `VK_TRUE`, it also means `VkMemoryDedicatedAllocateInfo` was used when creating the allocation + (if VK_KHR_dedicated_allocation extension or Vulkan version >= 1.1 is enabled). + */ + VkBool32 dedicatedMemory; +} VmaAllocationInfo2; + +/** Callback function called during vmaBeginDefragmentation() to check custom criterion about ending current defragmentation pass. + +Should return true if the defragmentation needs to stop current pass. +*/ +typedef VkBool32 (VKAPI_PTR* PFN_vmaCheckDefragmentationBreakFunction)(void* VMA_NULLABLE pUserData); + +/** \brief Parameters for defragmentation. + +To be used with function vmaBeginDefragmentation(). +*/ +typedef struct VmaDefragmentationInfo +{ + /// \brief Use combination of #VmaDefragmentationFlagBits. + VmaDefragmentationFlags flags; + /** \brief Custom pool to be defragmented. + + If null then default pools will undergo defragmentation process. + */ + VmaPool VMA_NULLABLE pool; + /** \brief Maximum numbers of bytes that can be copied during single pass, while moving allocations to different places. + + `0` means no limit. + */ + VkDeviceSize maxBytesPerPass; + /** \brief Maximum number of allocations that can be moved during single pass to a different place. + + `0` means no limit. + */ + uint32_t maxAllocationsPerPass; + /** \brief Optional custom callback for stopping vmaBeginDefragmentation(). + + Have to return true for breaking current defragmentation pass. + */ + PFN_vmaCheckDefragmentationBreakFunction VMA_NULLABLE pfnBreakCallback; + /// \brief Optional data to pass to custom callback for stopping pass of defragmentation. + void* VMA_NULLABLE pBreakCallbackUserData; +} VmaDefragmentationInfo; + +/// Single move of an allocation to be done for defragmentation. +typedef struct VmaDefragmentationMove +{ + /// Operation to be performed on the allocation by vmaEndDefragmentationPass(). Default value is #VMA_DEFRAGMENTATION_MOVE_OPERATION_COPY. You can modify it. + VmaDefragmentationMoveOperation operation; + /// Allocation that should be moved. 
+ VmaAllocation VMA_NOT_NULL srcAllocation; + /** \brief Temporary allocation pointing to destination memory that will replace `srcAllocation`. + + \warning Do not store this allocation in your data structures! It exists only temporarily, for the duration of the defragmentation pass, + to be used for binding new buffer/image to the destination memory using e.g. vmaBindBufferMemory(). + vmaEndDefragmentationPass() will destroy it and make `srcAllocation` point to this memory. + */ + VmaAllocation VMA_NOT_NULL dstTmpAllocation; +} VmaDefragmentationMove; + +/** \brief Parameters for incremental defragmentation steps. + +To be used with function vmaBeginDefragmentationPass(). +*/ +typedef struct VmaDefragmentationPassMoveInfo +{ + /// Number of elements in the `pMoves` array. + uint32_t moveCount; + /** \brief Array of moves to be performed by the user in the current defragmentation pass. + + Pointer to an array of `moveCount` elements, owned by VMA, created in vmaBeginDefragmentationPass(), destroyed in vmaEndDefragmentationPass(). + + For each element, you should: + + 1. Create a new buffer/image in the place pointed by VmaDefragmentationMove::dstMemory + VmaDefragmentationMove::dstOffset. + 2. Copy data from the VmaDefragmentationMove::srcAllocation e.g. using `vkCmdCopyBuffer`, `vkCmdCopyImage`. + 3. Make sure these commands finished executing on the GPU. + 4. Destroy the old buffer/image. + + Only then you can finish defragmentation pass by calling vmaEndDefragmentationPass(). + After this call, the allocation will point to the new place in memory. + + Alternatively, if you cannot move specific allocation, you can set VmaDefragmentationMove::operation to #VMA_DEFRAGMENTATION_MOVE_OPERATION_IGNORE. + + Alternatively, if you decide you want to completely remove the allocation: + + 1. Destroy its buffer/image. + 2. Set VmaDefragmentationMove::operation to #VMA_DEFRAGMENTATION_MOVE_OPERATION_DESTROY. + + Then, after vmaEndDefragmentationPass() the allocation will be freed. + */ + VmaDefragmentationMove* VMA_NULLABLE VMA_LEN_IF_NOT_NULL(moveCount) pMoves; +} VmaDefragmentationPassMoveInfo; + +/// Statistics returned for defragmentation process in function vmaEndDefragmentation(). +typedef struct VmaDefragmentationStats +{ + /// Total number of bytes that have been copied while moving allocations to different places. + VkDeviceSize bytesMoved; + /// Total number of bytes that have been released to the system by freeing empty `VkDeviceMemory` objects. + VkDeviceSize bytesFreed; + /// Number of allocations that have been moved to different places. + uint32_t allocationsMoved; + /// Number of empty `VkDeviceMemory` objects that have been released to the system. + uint32_t deviceMemoryBlocksFreed; +} VmaDefragmentationStats; + +/** @} */ + +/** +\addtogroup group_virtual +@{ +*/ + +/// Parameters of created #VmaVirtualBlock object to be passed to vmaCreateVirtualBlock(). +typedef struct VmaVirtualBlockCreateInfo +{ + /** \brief Total size of the virtual block. + + Sizes can be expressed in bytes or any units you want as long as you are consistent in using them. + For example, if you allocate from some array of structures, 1 can mean single instance of entire structure. + */ + VkDeviceSize size; + + /** \brief Use combination of #VmaVirtualBlockCreateFlagBits. + */ + VmaVirtualBlockCreateFlags flags; + + /** \brief Custom CPU memory allocation callbacks. Optional. + + Optional, can be null. When specified, they will be used for all CPU-side memory allocations. 
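+
+ For illustration, a minimal sketch of how a virtual block described by this structure is
+ typically used (assuming the virtual allocation functions vmaCreateVirtualBlock(),
+ vmaVirtualAllocate(), vmaVirtualFree() and vmaDestroyVirtualBlock() declared further in
+ this header; sizes are in whatever unit you chose for the block):
+
+ \code
+ VmaVirtualBlockCreateInfo blockCreateInfo = {0};
+ blockCreateInfo.size = 1048576; // e.g. 1 MiB, or any custom unit used consistently
+
+ VmaVirtualBlock block;
+ VkResult res = vmaCreateVirtualBlock(&blockCreateInfo, &block);
+
+ VmaVirtualAllocationCreateInfo allocCreateInfo = {0};
+ allocCreateInfo.size = 4096;
+
+ VmaVirtualAllocation alloc;
+ VkDeviceSize offset;
+ res = vmaVirtualAllocate(block, &allocCreateInfo, &alloc, &offset);
+ // ... use the range [offset, offset + 4096) of your own resource ...
+ vmaVirtualFree(block, alloc);
+ vmaDestroyVirtualBlock(block);
+ \endcode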
+ */ + const VkAllocationCallbacks* VMA_NULLABLE pAllocationCallbacks; +} VmaVirtualBlockCreateInfo; + +/// Parameters of created virtual allocation to be passed to vmaVirtualAllocate(). +typedef struct VmaVirtualAllocationCreateInfo +{ + /** \brief Size of the allocation. + + Cannot be zero. + */ + VkDeviceSize size; + /** \brief Required alignment of the allocation. Optional. + + Must be power of two. Special value 0 has the same meaning as 1 - means no special alignment is required, so allocation can start at any offset. + */ + VkDeviceSize alignment; + /** \brief Use combination of #VmaVirtualAllocationCreateFlagBits. + */ + VmaVirtualAllocationCreateFlags flags; + /** \brief Custom pointer to be associated with the allocation. Optional. + + It can be any value and can be used for user-defined purposes. It can be fetched or changed later. + */ + void* VMA_NULLABLE pUserData; +} VmaVirtualAllocationCreateInfo; + +/// Parameters of an existing virtual allocation, returned by vmaGetVirtualAllocationInfo(). +typedef struct VmaVirtualAllocationInfo +{ + /** \brief Offset of the allocation. + + Offset at which the allocation was made. + */ + VkDeviceSize offset; + /** \brief Size of the allocation. + + Same value as passed in VmaVirtualAllocationCreateInfo::size. + */ + VkDeviceSize size; + /** \brief Custom pointer associated with the allocation. + + Same value as passed in VmaVirtualAllocationCreateInfo::pUserData or to vmaSetVirtualAllocationUserData(). + */ + void* VMA_NULLABLE pUserData; +} VmaVirtualAllocationInfo; + +/** @} */ + +#endif // _VMA_DATA_TYPES_DECLARATIONS + +#ifndef _VMA_FUNCTION_HEADERS + +/** +\addtogroup group_init +@{ +*/ + +/// Creates #VmaAllocator object. +VMA_CALL_PRE VkResult VMA_CALL_POST vmaCreateAllocator( + const VmaAllocatorCreateInfo* VMA_NOT_NULL pCreateInfo, + VmaAllocator VMA_NULLABLE* VMA_NOT_NULL pAllocator); + +/// Destroys allocator object. +VMA_CALL_PRE void VMA_CALL_POST vmaDestroyAllocator( + VmaAllocator VMA_NULLABLE allocator); + +/** \brief Returns information about existing #VmaAllocator object - handle to Vulkan device etc. + +It might be useful if you want to keep just the #VmaAllocator handle and fetch other required handles to +`VkPhysicalDevice`, `VkDevice` etc. every time using this function. +*/ +VMA_CALL_PRE void VMA_CALL_POST vmaGetAllocatorInfo( + VmaAllocator VMA_NOT_NULL allocator, + VmaAllocatorInfo* VMA_NOT_NULL pAllocatorInfo); + +/** +PhysicalDeviceProperties are fetched from physicalDevice by the allocator. +You can access it here, without fetching it again on your own. +*/ +VMA_CALL_PRE void VMA_CALL_POST vmaGetPhysicalDeviceProperties( + VmaAllocator VMA_NOT_NULL allocator, + const VkPhysicalDeviceProperties* VMA_NULLABLE* VMA_NOT_NULL ppPhysicalDeviceProperties); + +/** +PhysicalDeviceMemoryProperties are fetched from physicalDevice by the allocator. +You can access it here, without fetching it again on your own. +*/ +VMA_CALL_PRE void VMA_CALL_POST vmaGetMemoryProperties( + VmaAllocator VMA_NOT_NULL allocator, + const VkPhysicalDeviceMemoryProperties* VMA_NULLABLE* VMA_NOT_NULL ppPhysicalDeviceMemoryProperties); + +/** +\brief Given Memory Type Index, returns Property Flags of this memory type. + +This is just a convenience function. Same information can be obtained using +vmaGetMemoryProperties(). +*/ +VMA_CALL_PRE void VMA_CALL_POST vmaGetMemoryTypeProperties( + VmaAllocator VMA_NOT_NULL allocator, + uint32_t memoryTypeIndex, + VkMemoryPropertyFlags* VMA_NOT_NULL pFlags); + +/** \brief Sets index of the current frame. 
+*/ +VMA_CALL_PRE void VMA_CALL_POST vmaSetCurrentFrameIndex( + VmaAllocator VMA_NOT_NULL allocator, + uint32_t frameIndex); + +/** @} */ + +/** +\addtogroup group_stats +@{ +*/ + +/** \brief Retrieves statistics from current state of the Allocator. + +This function is called "calculate" not "get" because it has to traverse all +internal data structures, so it may be quite slow. Use it for debugging purposes. +For faster but more brief statistics suitable to be called every frame or every allocation, +use vmaGetHeapBudgets(). + +Note that when using allocator from multiple threads, returned information may immediately +become outdated. +*/ +VMA_CALL_PRE void VMA_CALL_POST vmaCalculateStatistics( + VmaAllocator VMA_NOT_NULL allocator, + VmaTotalStatistics* VMA_NOT_NULL pStats); + +/** \brief Retrieves information about current memory usage and budget for all memory heaps. + +\param allocator +\param[out] pBudgets Must point to array with number of elements at least equal to number of memory heaps in physical device used. + +This function is called "get" not "calculate" because it is very fast, suitable to be called +every frame or every allocation. For more detailed statistics use vmaCalculateStatistics(). + +Note that when using allocator from multiple threads, returned information may immediately +become outdated. +*/ +VMA_CALL_PRE void VMA_CALL_POST vmaGetHeapBudgets( + VmaAllocator VMA_NOT_NULL allocator, + VmaBudget* VMA_NOT_NULL VMA_LEN_IF_NOT_NULL("VkPhysicalDeviceMemoryProperties::memoryHeapCount") pBudgets); + +/** @} */ + +/** +\addtogroup group_alloc +@{ +*/ + +/** +\brief Helps to find memoryTypeIndex, given memoryTypeBits and VmaAllocationCreateInfo. + +This algorithm tries to find a memory type that: + +- Is allowed by memoryTypeBits. +- Contains all the flags from pAllocationCreateInfo->requiredFlags. +- Matches intended usage. +- Has as many flags from pAllocationCreateInfo->preferredFlags as possible. + +\return Returns VK_ERROR_FEATURE_NOT_PRESENT if not found. Receiving such result +from this function or any other allocating function probably means that your +device doesn't support any memory type with requested features for the specific +type of resource you want to use it for. Please check parameters of your +resource, like image layout (OPTIMAL versus LINEAR) or mip level count. +*/ +VMA_CALL_PRE VkResult VMA_CALL_POST vmaFindMemoryTypeIndex( + VmaAllocator VMA_NOT_NULL allocator, + uint32_t memoryTypeBits, + const VmaAllocationCreateInfo* VMA_NOT_NULL pAllocationCreateInfo, + uint32_t* VMA_NOT_NULL pMemoryTypeIndex); + +/** +\brief Helps to find memoryTypeIndex, given VkBufferCreateInfo and VmaAllocationCreateInfo. + +It can be useful e.g. to determine value to be used as VmaPoolCreateInfo::memoryTypeIndex. +It internally creates a temporary, dummy buffer that never has memory bound. +*/ +VMA_CALL_PRE VkResult VMA_CALL_POST vmaFindMemoryTypeIndexForBufferInfo( + VmaAllocator VMA_NOT_NULL allocator, + const VkBufferCreateInfo* VMA_NOT_NULL pBufferCreateInfo, + const VmaAllocationCreateInfo* VMA_NOT_NULL pAllocationCreateInfo, + uint32_t* VMA_NOT_NULL pMemoryTypeIndex); + +/** +\brief Helps to find memoryTypeIndex, given VkImageCreateInfo and VmaAllocationCreateInfo. + +It can be useful e.g. to determine value to be used as VmaPoolCreateInfo::memoryTypeIndex. +It internally creates a temporary, dummy image that never has memory bound. 
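+
+For illustration, a minimal sketch of feeding the result into VmaPoolCreateInfo::memoryTypeIndex
+to create a custom pool for sampled images (assuming `allocator` is a valid #VmaAllocator):
+
+\code
+VkImageCreateInfo imgCreateInfo = { VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO };
+imgCreateInfo.imageType = VK_IMAGE_TYPE_2D;
+imgCreateInfo.format = VK_FORMAT_R8G8B8A8_UNORM;
+imgCreateInfo.extent.width = 1024;
+imgCreateInfo.extent.height = 1024;
+imgCreateInfo.extent.depth = 1;
+imgCreateInfo.mipLevels = 1;
+imgCreateInfo.arrayLayers = 1;
+imgCreateInfo.samples = VK_SAMPLE_COUNT_1_BIT;
+imgCreateInfo.tiling = VK_IMAGE_TILING_OPTIMAL;
+imgCreateInfo.usage = VK_IMAGE_USAGE_SAMPLED_BIT | VK_IMAGE_USAGE_TRANSFER_DST_BIT;
+imgCreateInfo.initialLayout = VK_IMAGE_LAYOUT_UNDEFINED;
+
+VmaAllocationCreateInfo allocCreateInfo = {0};
+allocCreateInfo.usage = VMA_MEMORY_USAGE_AUTO;
+
+uint32_t memTypeIndex;
+VkResult res = vmaFindMemoryTypeIndexForImageInfo(allocator,
+    &imgCreateInfo, &allocCreateInfo, &memTypeIndex);
+
+VmaPoolCreateInfo poolCreateInfo = {0};
+poolCreateInfo.memoryTypeIndex = memTypeIndex;
+
+VmaPool pool;
+res = vmaCreatePool(allocator, &poolCreateInfo, &pool);
+\endcode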
+*/ +VMA_CALL_PRE VkResult VMA_CALL_POST vmaFindMemoryTypeIndexForImageInfo( + VmaAllocator VMA_NOT_NULL allocator, + const VkImageCreateInfo* VMA_NOT_NULL pImageCreateInfo, + const VmaAllocationCreateInfo* VMA_NOT_NULL pAllocationCreateInfo, + uint32_t* VMA_NOT_NULL pMemoryTypeIndex); + +/** \brief Allocates Vulkan device memory and creates #VmaPool object. + +\param allocator Allocator object. +\param pCreateInfo Parameters of pool to create. +\param[out] pPool Handle to created pool. +*/ +VMA_CALL_PRE VkResult VMA_CALL_POST vmaCreatePool( + VmaAllocator VMA_NOT_NULL allocator, + const VmaPoolCreateInfo* VMA_NOT_NULL pCreateInfo, + VmaPool VMA_NULLABLE* VMA_NOT_NULL pPool); + +/** \brief Destroys #VmaPool object and frees Vulkan device memory. +*/ +VMA_CALL_PRE void VMA_CALL_POST vmaDestroyPool( + VmaAllocator VMA_NOT_NULL allocator, + VmaPool VMA_NULLABLE pool); + +/** @} */ + +/** +\addtogroup group_stats +@{ +*/ + +/** \brief Retrieves statistics of existing #VmaPool object. + +\param allocator Allocator object. +\param pool Pool object. +\param[out] pPoolStats Statistics of specified pool. +*/ +VMA_CALL_PRE void VMA_CALL_POST vmaGetPoolStatistics( + VmaAllocator VMA_NOT_NULL allocator, + VmaPool VMA_NOT_NULL pool, + VmaStatistics* VMA_NOT_NULL pPoolStats); + +/** \brief Retrieves detailed statistics of existing #VmaPool object. + +\param allocator Allocator object. +\param pool Pool object. +\param[out] pPoolStats Statistics of specified pool. +*/ +VMA_CALL_PRE void VMA_CALL_POST vmaCalculatePoolStatistics( + VmaAllocator VMA_NOT_NULL allocator, + VmaPool VMA_NOT_NULL pool, + VmaDetailedStatistics* VMA_NOT_NULL pPoolStats); + +/** @} */ + +/** +\addtogroup group_alloc +@{ +*/ + +/** \brief Checks magic number in margins around all allocations in given memory pool in search for corruptions. + +Corruption detection is enabled only when `VMA_DEBUG_DETECT_CORRUPTION` macro is defined to nonzero, +`VMA_DEBUG_MARGIN` is defined to nonzero and the pool is created in memory type that is +`HOST_VISIBLE` and `HOST_COHERENT`. For more information, see [Corruption detection](@ref debugging_memory_usage_corruption_detection). + +Possible return values: + +- `VK_ERROR_FEATURE_NOT_PRESENT` - corruption detection is not enabled for specified pool. +- `VK_SUCCESS` - corruption detection has been performed and succeeded. +- `VK_ERROR_UNKNOWN` - corruption detection has been performed and found memory corruptions around one of the allocations. + `VMA_ASSERT` is also fired in that case. +- Other value: Error returned by Vulkan, e.g. memory mapping failure. +*/ +VMA_CALL_PRE VkResult VMA_CALL_POST vmaCheckPoolCorruption( + VmaAllocator VMA_NOT_NULL allocator, + VmaPool VMA_NOT_NULL pool); + +/** \brief Retrieves name of a custom pool. + +After the call `ppName` is either null or points to an internally-owned null-terminated string +containing name of the pool that was previously set. The pointer becomes invalid when the pool is +destroyed or its name is changed using vmaSetPoolName(). +*/ +VMA_CALL_PRE void VMA_CALL_POST vmaGetPoolName( + VmaAllocator VMA_NOT_NULL allocator, + VmaPool VMA_NOT_NULL pool, + const char* VMA_NULLABLE* VMA_NOT_NULL ppName); + +/** \brief Sets name of a custom pool. + +`pName` can be either null or pointer to a null-terminated string with new name for the pool. +Function makes internal copy of the string, so it can be changed or freed immediately after this call. 
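+
+For illustration, a minimal sketch (assuming `allocator` and `pool` are valid handles):
+
+\code
+vmaSetPoolName(allocator, pool, "Texture pool");
+
+const char* name;
+vmaGetPoolName(allocator, pool, &name); // name now points to "Texture pool"
+\endcode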
+*/ +VMA_CALL_PRE void VMA_CALL_POST vmaSetPoolName( + VmaAllocator VMA_NOT_NULL allocator, + VmaPool VMA_NOT_NULL pool, + const char* VMA_NULLABLE pName); + +/** \brief General purpose memory allocation. + +\param allocator +\param pVkMemoryRequirements +\param pCreateInfo +\param[out] pAllocation Handle to allocated memory. +\param[out] pAllocationInfo Optional. Information about allocated memory. It can be later fetched using function vmaGetAllocationInfo(). + +You should free the memory using vmaFreeMemory() or vmaFreeMemoryPages(). + +It is recommended to use vmaAllocateMemoryForBuffer(), vmaAllocateMemoryForImage(), +vmaCreateBuffer(), vmaCreateImage() instead whenever possible. +*/ +VMA_CALL_PRE VkResult VMA_CALL_POST vmaAllocateMemory( + VmaAllocator VMA_NOT_NULL allocator, + const VkMemoryRequirements* VMA_NOT_NULL pVkMemoryRequirements, + const VmaAllocationCreateInfo* VMA_NOT_NULL pCreateInfo, + VmaAllocation VMA_NULLABLE* VMA_NOT_NULL pAllocation, + VmaAllocationInfo* VMA_NULLABLE pAllocationInfo); + +/** \brief General purpose memory allocation for multiple allocation objects at once. + +\param allocator Allocator object. +\param pVkMemoryRequirements Memory requirements for each allocation. +\param pCreateInfo Creation parameters for each allocation. +\param allocationCount Number of allocations to make. +\param[out] pAllocations Pointer to array that will be filled with handles to created allocations. +\param[out] pAllocationInfo Optional. Pointer to array that will be filled with parameters of created allocations. + +You should free the memory using vmaFreeMemory() or vmaFreeMemoryPages(). + +Word "pages" is just a suggestion to use this function to allocate pieces of memory needed for sparse binding. +It is just a general purpose allocation function able to make multiple allocations at once. +It may be internally optimized to be more efficient than calling vmaAllocateMemory() `allocationCount` times. + +All allocations are made using same parameters. All of them are created out of the same memory pool and type. +If any allocation fails, all allocations already made within this function call are also freed, so that when +returned result is not `VK_SUCCESS`, `pAllocation` array is always entirely filled with `VK_NULL_HANDLE`. +*/ +VMA_CALL_PRE VkResult VMA_CALL_POST vmaAllocateMemoryPages( + VmaAllocator VMA_NOT_NULL allocator, + const VkMemoryRequirements* VMA_NOT_NULL VMA_LEN_IF_NOT_NULL(allocationCount) pVkMemoryRequirements, + const VmaAllocationCreateInfo* VMA_NOT_NULL VMA_LEN_IF_NOT_NULL(allocationCount) pCreateInfo, + size_t allocationCount, + VmaAllocation VMA_NULLABLE* VMA_NOT_NULL VMA_LEN_IF_NOT_NULL(allocationCount) pAllocations, + VmaAllocationInfo* VMA_NULLABLE VMA_LEN_IF_NOT_NULL(allocationCount) pAllocationInfo); + +/** \brief Allocates memory suitable for given `VkBuffer`. + +\param allocator +\param buffer +\param pCreateInfo +\param[out] pAllocation Handle to allocated memory. +\param[out] pAllocationInfo Optional. Information about allocated memory. It can be later fetched using function vmaGetAllocationInfo(). + +It only creates #VmaAllocation. To bind the memory to the buffer, use vmaBindBufferMemory(). + +This is a special-purpose function. In most cases you should use vmaCreateBuffer(). + +You must free the allocation using vmaFreeMemory() when no longer needed. 
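+
+For illustration, a minimal sketch of the "separate" path (assuming `allocator` is a valid
+#VmaAllocator and `buffer` was created with `vkCreateBuffer()`):
+
+\code
+VmaAllocationCreateInfo allocCreateInfo = {0};
+allocCreateInfo.usage = VMA_MEMORY_USAGE_AUTO;
+
+VmaAllocation alloc;
+VkResult res = vmaAllocateMemoryForBuffer(allocator, buffer, &allocCreateInfo, &alloc, NULL);
+if(res == VK_SUCCESS)
+{
+    res = vmaBindBufferMemory(allocator, alloc, buffer);
+}
+// ... later ...
+vmaFreeMemory(allocator, alloc);
+\endcode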
+*/
+VMA_CALL_PRE VkResult VMA_CALL_POST vmaAllocateMemoryForBuffer(
+ VmaAllocator VMA_NOT_NULL allocator,
+ VkBuffer VMA_NOT_NULL_NON_DISPATCHABLE buffer,
+ const VmaAllocationCreateInfo* VMA_NOT_NULL pCreateInfo,
+ VmaAllocation VMA_NULLABLE* VMA_NOT_NULL pAllocation,
+ VmaAllocationInfo* VMA_NULLABLE pAllocationInfo);
+
+/** \brief Allocates memory suitable for given `VkImage`.
+
+\param allocator
+\param image
+\param pCreateInfo
+\param[out] pAllocation Handle to allocated memory.
+\param[out] pAllocationInfo Optional. Information about allocated memory. It can be later fetched using function vmaGetAllocationInfo().
+
+It only creates #VmaAllocation. To bind the memory to the image, use vmaBindImageMemory().
+
+This is a special-purpose function. In most cases you should use vmaCreateImage().
+
+You must free the allocation using vmaFreeMemory() when no longer needed.
+*/
+VMA_CALL_PRE VkResult VMA_CALL_POST vmaAllocateMemoryForImage(
+ VmaAllocator VMA_NOT_NULL allocator,
+ VkImage VMA_NOT_NULL_NON_DISPATCHABLE image,
+ const VmaAllocationCreateInfo* VMA_NOT_NULL pCreateInfo,
+ VmaAllocation VMA_NULLABLE* VMA_NOT_NULL pAllocation,
+ VmaAllocationInfo* VMA_NULLABLE pAllocationInfo);
+
+/** \brief Frees memory previously allocated using vmaAllocateMemory(), vmaAllocateMemoryForBuffer(), or vmaAllocateMemoryForImage().
+
+Passing `VK_NULL_HANDLE` as `allocation` is valid. Such a function call is just skipped.
+*/
+VMA_CALL_PRE void VMA_CALL_POST vmaFreeMemory(
+ VmaAllocator VMA_NOT_NULL allocator,
+ const VmaAllocation VMA_NULLABLE allocation);
+
+/** \brief Frees memory and destroys multiple allocations.
+
+Word "pages" is just a suggestion to use this function to free pieces of memory used for sparse binding.
+It is just a general purpose function to free memory and destroy allocations made using e.g. vmaAllocateMemory(),
+vmaAllocateMemoryPages() and other functions.
+It may be internally optimized to be more efficient than calling vmaFreeMemory() `allocationCount` times.
+
+Allocations in `pAllocations` array can come from any memory pools and types.
+Passing `VK_NULL_HANDLE` as elements of `pAllocations` array is valid. Such entries are just skipped.
+*/
+VMA_CALL_PRE void VMA_CALL_POST vmaFreeMemoryPages(
+ VmaAllocator VMA_NOT_NULL allocator,
+ size_t allocationCount,
+ const VmaAllocation VMA_NULLABLE* VMA_NOT_NULL VMA_LEN_IF_NOT_NULL(allocationCount) pAllocations);
+
+/** \brief Returns current information about specified allocation.
+
+Current parameters of given allocation are returned in `pAllocationInfo`.
+
+Although this function doesn't lock any mutex and should therefore be quite efficient,
+you should avoid calling it too often.
+You can retrieve the same VmaAllocationInfo structure while creating your resource, from functions
+vmaCreateBuffer() or vmaCreateImage(). You can keep it if you are sure its parameters don't change
+(e.g. due to defragmentation).
+
+There is also a new function vmaGetAllocationInfo2() that offers extended information
+about the allocation, returned using the new structure #VmaAllocationInfo2.
+*/
+VMA_CALL_PRE void VMA_CALL_POST vmaGetAllocationInfo(
+ VmaAllocator VMA_NOT_NULL allocator,
+ VmaAllocation VMA_NOT_NULL allocation,
+ VmaAllocationInfo* VMA_NOT_NULL pAllocationInfo);
+
+/** \brief Returns extended information about specified allocation.
+
+Current parameters of given allocation are returned in `pAllocationInfo`.
+Extended parameters in structure #VmaAllocationInfo2 include memory block size +and a flag telling whether the allocation has dedicated memory. +It can be useful e.g. for interop with OpenGL. +*/ +VMA_CALL_PRE void VMA_CALL_POST vmaGetAllocationInfo2( + VmaAllocator VMA_NOT_NULL allocator, + VmaAllocation VMA_NOT_NULL allocation, + VmaAllocationInfo2* VMA_NOT_NULL pAllocationInfo); + +/** \brief Sets pUserData in given allocation to new value. + +The value of pointer `pUserData` is copied to allocation's `pUserData`. +It is opaque, so you can use it however you want - e.g. +as a pointer, ordinal number or some handle to you own data. +*/ +VMA_CALL_PRE void VMA_CALL_POST vmaSetAllocationUserData( + VmaAllocator VMA_NOT_NULL allocator, + VmaAllocation VMA_NOT_NULL allocation, + void* VMA_NULLABLE pUserData); + +/** \brief Sets pName in given allocation to new value. + +`pName` must be either null, or pointer to a null-terminated string. The function +makes local copy of the string and sets it as allocation's `pName`. String +passed as pName doesn't need to be valid for whole lifetime of the allocation - +you can free it after this call. String previously pointed by allocation's +`pName` is freed from memory. +*/ +VMA_CALL_PRE void VMA_CALL_POST vmaSetAllocationName( + VmaAllocator VMA_NOT_NULL allocator, + VmaAllocation VMA_NOT_NULL allocation, + const char* VMA_NULLABLE pName); + +/** +\brief Given an allocation, returns Property Flags of its memory type. + +This is just a convenience function. Same information can be obtained using +vmaGetAllocationInfo() + vmaGetMemoryProperties(). +*/ +VMA_CALL_PRE void VMA_CALL_POST vmaGetAllocationMemoryProperties( + VmaAllocator VMA_NOT_NULL allocator, + VmaAllocation VMA_NOT_NULL allocation, + VkMemoryPropertyFlags* VMA_NOT_NULL pFlags); + + +#if VMA_EXTERNAL_MEMORY_WIN32 +/** +\brief Given an allocation, returns Win32 handle that may be imported by other processes or APIs. + +\param hTargetProcess Must be a valid handle to target process or null. If it's null, the function returns + handle for the current process. +\param[out] pHandle Output parameter that returns the handle. + +The function fills `pHandle` with handle that can be used in target process. +The handle is fetched using function `vkGetMemoryWin32HandleKHR`. +When no longer needed, you must close it using: + +\code +CloseHandle(handle); +\endcode + +You can close it any time, before or after destroying the allocation object. +It is reference-counted internally by Windows. + +Note the handle is returned for the entire `VkDeviceMemory` block that the allocation belongs to. +If the allocation is sub-allocated from a larger block, you may need to consider the offset of the allocation +(VmaAllocationInfo::offset). + +If the function fails with `VK_ERROR_FEATURE_NOT_PRESENT` error code, please double-check +that VmaVulkanFunctions::vkGetMemoryWin32HandleKHR function pointer is set, e.g. either by using `VMA_DYNAMIC_VULKAN_FUNCTIONS` +or by manually passing it through VmaAllocatorCreateInfo::pVulkanFunctions. + +For more information, see chapter \ref vk_khr_external_memory_win32. +*/ +VMA_CALL_PRE VkResult VMA_CALL_POST vmaGetMemoryWin32Handle(VmaAllocator VMA_NOT_NULL allocator, + VmaAllocation VMA_NOT_NULL allocation, HANDLE hTargetProcess, HANDLE* VMA_NOT_NULL pHandle); +#endif // VMA_EXTERNAL_MEMORY_WIN32 + +/** \brief Maps memory represented by given allocation and returns pointer to it. + +Maps memory represented by given allocation to make it accessible to CPU code. 
+When succeeded, `*ppData` contains pointer to first byte of this memory.
+
+\warning
+If the allocation is part of a bigger `VkDeviceMemory` block, returned pointer is
+correctly offset to the beginning of the region assigned to this particular allocation.
+Unlike the result of `vkMapMemory`, it points to the allocation, not to the beginning of the whole block.
+You should not add VmaAllocationInfo::offset to it!
+
+Mapping is internally reference-counted and synchronized, so even though the raw Vulkan
+function `vkMapMemory()` cannot be used to map the same block of `VkDeviceMemory`
+multiple times simultaneously, it is safe to call this function on allocations
+assigned to the same memory block. Actual Vulkan memory will be mapped on the first
+mapping and unmapped on the last unmapping.
+
+If the function succeeded, you must call vmaUnmapMemory() to unmap the
+allocation when mapping is no longer needed or before freeing the allocation, at
+the latest.
+
+It is also safe to call this function multiple times on the same allocation. You
+must call vmaUnmapMemory() the same number of times as you called vmaMapMemory().
+
+It is also safe to call this function on an allocation created with
+#VMA_ALLOCATION_CREATE_MAPPED_BIT flag. Its memory stays mapped all the time.
+You must still call vmaUnmapMemory() the same number of times as you called
+vmaMapMemory(). You must not call vmaUnmapMemory() an additional time to free the
+"0-th" mapping made automatically due to #VMA_ALLOCATION_CREATE_MAPPED_BIT flag.
+
+This function fails when used on an allocation made in a memory type that is not
+`HOST_VISIBLE`.
+
+This function doesn't automatically flush or invalidate caches.
+If the allocation is made from a memory type that is not `HOST_COHERENT`,
+you also need to use vmaInvalidateAllocation() / vmaFlushAllocation(), as required by Vulkan specification.
+*/
+VMA_CALL_PRE VkResult VMA_CALL_POST vmaMapMemory(
+ VmaAllocator VMA_NOT_NULL allocator,
+ VmaAllocation VMA_NOT_NULL allocation,
+ void* VMA_NULLABLE* VMA_NOT_NULL ppData);
+
+/** \brief Unmaps memory represented by given allocation, mapped previously using vmaMapMemory().
+
+For details, see description of vmaMapMemory().
+
+This function doesn't automatically flush or invalidate caches.
+If the allocation is made from a memory type that is not `HOST_COHERENT`,
+you also need to use vmaInvalidateAllocation() / vmaFlushAllocation(), as required by Vulkan specification.
+*/
+VMA_CALL_PRE void VMA_CALL_POST vmaUnmapMemory(
+ VmaAllocator VMA_NOT_NULL allocator,
+ VmaAllocation VMA_NOT_NULL allocation);
+
+/** \brief Flushes memory of given allocation.
+
+Calls `vkFlushMappedMemoryRanges()` for memory associated with given range of given allocation.
+It needs to be called after writing to mapped memory for memory types that are not `HOST_COHERENT`.
+Unmap operation doesn't do that automatically.
+
+- `offset` must be relative to the beginning of the allocation.
+- `size` can be `VK_WHOLE_SIZE`. It means all memory from `offset` to the end of the given allocation.
+- `offset` and `size` don't have to be aligned.
+ They are internally rounded down/up to a multiple of `nonCoherentAtomSize`.
+- If `size` is 0, this call is ignored.
+- If the memory type that the `allocation` belongs to is not `HOST_VISIBLE` or it is `HOST_COHERENT`,
+ this call is ignored.
+
+Warning! `offset` and `size` are relative to the contents of given `allocation`.
+If you mean the whole allocation, you can pass 0 and `VK_WHOLE_SIZE`, respectively.
+Do not pass allocation's offset as `offset`!!!
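+
+For illustration, a minimal sketch of writing to a non-coherent, host-visible allocation
+(assuming `allocator` and `alloc` are valid handles; `myData`/`myDataSize` are placeholder names):
+
+\code
+void* mapped;
+VkResult res = vmaMapMemory(allocator, alloc, &mapped);
+if(res == VK_SUCCESS)
+{
+    memcpy(mapped, myData, myDataSize);
+    vmaFlushAllocation(allocator, alloc, 0, VK_WHOLE_SIZE); // flush the whole allocation
+    vmaUnmapMemory(allocator, alloc);
+}
+\endcode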
+
+This function returns the `VkResult` from `vkFlushMappedMemoryRanges` if it is
+called, otherwise `VK_SUCCESS`.
+*/
+VMA_CALL_PRE VkResult VMA_CALL_POST vmaFlushAllocation(
+ VmaAllocator VMA_NOT_NULL allocator,
+ VmaAllocation VMA_NOT_NULL allocation,
+ VkDeviceSize offset,
+ VkDeviceSize size);
+
+/** \brief Invalidates memory of given allocation.
+
+Calls `vkInvalidateMappedMemoryRanges()` for memory associated with given range of given allocation.
+It needs to be called before reading from mapped memory for memory types that are not `HOST_COHERENT`.
+Map operation doesn't do that automatically.
+
+- `offset` must be relative to the beginning of the allocation.
+- `size` can be `VK_WHOLE_SIZE`. It means all memory from `offset` to the end of the given allocation.
+- `offset` and `size` don't have to be aligned.
+ They are internally rounded down/up to a multiple of `nonCoherentAtomSize`.
+- If `size` is 0, this call is ignored.
+- If the memory type that the `allocation` belongs to is not `HOST_VISIBLE` or it is `HOST_COHERENT`,
+ this call is ignored.
+
+Warning! `offset` and `size` are relative to the contents of given `allocation`.
+If you mean the whole allocation, you can pass 0 and `VK_WHOLE_SIZE`, respectively.
+Do not pass allocation's offset as `offset`!!!
+
+This function returns the `VkResult` from `vkInvalidateMappedMemoryRanges` if
+it is called, otherwise `VK_SUCCESS`.
+*/
+VMA_CALL_PRE VkResult VMA_CALL_POST vmaInvalidateAllocation(
+ VmaAllocator VMA_NOT_NULL allocator,
+ VmaAllocation VMA_NOT_NULL allocation,
+ VkDeviceSize offset,
+ VkDeviceSize size);
+
+/** \brief Flushes memory of given set of allocations.
+
+Calls `vkFlushMappedMemoryRanges()` for memory associated with given ranges of given allocations.
+For more information, see documentation of vmaFlushAllocation().
+
+\param allocator
+\param allocationCount
+\param allocations
+\param offsets If not null, it must point to an array of offsets of regions to flush, relative to the beginning of respective allocations. Null means all offsets are zero.
+\param sizes If not null, it must point to an array of sizes of regions to flush in respective allocations. Null means `VK_WHOLE_SIZE` for all allocations.
+
+This function returns the `VkResult` from `vkFlushMappedMemoryRanges` if it is
+called, otherwise `VK_SUCCESS`.
+*/
+VMA_CALL_PRE VkResult VMA_CALL_POST vmaFlushAllocations(
+ VmaAllocator VMA_NOT_NULL allocator,
+ uint32_t allocationCount,
+ const VmaAllocation VMA_NOT_NULL* VMA_NULLABLE VMA_LEN_IF_NOT_NULL(allocationCount) allocations,
+ const VkDeviceSize* VMA_NULLABLE VMA_LEN_IF_NOT_NULL(allocationCount) offsets,
+ const VkDeviceSize* VMA_NULLABLE VMA_LEN_IF_NOT_NULL(allocationCount) sizes);
+
+/** \brief Invalidates memory of given set of allocations.
+
+Calls `vkInvalidateMappedMemoryRanges()` for memory associated with given ranges of given allocations.
+For more information, see documentation of vmaInvalidateAllocation().
+
+\param allocator
+\param allocationCount
+\param allocations
+\param offsets If not null, it must point to an array of offsets of regions to invalidate, relative to the beginning of respective allocations. Null means all offsets are zero.
+\param sizes If not null, it must point to an array of sizes of regions to invalidate in respective allocations. Null means `VK_WHOLE_SIZE` for all allocations.
+
+This function returns the `VkResult` from `vkInvalidateMappedMemoryRanges` if it is
+called, otherwise `VK_SUCCESS`.
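+
+For illustration, a minimal sketch of invalidating two whole allocations before reading them on
+the CPU (assuming `allocator`, `allocA` and `allocB` are valid handles):
+
+\code
+VmaAllocation allocs[2] = { allocA, allocB };
+// Null offsets/sizes mean offset 0 and VK_WHOLE_SIZE for every allocation.
+VkResult res = vmaInvalidateAllocations(allocator, 2, allocs, NULL, NULL);
+\endcode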
+*/
+VMA_CALL_PRE VkResult VMA_CALL_POST vmaInvalidateAllocations(
+ VmaAllocator VMA_NOT_NULL allocator,
+ uint32_t allocationCount,
+ const VmaAllocation VMA_NOT_NULL* VMA_NULLABLE VMA_LEN_IF_NOT_NULL(allocationCount) allocations,
+ const VkDeviceSize* VMA_NULLABLE VMA_LEN_IF_NOT_NULL(allocationCount) offsets,
+ const VkDeviceSize* VMA_NULLABLE VMA_LEN_IF_NOT_NULL(allocationCount) sizes);
+
+/** \brief Maps the allocation temporarily if needed, copies data from specified host pointer to it, and flushes the memory from the host caches if needed.
+
+\param allocator
+\param pSrcHostPointer Pointer to the host data that becomes the source of the copy.
+\param dstAllocation Handle to the allocation that becomes destination of the copy.
+\param dstAllocationLocalOffset Offset within `dstAllocation` where to write copied data, in bytes.
+\param size Number of bytes to copy.
+
+This is a convenience function that allows you to copy data from a host pointer to an allocation easily.
+Same behavior can be achieved by calling vmaMapMemory(), `memcpy()`, vmaUnmapMemory(), vmaFlushAllocation().
+
+This function can be called only for allocations created in a memory type that has `VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT` flag.
+It can be ensured e.g. by using #VMA_MEMORY_USAGE_AUTO and #VMA_ALLOCATION_CREATE_HOST_ACCESS_SEQUENTIAL_WRITE_BIT or
+#VMA_ALLOCATION_CREATE_HOST_ACCESS_RANDOM_BIT.
+Otherwise, the function will fail and generate a Validation Layers error.
+
+`dstAllocationLocalOffset` is relative to the contents of given `dstAllocation`.
+If you mean the whole allocation, you should pass 0.
+Do not pass the allocation's offset within the device memory block as this parameter!
+*/
+VMA_CALL_PRE VkResult VMA_CALL_POST vmaCopyMemoryToAllocation(
+ VmaAllocator VMA_NOT_NULL allocator,
+ const void* VMA_NOT_NULL VMA_LEN_IF_NOT_NULL(size) pSrcHostPointer,
+ VmaAllocation VMA_NOT_NULL dstAllocation,
+ VkDeviceSize dstAllocationLocalOffset,
+ VkDeviceSize size);
+
+/** \brief Invalidates memory in the host caches if needed, maps the allocation temporarily if needed, and copies data from it to a specified host pointer.
+
+\param allocator
+\param srcAllocation Handle to the allocation that becomes source of the copy.
+\param srcAllocationLocalOffset Offset within `srcAllocation` where to read copied data, in bytes.
+\param pDstHostPointer Pointer to the host memory that becomes the destination of the copy.
+\param size Number of bytes to copy.
+
+This is a convenience function that allows you to copy data from an allocation to a host pointer easily.
+Same behavior can be achieved by calling vmaInvalidateAllocation(), vmaMapMemory(), `memcpy()`, vmaUnmapMemory().
+
+This function should be called only for allocations created in a memory type that has `VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT`
+and `VK_MEMORY_PROPERTY_HOST_CACHED_BIT` flag.
+It can be ensured e.g. by using #VMA_MEMORY_USAGE_AUTO and #VMA_ALLOCATION_CREATE_HOST_ACCESS_RANDOM_BIT.
+Otherwise, the function may fail and generate a Validation Layers error.
+It may also work very slowly when reading from uncached memory.
+
+`srcAllocationLocalOffset` is relative to the contents of given `srcAllocation`.
+If you mean the whole allocation, you should pass 0.
+Do not pass the allocation's offset within the device memory block as this parameter!
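+
+For illustration, a minimal sketch of a round trip using both convenience functions
+(assuming `allocator` and `alloc` are valid handles; `srcData`, `dstData` and `dataSize`
+are placeholder names for application data):
+
+\code
+// Upload: host -> allocation (flushes if needed).
+vmaCopyMemoryToAllocation(allocator, srcData, alloc, 0, dataSize);
+
+// Readback: allocation -> host (invalidates if needed).
+vmaCopyAllocationToMemory(allocator, alloc, 0, dstData, dataSize);
+\endcode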
+*/ +VMA_CALL_PRE VkResult VMA_CALL_POST vmaCopyAllocationToMemory( + VmaAllocator VMA_NOT_NULL allocator, + VmaAllocation VMA_NOT_NULL srcAllocation, + VkDeviceSize srcAllocationLocalOffset, + void* VMA_NOT_NULL VMA_LEN_IF_NOT_NULL(size) pDstHostPointer, + VkDeviceSize size); + +/** \brief Checks magic number in margins around all allocations in given memory types (in both default and custom pools) in search for corruptions. + +\param allocator +\param memoryTypeBits Bit mask, where each bit set means that a memory type with that index should be checked. + +Corruption detection is enabled only when `VMA_DEBUG_DETECT_CORRUPTION` macro is defined to nonzero, +`VMA_DEBUG_MARGIN` is defined to nonzero and only for memory types that are +`HOST_VISIBLE` and `HOST_COHERENT`. For more information, see [Corruption detection](@ref debugging_memory_usage_corruption_detection). + +Possible return values: + +- `VK_ERROR_FEATURE_NOT_PRESENT` - corruption detection is not enabled for any of specified memory types. +- `VK_SUCCESS` - corruption detection has been performed and succeeded. +- `VK_ERROR_UNKNOWN` - corruption detection has been performed and found memory corruptions around one of the allocations. + `VMA_ASSERT` is also fired in that case. +- Other value: Error returned by Vulkan, e.g. memory mapping failure. +*/ +VMA_CALL_PRE VkResult VMA_CALL_POST vmaCheckCorruption( + VmaAllocator VMA_NOT_NULL allocator, + uint32_t memoryTypeBits); + +/** \brief Begins defragmentation process. + +\param allocator Allocator object. +\param pInfo Structure filled with parameters of defragmentation. +\param[out] pContext Context object that must be passed to vmaEndDefragmentation() to finish defragmentation. +\returns +- `VK_SUCCESS` if defragmentation can begin. +- `VK_ERROR_FEATURE_NOT_PRESENT` if defragmentation is not supported. + +For more information about defragmentation, see documentation chapter: +[Defragmentation](@ref defragmentation). +*/ +VMA_CALL_PRE VkResult VMA_CALL_POST vmaBeginDefragmentation( + VmaAllocator VMA_NOT_NULL allocator, + const VmaDefragmentationInfo* VMA_NOT_NULL pInfo, + VmaDefragmentationContext VMA_NULLABLE* VMA_NOT_NULL pContext); + +/** \brief Ends defragmentation process. + +\param allocator Allocator object. +\param context Context object that has been created by vmaBeginDefragmentation(). +\param[out] pStats Optional stats for the defragmentation. Can be null. + +Use this function to finish defragmentation started by vmaBeginDefragmentation(). +*/ +VMA_CALL_PRE void VMA_CALL_POST vmaEndDefragmentation( + VmaAllocator VMA_NOT_NULL allocator, + VmaDefragmentationContext VMA_NOT_NULL context, + VmaDefragmentationStats* VMA_NULLABLE pStats); + +/** \brief Starts single defragmentation pass. + +\param allocator Allocator object. +\param context Context object that has been created by vmaBeginDefragmentation(). +\param[out] pPassInfo Computed information for current pass. +\returns +- `VK_SUCCESS` if no more moves are possible. Then you can omit call to vmaEndDefragmentationPass() and simply end whole defragmentation. +- `VK_INCOMPLETE` if there are pending moves returned in `pPassInfo`. You need to perform them, call vmaEndDefragmentationPass(), + and then preferably try another pass with vmaBeginDefragmentationPass(). 
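+
+For illustration, a minimal sketch of the typical loop built from the calls above (assuming
+`allocator` is a valid #VmaAllocator; error handling and the per-move GPU copies are elided):
+
+\code
+VmaDefragmentationInfo defragInfo = {0}; // defragment default pools with default settings
+
+VmaDefragmentationContext defragCtx;
+VkResult res = vmaBeginDefragmentation(allocator, &defragInfo, &defragCtx);
+
+for(;;)
+{
+    VmaDefragmentationPassMoveInfo pass;
+    res = vmaBeginDefragmentationPass(allocator, defragCtx, &pass);
+    if(res == VK_SUCCESS)
+        break; // no more moves are possible
+
+    // For each pass.pMoves[i]: create the new buffer/image bound to dstTmpAllocation,
+    // copy the data on the GPU and wait for it to finish, destroy the old resource,
+    // or set pass.pMoves[i].operation to VMA_DEFRAGMENTATION_MOVE_OPERATION_IGNORE.
+
+    res = vmaEndDefragmentationPass(allocator, defragCtx, &pass);
+    if(res == VK_SUCCESS)
+        break;
+}
+
+vmaEndDefragmentation(allocator, defragCtx, NULL);
+\endcode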
+*/ +VMA_CALL_PRE VkResult VMA_CALL_POST vmaBeginDefragmentationPass( + VmaAllocator VMA_NOT_NULL allocator, + VmaDefragmentationContext VMA_NOT_NULL context, + VmaDefragmentationPassMoveInfo* VMA_NOT_NULL pPassInfo); + +/** \brief Ends single defragmentation pass. + +\param allocator Allocator object. +\param context Context object that has been created by vmaBeginDefragmentation(). +\param pPassInfo Computed information for current pass filled by vmaBeginDefragmentationPass() and possibly modified by you. + +Returns `VK_SUCCESS` if no more moves are possible or `VK_INCOMPLETE` if more defragmentations are possible. + +Ends incremental defragmentation pass and commits all defragmentation moves from `pPassInfo`. +After this call: + +- Allocations at `pPassInfo[i].srcAllocation` that had `pPassInfo[i].operation ==` #VMA_DEFRAGMENTATION_MOVE_OPERATION_COPY + (which is the default) will be pointing to the new destination place. +- Allocation at `pPassInfo[i].srcAllocation` that had `pPassInfo[i].operation ==` #VMA_DEFRAGMENTATION_MOVE_OPERATION_DESTROY + will be freed. + +If no more moves are possible you can end whole defragmentation. +*/ +VMA_CALL_PRE VkResult VMA_CALL_POST vmaEndDefragmentationPass( + VmaAllocator VMA_NOT_NULL allocator, + VmaDefragmentationContext VMA_NOT_NULL context, + VmaDefragmentationPassMoveInfo* VMA_NOT_NULL pPassInfo); + +/** \brief Binds buffer to allocation. + +Binds specified buffer to region of memory represented by specified allocation. +Gets `VkDeviceMemory` handle and offset from the allocation. +If you want to create a buffer, allocate memory for it and bind them together separately, +you should use this function for binding instead of standard `vkBindBufferMemory()`, +because it ensures proper synchronization so that when a `VkDeviceMemory` object is used by multiple +allocations, calls to `vkBind*Memory()` or `vkMapMemory()` won't happen from multiple threads simultaneously +(which is illegal in Vulkan). + +It is recommended to use function vmaCreateBuffer() instead of this one. +*/ +VMA_CALL_PRE VkResult VMA_CALL_POST vmaBindBufferMemory( + VmaAllocator VMA_NOT_NULL allocator, + VmaAllocation VMA_NOT_NULL allocation, + VkBuffer VMA_NOT_NULL_NON_DISPATCHABLE buffer); + +/** \brief Binds buffer to allocation with additional parameters. + +\param allocator +\param allocation +\param allocationLocalOffset Additional offset to be added while binding, relative to the beginning of the `allocation`. Normally it should be 0. +\param buffer +\param pNext A chain of structures to be attached to `VkBindBufferMemoryInfoKHR` structure used internally. Normally it should be null. + +This function is similar to vmaBindBufferMemory(), but it provides additional parameters. + +If `pNext` is not null, #VmaAllocator object must have been created with #VMA_ALLOCATOR_CREATE_KHR_BIND_MEMORY2_BIT flag +or with VmaAllocatorCreateInfo::vulkanApiVersion `>= VK_API_VERSION_1_1`. Otherwise the call fails. +*/ +VMA_CALL_PRE VkResult VMA_CALL_POST vmaBindBufferMemory2( + VmaAllocator VMA_NOT_NULL allocator, + VmaAllocation VMA_NOT_NULL allocation, + VkDeviceSize allocationLocalOffset, + VkBuffer VMA_NOT_NULL_NON_DISPATCHABLE buffer, + const void* VMA_NULLABLE VMA_EXTENDS_VK_STRUCT(VkBindBufferMemoryInfoKHR) pNext); + +/** \brief Binds image to allocation. + +Binds specified image to region of memory represented by specified allocation. +Gets `VkDeviceMemory` handle and offset from the allocation. 
+If you want to create an image, allocate memory for it and bind them together separately, +you should use this function for binding instead of standard `vkBindImageMemory()`, +because it ensures proper synchronization so that when a `VkDeviceMemory` object is used by multiple +allocations, calls to `vkBind*Memory()` or `vkMapMemory()` won't happen from multiple threads simultaneously +(which is illegal in Vulkan). + +It is recommended to use function vmaCreateImage() instead of this one. +*/ +VMA_CALL_PRE VkResult VMA_CALL_POST vmaBindImageMemory( + VmaAllocator VMA_NOT_NULL allocator, + VmaAllocation VMA_NOT_NULL allocation, + VkImage VMA_NOT_NULL_NON_DISPATCHABLE image); + +/** \brief Binds image to allocation with additional parameters. + +\param allocator +\param allocation +\param allocationLocalOffset Additional offset to be added while binding, relative to the beginning of the `allocation`. Normally it should be 0. +\param image +\param pNext A chain of structures to be attached to `VkBindImageMemoryInfoKHR` structure used internally. Normally it should be null. + +This function is similar to vmaBindImageMemory(), but it provides additional parameters. + +If `pNext` is not null, #VmaAllocator object must have been created with #VMA_ALLOCATOR_CREATE_KHR_BIND_MEMORY2_BIT flag +or with VmaAllocatorCreateInfo::vulkanApiVersion `>= VK_API_VERSION_1_1`. Otherwise the call fails. +*/ +VMA_CALL_PRE VkResult VMA_CALL_POST vmaBindImageMemory2( + VmaAllocator VMA_NOT_NULL allocator, + VmaAllocation VMA_NOT_NULL allocation, + VkDeviceSize allocationLocalOffset, + VkImage VMA_NOT_NULL_NON_DISPATCHABLE image, + const void* VMA_NULLABLE VMA_EXTENDS_VK_STRUCT(VkBindImageMemoryInfoKHR) pNext); + +/** \brief Creates a new `VkBuffer`, allocates and binds memory for it. + +\param allocator +\param pBufferCreateInfo +\param pAllocationCreateInfo +\param[out] pBuffer Buffer that was created. +\param[out] pAllocation Allocation that was created. +\param[out] pAllocationInfo Optional. Information about allocated memory. It can be later fetched using function vmaGetAllocationInfo(). + +This function automatically: + +-# Creates buffer. +-# Allocates appropriate memory for it. +-# Binds the buffer with the memory. + +If any of these operations fail, buffer and allocation are not created, +returned value is negative error code, `*pBuffer` and `*pAllocation` are null. + +If the function succeeded, you must destroy both buffer and allocation when you +no longer need them using either convenience function vmaDestroyBuffer() or +separately, using `vkDestroyBuffer()` and vmaFreeMemory(). + +If #VMA_ALLOCATOR_CREATE_KHR_DEDICATED_ALLOCATION_BIT flag was used, +VK_KHR_dedicated_allocation extension is used internally to query driver whether +it requires or prefers the new buffer to have dedicated allocation. If yes, +and if dedicated allocation is possible +(#VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT is not used), it creates dedicated +allocation for this buffer, just like when using +#VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT. + +\note This function creates a new `VkBuffer`. Sub-allocation of parts of one large buffer, +although recommended as a good practice, is out of scope of this library and could be implemented +by the user as a higher-level logic on top of VMA. 
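+
+A minimal usage sketch (error handling omitted, `allocator` is an existing #VmaAllocator):
+
+\code
+VkBufferCreateInfo bufCreateInfo = { VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO };
+bufCreateInfo.size = 65536;
+bufCreateInfo.usage = VK_BUFFER_USAGE_VERTEX_BUFFER_BIT | VK_BUFFER_USAGE_TRANSFER_DST_BIT;
+
+VmaAllocationCreateInfo allocCreateInfo = {};
+allocCreateInfo.usage = VMA_MEMORY_USAGE_AUTO;
+
+VkBuffer buf;
+VmaAllocation alloc;
+VkResult res = vmaCreateBuffer(allocator, &bufCreateInfo, &allocCreateInfo, &buf, &alloc, NULL);
+// Check res...
+
+// When no longer needed:
+vmaDestroyBuffer(allocator, buf, alloc);
+\endcode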
+*/ +VMA_CALL_PRE VkResult VMA_CALL_POST vmaCreateBuffer( + VmaAllocator VMA_NOT_NULL allocator, + const VkBufferCreateInfo* VMA_NOT_NULL pBufferCreateInfo, + const VmaAllocationCreateInfo* VMA_NOT_NULL pAllocationCreateInfo, + VkBuffer VMA_NULLABLE_NON_DISPATCHABLE* VMA_NOT_NULL pBuffer, + VmaAllocation VMA_NULLABLE* VMA_NOT_NULL pAllocation, + VmaAllocationInfo* VMA_NULLABLE pAllocationInfo); + +/** \brief Creates a buffer with additional minimum alignment. + +Similar to vmaCreateBuffer() but provides additional parameter `minAlignment` which allows to specify custom, +minimum alignment to be used when placing the buffer inside a larger memory block, which may be needed e.g. +for interop with OpenGL. +*/ +VMA_CALL_PRE VkResult VMA_CALL_POST vmaCreateBufferWithAlignment( + VmaAllocator VMA_NOT_NULL allocator, + const VkBufferCreateInfo* VMA_NOT_NULL pBufferCreateInfo, + const VmaAllocationCreateInfo* VMA_NOT_NULL pAllocationCreateInfo, + VkDeviceSize minAlignment, + VkBuffer VMA_NULLABLE_NON_DISPATCHABLE* VMA_NOT_NULL pBuffer, + VmaAllocation VMA_NULLABLE* VMA_NOT_NULL pAllocation, + VmaAllocationInfo* VMA_NULLABLE pAllocationInfo); + +/** \brief Creates a new `VkBuffer`, binds already created memory for it. + +\param allocator +\param allocation Allocation that provides memory to be used for binding new buffer to it. +\param pBufferCreateInfo +\param[out] pBuffer Buffer that was created. + +This function automatically: + +-# Creates buffer. +-# Binds the buffer with the supplied memory. + +If any of these operations fail, buffer is not created, +returned value is negative error code and `*pBuffer` is null. + +If the function succeeded, you must destroy the buffer when you +no longer need it using `vkDestroyBuffer()`. If you want to also destroy the corresponding +allocation you can use convenience function vmaDestroyBuffer(). + +\note There is a new version of this function augmented with parameter `allocationLocalOffset` - see vmaCreateAliasingBuffer2(). +*/ +VMA_CALL_PRE VkResult VMA_CALL_POST vmaCreateAliasingBuffer( + VmaAllocator VMA_NOT_NULL allocator, + VmaAllocation VMA_NOT_NULL allocation, + const VkBufferCreateInfo* VMA_NOT_NULL pBufferCreateInfo, + VkBuffer VMA_NULLABLE_NON_DISPATCHABLE* VMA_NOT_NULL pBuffer); + +/** \brief Creates a new `VkBuffer`, binds already created memory for it. + +\param allocator +\param allocation Allocation that provides memory to be used for binding new buffer to it. +\param allocationLocalOffset Additional offset to be added while binding, relative to the beginning of the allocation. Normally it should be 0. +\param pBufferCreateInfo +\param[out] pBuffer Buffer that was created. + +This function automatically: + +-# Creates buffer. +-# Binds the buffer with the supplied memory. + +If any of these operations fail, buffer is not created, +returned value is negative error code and `*pBuffer` is null. + +If the function succeeded, you must destroy the buffer when you +no longer need it using `vkDestroyBuffer()`. If you want to also destroy the corresponding +allocation you can use convenience function vmaDestroyBuffer(). + +\note This is a new version of the function augmented with parameter `allocationLocalOffset`. 
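+
+For illustration, assuming `alloc` is an existing allocation with enough properly aligned
+space at the given offset, an additional buffer can be bound into it like this:
+
+\code
+VkBufferCreateInfo bufCreateInfo = { VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO };
+bufCreateInfo.size = 4096;
+bufCreateInfo.usage = VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT | VK_BUFFER_USAGE_TRANSFER_DST_BIT;
+
+VkBuffer aliasingBuf;
+VkResult res = vmaCreateAliasingBuffer2(allocator, alloc, 65536, &bufCreateInfo, &aliasingBuf);
+// Check res...
+
+// Later: destroy only the buffer - the allocation stays valid.
+vkDestroyBuffer(device, aliasingBuf, NULL);
+\endcode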
+*/ +VMA_CALL_PRE VkResult VMA_CALL_POST vmaCreateAliasingBuffer2( + VmaAllocator VMA_NOT_NULL allocator, + VmaAllocation VMA_NOT_NULL allocation, + VkDeviceSize allocationLocalOffset, + const VkBufferCreateInfo* VMA_NOT_NULL pBufferCreateInfo, + VkBuffer VMA_NULLABLE_NON_DISPATCHABLE* VMA_NOT_NULL pBuffer); + +/** \brief Destroys Vulkan buffer and frees allocated memory. + +This is just a convenience function equivalent to: + +\code +vkDestroyBuffer(device, buffer, allocationCallbacks); +vmaFreeMemory(allocator, allocation); +\endcode + +It is safe to pass null as buffer and/or allocation. +*/ +VMA_CALL_PRE void VMA_CALL_POST vmaDestroyBuffer( + VmaAllocator VMA_NOT_NULL allocator, + VkBuffer VMA_NULLABLE_NON_DISPATCHABLE buffer, + VmaAllocation VMA_NULLABLE allocation); + +/// Function similar to vmaCreateBuffer(). +VMA_CALL_PRE VkResult VMA_CALL_POST vmaCreateImage( + VmaAllocator VMA_NOT_NULL allocator, + const VkImageCreateInfo* VMA_NOT_NULL pImageCreateInfo, + const VmaAllocationCreateInfo* VMA_NOT_NULL pAllocationCreateInfo, + VkImage VMA_NULLABLE_NON_DISPATCHABLE* VMA_NOT_NULL pImage, + VmaAllocation VMA_NULLABLE* VMA_NOT_NULL pAllocation, + VmaAllocationInfo* VMA_NULLABLE pAllocationInfo); + +/// Function similar to vmaCreateAliasingBuffer() but for images. +VMA_CALL_PRE VkResult VMA_CALL_POST vmaCreateAliasingImage( + VmaAllocator VMA_NOT_NULL allocator, + VmaAllocation VMA_NOT_NULL allocation, + const VkImageCreateInfo* VMA_NOT_NULL pImageCreateInfo, + VkImage VMA_NULLABLE_NON_DISPATCHABLE* VMA_NOT_NULL pImage); + +/// Function similar to vmaCreateAliasingBuffer2() but for images. +VMA_CALL_PRE VkResult VMA_CALL_POST vmaCreateAliasingImage2( + VmaAllocator VMA_NOT_NULL allocator, + VmaAllocation VMA_NOT_NULL allocation, + VkDeviceSize allocationLocalOffset, + const VkImageCreateInfo* VMA_NOT_NULL pImageCreateInfo, + VkImage VMA_NULLABLE_NON_DISPATCHABLE* VMA_NOT_NULL pImage); + +/** \brief Destroys Vulkan image and frees allocated memory. + +This is just a convenience function equivalent to: + +\code +vkDestroyImage(device, image, allocationCallbacks); +vmaFreeMemory(allocator, allocation); +\endcode + +It is safe to pass null as image and/or allocation. +*/ +VMA_CALL_PRE void VMA_CALL_POST vmaDestroyImage( + VmaAllocator VMA_NOT_NULL allocator, + VkImage VMA_NULLABLE_NON_DISPATCHABLE image, + VmaAllocation VMA_NULLABLE allocation); + +/** @} */ + +/** +\addtogroup group_virtual +@{ +*/ + +/** \brief Creates new #VmaVirtualBlock object. + +\param pCreateInfo Parameters for creation. +\param[out] pVirtualBlock Returned virtual block object or `VMA_NULL` if creation failed. +*/ +VMA_CALL_PRE VkResult VMA_CALL_POST vmaCreateVirtualBlock( + const VmaVirtualBlockCreateInfo* VMA_NOT_NULL pCreateInfo, + VmaVirtualBlock VMA_NULLABLE* VMA_NOT_NULL pVirtualBlock); + +/** \brief Destroys #VmaVirtualBlock object. + +Please note that you should consciously handle virtual allocations that could remain unfreed in the block. +You should either free them individually using vmaVirtualFree() or call vmaClearVirtualBlock() +if you are sure this is what you want. If you do neither, an assert is called. + +If you keep pointers to some additional metadata associated with your virtual allocations in their `pUserData`, +don't forget to free them. +*/ +VMA_CALL_PRE void VMA_CALL_POST vmaDestroyVirtualBlock( + VmaVirtualBlock VMA_NULLABLE virtualBlock); + +/** \brief Returns true of the #VmaVirtualBlock is empty - contains 0 virtual allocations and has all its space available for new allocations. 
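+
+A minimal sketch - the block is empty again once all of its virtual allocations are freed:
+
+\code
+VmaVirtualBlockCreateInfo blockCreateInfo = {};
+blockCreateInfo.size = 1048576; // 1 MB of virtual space.
+VmaVirtualBlock block;
+vmaCreateVirtualBlock(&blockCreateInfo, &block);
+
+VmaVirtualAllocationCreateInfo allocCreateInfo = {};
+allocCreateInfo.size = 4096;
+VmaVirtualAllocation alloc;
+vmaVirtualAllocate(block, &allocCreateInfo, &alloc, NULL);
+
+vmaVirtualFree(block, alloc);
+// vmaIsVirtualBlockEmpty(block) now returns VK_TRUE.
+vmaDestroyVirtualBlock(block);
+\endcode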
+*/ +VMA_CALL_PRE VkBool32 VMA_CALL_POST vmaIsVirtualBlockEmpty( + VmaVirtualBlock VMA_NOT_NULL virtualBlock); + +/** \brief Returns information about a specific virtual allocation within a virtual block, like its size and `pUserData` pointer. +*/ +VMA_CALL_PRE void VMA_CALL_POST vmaGetVirtualAllocationInfo( + VmaVirtualBlock VMA_NOT_NULL virtualBlock, + VmaVirtualAllocation VMA_NOT_NULL_NON_DISPATCHABLE allocation, VmaVirtualAllocationInfo* VMA_NOT_NULL pVirtualAllocInfo); + +/** \brief Allocates new virtual allocation inside given #VmaVirtualBlock. + +If the allocation fails due to not enough free space available, `VK_ERROR_OUT_OF_DEVICE_MEMORY` is returned +(despite the function doesn't ever allocate actual GPU memory). +`pAllocation` is then set to `VK_NULL_HANDLE` and `pOffset`, if not null, it set to `UINT64_MAX`. + +\param virtualBlock Virtual block +\param pCreateInfo Parameters for the allocation +\param[out] pAllocation Returned handle of the new allocation +\param[out] pOffset Returned offset of the new allocation. Optional, can be null. +*/ +VMA_CALL_PRE VkResult VMA_CALL_POST vmaVirtualAllocate( + VmaVirtualBlock VMA_NOT_NULL virtualBlock, + const VmaVirtualAllocationCreateInfo* VMA_NOT_NULL pCreateInfo, + VmaVirtualAllocation VMA_NULLABLE_NON_DISPATCHABLE* VMA_NOT_NULL pAllocation, + VkDeviceSize* VMA_NULLABLE pOffset); + +/** \brief Frees virtual allocation inside given #VmaVirtualBlock. + +It is correct to call this function with `allocation == VK_NULL_HANDLE` - it does nothing. +*/ +VMA_CALL_PRE void VMA_CALL_POST vmaVirtualFree( + VmaVirtualBlock VMA_NOT_NULL virtualBlock, + VmaVirtualAllocation VMA_NULLABLE_NON_DISPATCHABLE allocation); + +/** \brief Frees all virtual allocations inside given #VmaVirtualBlock. + +You must either call this function or free each virtual allocation individually with vmaVirtualFree() +before destroying a virtual block. Otherwise, an assert is called. + +If you keep pointer to some additional metadata associated with your virtual allocation in its `pUserData`, +don't forget to free it as well. +*/ +VMA_CALL_PRE void VMA_CALL_POST vmaClearVirtualBlock( + VmaVirtualBlock VMA_NOT_NULL virtualBlock); + +/** \brief Changes custom pointer associated with given virtual allocation. +*/ +VMA_CALL_PRE void VMA_CALL_POST vmaSetVirtualAllocationUserData( + VmaVirtualBlock VMA_NOT_NULL virtualBlock, + VmaVirtualAllocation VMA_NOT_NULL_NON_DISPATCHABLE allocation, + void* VMA_NULLABLE pUserData); + +/** \brief Calculates and returns statistics about virtual allocations and memory usage in given #VmaVirtualBlock. + +This function is fast to call. For more detailed statistics, see vmaCalculateVirtualBlockStatistics(). +*/ +VMA_CALL_PRE void VMA_CALL_POST vmaGetVirtualBlockStatistics( + VmaVirtualBlock VMA_NOT_NULL virtualBlock, + VmaStatistics* VMA_NOT_NULL pStats); + +/** \brief Calculates and returns detailed statistics about virtual allocations and memory usage in given #VmaVirtualBlock. + +This function is slow to call. Use for debugging purposes. +For less detailed statistics, see vmaGetVirtualBlockStatistics(). +*/ +VMA_CALL_PRE void VMA_CALL_POST vmaCalculateVirtualBlockStatistics( + VmaVirtualBlock VMA_NOT_NULL virtualBlock, + VmaDetailedStatistics* VMA_NOT_NULL pStats); + +/** @} */ + +#if VMA_STATS_STRING_ENABLED +/** +\addtogroup group_stats +@{ +*/ + +/** \brief Builds and returns a null-terminated string in JSON format with information about given #VmaVirtualBlock. +\param virtualBlock Virtual block. +\param[out] ppStatsString Returned string. 
+\param detailedMap Pass `VK_FALSE` to only obtain statistics as returned by vmaCalculateVirtualBlockStatistics(). Pass `VK_TRUE` to also obtain full list of allocations and free spaces. + +Returned string must be freed using vmaFreeVirtualBlockStatsString(). +*/ +VMA_CALL_PRE void VMA_CALL_POST vmaBuildVirtualBlockStatsString( + VmaVirtualBlock VMA_NOT_NULL virtualBlock, + char* VMA_NULLABLE* VMA_NOT_NULL ppStatsString, + VkBool32 detailedMap); + +/// Frees a string returned by vmaBuildVirtualBlockStatsString(). +VMA_CALL_PRE void VMA_CALL_POST vmaFreeVirtualBlockStatsString( + VmaVirtualBlock VMA_NOT_NULL virtualBlock, + char* VMA_NULLABLE pStatsString); + +/** \brief Builds and returns statistics as a null-terminated string in JSON format. +\param allocator +\param[out] ppStatsString Must be freed using vmaFreeStatsString() function. +\param detailedMap +*/ +VMA_CALL_PRE void VMA_CALL_POST vmaBuildStatsString( + VmaAllocator VMA_NOT_NULL allocator, + char* VMA_NULLABLE* VMA_NOT_NULL ppStatsString, + VkBool32 detailedMap); + +VMA_CALL_PRE void VMA_CALL_POST vmaFreeStatsString( + VmaAllocator VMA_NOT_NULL allocator, + char* VMA_NULLABLE pStatsString); + +/** @} */ + +#endif // VMA_STATS_STRING_ENABLED + +#endif // _VMA_FUNCTION_HEADERS + +#ifdef __cplusplus +} +#endif + +#endif // AMD_VULKAN_MEMORY_ALLOCATOR_H + +//////////////////////////////////////////////////////////////////////////////// +//////////////////////////////////////////////////////////////////////////////// +// +// IMPLEMENTATION +// +//////////////////////////////////////////////////////////////////////////////// +//////////////////////////////////////////////////////////////////////////////// + +// For Visual Studio IntelliSense. +#if defined(__cplusplus) && defined(__INTELLISENSE__) +#define VMA_IMPLEMENTATION +#endif + +#ifdef VMA_IMPLEMENTATION +#undef VMA_IMPLEMENTATION + +#include +#include +#include +#include +#include +#include + +#if !defined(VMA_CPP20) + #if __cplusplus >= 202002L || _MSVC_LANG >= 202002L // C++20 + #define VMA_CPP20 1 + #else + #define VMA_CPP20 0 + #endif +#endif + +#ifdef _MSC_VER + #include // For functions like __popcnt, _BitScanForward etc. +#endif +#if VMA_CPP20 + #include +#endif + +#if VMA_STATS_STRING_ENABLED + #include // For snprintf +#endif + +/******************************************************************************* +CONFIGURATION SECTION + +Define some of these macros before each #include of this header or change them +here if you need other then default behavior depending on your environment. +*/ +#ifndef _VMA_CONFIGURATION + +/* +Define this macro to 1 to make the library fetch pointers to Vulkan functions +internally, like: + + vulkanFunctions.vkAllocateMemory = &vkAllocateMemory; +*/ +#if !defined(VMA_STATIC_VULKAN_FUNCTIONS) && !defined(VK_NO_PROTOTYPES) + #define VMA_STATIC_VULKAN_FUNCTIONS 1 +#endif + +/* +Define this macro to 1 to make the library fetch pointers to Vulkan functions +internally, like: + + vulkanFunctions.vkAllocateMemory = (PFN_vkAllocateMemory)vkGetDeviceProcAddr(device, "vkAllocateMemory"); + +To use this feature in new versions of VMA you now have to pass +VmaVulkanFunctions::vkGetInstanceProcAddr and vkGetDeviceProcAddr as +VmaAllocatorCreateInfo::pVulkanFunctions. Other members can be null. 
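+
+A typical setup (other VmaAllocatorCreateInfo members such as physicalDevice, device,
+and instance omitted for brevity) may look like:
+
+    VmaVulkanFunctions vulkanFunctions = {};
+    vulkanFunctions.vkGetInstanceProcAddr = &vkGetInstanceProcAddr;
+    vulkanFunctions.vkGetDeviceProcAddr = &vkGetDeviceProcAddr;
+
+    VmaAllocatorCreateInfo allocatorCreateInfo = {};
+    allocatorCreateInfo.pVulkanFunctions = &vulkanFunctions;
+    // ... then pass allocatorCreateInfo to vmaCreateAllocator().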
+*/ +#if !defined(VMA_DYNAMIC_VULKAN_FUNCTIONS) + #define VMA_DYNAMIC_VULKAN_FUNCTIONS 1 +#endif + +#ifndef VMA_USE_STL_SHARED_MUTEX + #if __cplusplus >= 201703L || _MSVC_LANG >= 201703L // C++17 + #define VMA_USE_STL_SHARED_MUTEX 1 + // Visual studio defines __cplusplus properly only when passed additional parameter: /Zc:__cplusplus + // Otherwise it is always 199711L, despite shared_mutex works since Visual Studio 2015 Update 2. + #elif defined(_MSC_FULL_VER) && _MSC_FULL_VER >= 190023918 && __cplusplus == 199711L && _MSVC_LANG >= 201703L + #define VMA_USE_STL_SHARED_MUTEX 1 + #else + #define VMA_USE_STL_SHARED_MUTEX 0 + #endif +#endif + +/* +Define this macro to include custom header files without having to edit this file directly, e.g.: + + // Inside of "my_vma_configuration_user_includes.h": + + #include "my_custom_assert.h" // for MY_CUSTOM_ASSERT + #include "my_custom_min.h" // for my_custom_min + #include + #include + + // Inside a different file, which includes "vk_mem_alloc.h": + + #define VMA_CONFIGURATION_USER_INCLUDES_H "my_vma_configuration_user_includes.h" + #define VMA_ASSERT(expr) MY_CUSTOM_ASSERT(expr) + #define VMA_MIN(v1, v2) (my_custom_min(v1, v2)) + #include "vk_mem_alloc.h" + ... + +The following headers are used in this CONFIGURATION section only, so feel free to +remove them if not needed. +*/ +#if !defined(VMA_CONFIGURATION_USER_INCLUDES_H) + #include // for assert + #include // for min, max, swap + #include +#else + #include VMA_CONFIGURATION_USER_INCLUDES_H +#endif + +#ifndef VMA_NULL + // Value used as null pointer. Define it to e.g.: nullptr, NULL, 0, (void*)0. + #define VMA_NULL nullptr +#endif + +#ifndef VMA_FALLTHROUGH + #if __cplusplus >= 201703L || _MSVC_LANG >= 201703L // C++17 + #define VMA_FALLTHROUGH [[fallthrough]] + #else + #define VMA_FALLTHROUGH + #endif +#endif + +// Normal assert to check for programmer's errors, especially in Debug configuration. +#ifndef VMA_ASSERT + #ifdef NDEBUG + #define VMA_ASSERT(expr) + #else + #define VMA_ASSERT(expr) assert(expr) + #endif +#endif + +// Assert that will be called very often, like inside data structures e.g. operator[]. +// Making it non-empty can make program slow. +#ifndef VMA_HEAVY_ASSERT + #ifdef NDEBUG + #define VMA_HEAVY_ASSERT(expr) + #else + #define VMA_HEAVY_ASSERT(expr) //VMA_ASSERT(expr) + #endif +#endif + +// Assert used for reporting memory leaks - unfreed allocations. +#ifndef VMA_ASSERT_LEAK + #define VMA_ASSERT_LEAK(expr) VMA_ASSERT(expr) +#endif + +// If your compiler is not compatible with C++17 and definition of +// aligned_alloc() function is missing, uncommenting following line may help: + +//#include + +#if defined(__ANDROID_API__) && (__ANDROID_API__ < 16) +#include +static void* vma_aligned_alloc(size_t alignment, size_t size) +{ + // alignment must be >= sizeof(void*) + if(alignment < sizeof(void*)) + { + alignment = sizeof(void*); + } + + return memalign(alignment, size); +} +#elif defined(__APPLE__) || defined(__ANDROID__) || (defined(__linux__) && defined(__GLIBCXX__) && !defined(_GLIBCXX_HAVE_ALIGNED_ALLOC)) +#include + +#if defined(__APPLE__) +#include +#endif + +static void* vma_aligned_alloc(size_t alignment, size_t size) +{ + // Unfortunately, aligned_alloc causes VMA to crash due to it returning null pointers. (At least under 11.4) + // Therefore, for now disable this specific exception until a proper solution is found. 
+ //#if defined(__APPLE__) && (defined(MAC_OS_X_VERSION_10_16) || defined(__IPHONE_14_0)) + //#if MAC_OS_X_VERSION_MAX_ALLOWED >= MAC_OS_X_VERSION_10_16 || __IPHONE_OS_VERSION_MAX_ALLOWED >= __IPHONE_14_0 + // // For C++14, usr/include/malloc/_malloc.h declares aligned_alloc()) only + // // with the MacOSX11.0 SDK in Xcode 12 (which is what adds + // // MAC_OS_X_VERSION_10_16), even though the function is marked + // // available for 10.15. That is why the preprocessor checks for 10.16 but + // // the __builtin_available checks for 10.15. + // // People who use C++17 could call aligned_alloc with the 10.15 SDK already. + // if (__builtin_available(macOS 10.15, iOS 13, *)) + // return aligned_alloc(alignment, size); + //#endif + //#endif + + // alignment must be >= sizeof(void*) + if(alignment < sizeof(void*)) + { + alignment = sizeof(void*); + } + + void *pointer; + if(posix_memalign(&pointer, alignment, size) == 0) + return pointer; + return VMA_NULL; +} +#elif defined(_WIN32) +static void* vma_aligned_alloc(size_t alignment, size_t size) +{ + return _aligned_malloc(size, alignment); +} +#elif __cplusplus >= 201703L || _MSVC_LANG >= 201703L // C++17 +static void* vma_aligned_alloc(size_t alignment, size_t size) +{ + return aligned_alloc(alignment, size); +} +#else +static void* vma_aligned_alloc(size_t alignment, size_t size) +{ + VMA_ASSERT(0 && "Could not implement aligned_alloc automatically. Please enable C++17 or later in your compiler or provide custom implementation of macro VMA_SYSTEM_ALIGNED_MALLOC (and VMA_SYSTEM_ALIGNED_FREE if needed) using the API of your system."); + return VMA_NULL; +} +#endif + +#if defined(_WIN32) +static void vma_aligned_free(void* ptr) +{ + _aligned_free(ptr); +} +#else +static void vma_aligned_free(void* VMA_NULLABLE ptr) +{ + free(ptr); +} +#endif + +#ifndef VMA_ALIGN_OF + #define VMA_ALIGN_OF(type) (alignof(type)) +#endif + +#ifndef VMA_SYSTEM_ALIGNED_MALLOC + #define VMA_SYSTEM_ALIGNED_MALLOC(size, alignment) vma_aligned_alloc((alignment), (size)) +#endif + +#ifndef VMA_SYSTEM_ALIGNED_FREE + // VMA_SYSTEM_FREE is the old name, but might have been defined by the user + #if defined(VMA_SYSTEM_FREE) + #define VMA_SYSTEM_ALIGNED_FREE(ptr) VMA_SYSTEM_FREE(ptr) + #else + #define VMA_SYSTEM_ALIGNED_FREE(ptr) vma_aligned_free(ptr) + #endif +#endif + +#ifndef VMA_COUNT_BITS_SET + // Returns number of bits set to 1 in (v) + #define VMA_COUNT_BITS_SET(v) VmaCountBitsSet(v) +#endif + +#ifndef VMA_BITSCAN_LSB + // Scans integer for index of first nonzero value from the Least Significant Bit (LSB). If mask is 0 then returns UINT8_MAX + #define VMA_BITSCAN_LSB(mask) VmaBitScanLSB(mask) +#endif + +#ifndef VMA_BITSCAN_MSB + // Scans integer for index of first nonzero value from the Most Significant Bit (MSB). If mask is 0 then returns UINT8_MAX + #define VMA_BITSCAN_MSB(mask) VmaBitScanMSB(mask) +#endif + +#ifndef VMA_MIN + #define VMA_MIN(v1, v2) ((std::min)((v1), (v2))) +#endif + +#ifndef VMA_MAX + #define VMA_MAX(v1, v2) ((std::max)((v1), (v2))) +#endif + +#ifndef VMA_SORT + #define VMA_SORT(beg, end, cmp) std::sort(beg, end, cmp) +#endif + +#ifndef VMA_DEBUG_LOG_FORMAT + #define VMA_DEBUG_LOG_FORMAT(format, ...) + /* + #define VMA_DEBUG_LOG_FORMAT(format, ...) do { \ + printf((format), __VA_ARGS__); \ + printf("\n"); \ + } while(false) + */ +#endif + +#ifndef VMA_DEBUG_LOG + #define VMA_DEBUG_LOG(str) VMA_DEBUG_LOG_FORMAT("%s", (str)) +#endif + +#ifndef VMA_LEAK_LOG_FORMAT + #define VMA_LEAK_LOG_FORMAT(format, ...) 
VMA_DEBUG_LOG_FORMAT(format, __VA_ARGS__) +#endif + +#ifndef VMA_CLASS_NO_COPY + #define VMA_CLASS_NO_COPY(className) \ + private: \ + className(const className&) = delete; \ + className& operator=(const className&) = delete; +#endif +#ifndef VMA_CLASS_NO_COPY_NO_MOVE + #define VMA_CLASS_NO_COPY_NO_MOVE(className) \ + private: \ + className(const className&) = delete; \ + className(className&&) = delete; \ + className& operator=(const className&) = delete; \ + className& operator=(className&&) = delete; +#endif + +// Define this macro to 1 to enable functions: vmaBuildStatsString, vmaFreeStatsString. +#if VMA_STATS_STRING_ENABLED + static inline void VmaUint32ToStr(char* VMA_NOT_NULL outStr, size_t strLen, uint32_t num) + { + snprintf(outStr, strLen, "%" PRIu32, num); + } + static inline void VmaUint64ToStr(char* VMA_NOT_NULL outStr, size_t strLen, uint64_t num) + { + snprintf(outStr, strLen, "%" PRIu64, num); + } + static inline void VmaPtrToStr(char* VMA_NOT_NULL outStr, size_t strLen, const void* ptr) + { + snprintf(outStr, strLen, "%p", ptr); + } +#endif + +#ifndef VMA_MUTEX + class VmaMutex + { + VMA_CLASS_NO_COPY_NO_MOVE(VmaMutex) + public: + VmaMutex() { } + void Lock() { m_Mutex.lock(); } + void Unlock() { m_Mutex.unlock(); } + bool TryLock() { return m_Mutex.try_lock(); } + private: + std::mutex m_Mutex; + }; + #define VMA_MUTEX VmaMutex +#endif + +// Read-write mutex, where "read" is shared access, "write" is exclusive access. +#ifndef VMA_RW_MUTEX + #if VMA_USE_STL_SHARED_MUTEX + // Use std::shared_mutex from C++17. + #include + class VmaRWMutex + { + public: + void LockRead() { m_Mutex.lock_shared(); } + void UnlockRead() { m_Mutex.unlock_shared(); } + bool TryLockRead() { return m_Mutex.try_lock_shared(); } + void LockWrite() { m_Mutex.lock(); } + void UnlockWrite() { m_Mutex.unlock(); } + bool TryLockWrite() { return m_Mutex.try_lock(); } + private: + std::shared_mutex m_Mutex; + }; + #define VMA_RW_MUTEX VmaRWMutex + #elif defined(_WIN32) && defined(WINVER) && defined(SRWLOCK_INIT) && WINVER >= 0x0600 + // Use SRWLOCK from WinAPI. + // Minimum supported client = Windows Vista, server = Windows Server 2008. + class VmaRWMutex + { + public: + VmaRWMutex() { InitializeSRWLock(&m_Lock); } + void LockRead() { AcquireSRWLockShared(&m_Lock); } + void UnlockRead() { ReleaseSRWLockShared(&m_Lock); } + bool TryLockRead() { return TryAcquireSRWLockShared(&m_Lock) != FALSE; } + void LockWrite() { AcquireSRWLockExclusive(&m_Lock); } + void UnlockWrite() { ReleaseSRWLockExclusive(&m_Lock); } + bool TryLockWrite() { return TryAcquireSRWLockExclusive(&m_Lock) != FALSE; } + private: + SRWLOCK m_Lock; + }; + #define VMA_RW_MUTEX VmaRWMutex + #else + // Less efficient fallback: Use normal mutex. + class VmaRWMutex + { + public: + void LockRead() { m_Mutex.Lock(); } + void UnlockRead() { m_Mutex.Unlock(); } + bool TryLockRead() { return m_Mutex.TryLock(); } + void LockWrite() { m_Mutex.Lock(); } + void UnlockWrite() { m_Mutex.Unlock(); } + bool TryLockWrite() { return m_Mutex.TryLock(); } + private: + VMA_MUTEX m_Mutex; + }; + #define VMA_RW_MUTEX VmaRWMutex + #endif // #if VMA_USE_STL_SHARED_MUTEX +#endif // #ifndef VMA_RW_MUTEX + +/* +If providing your own implementation, you need to implement a subset of std::atomic. +*/ +#ifndef VMA_ATOMIC_UINT32 + #include + #define VMA_ATOMIC_UINT32 std::atomic +#endif + +#ifndef VMA_ATOMIC_UINT64 + #include + #define VMA_ATOMIC_UINT64 std::atomic +#endif + +#ifndef VMA_DEBUG_ALWAYS_DEDICATED_MEMORY + /** + Every allocation will have its own memory block. 
+ Define to 1 for debugging purposes only. + */ + #define VMA_DEBUG_ALWAYS_DEDICATED_MEMORY (0) +#endif + +#ifndef VMA_MIN_ALIGNMENT + /** + Minimum alignment of all allocations, in bytes. + Set to more than 1 for debugging purposes. Must be power of two. + */ + #ifdef VMA_DEBUG_ALIGNMENT // Old name + #define VMA_MIN_ALIGNMENT VMA_DEBUG_ALIGNMENT + #else + #define VMA_MIN_ALIGNMENT (1) + #endif +#endif + +#ifndef VMA_DEBUG_MARGIN + /** + Minimum margin after every allocation, in bytes. + Set nonzero for debugging purposes only. + */ + #define VMA_DEBUG_MARGIN (0) +#endif + +#ifndef VMA_DEBUG_INITIALIZE_ALLOCATIONS + /** + Define this macro to 1 to automatically fill new allocations and destroyed + allocations with some bit pattern. + */ + #define VMA_DEBUG_INITIALIZE_ALLOCATIONS (0) +#endif + +#ifndef VMA_DEBUG_DETECT_CORRUPTION + /** + Define this macro to 1 together with non-zero value of VMA_DEBUG_MARGIN to + enable writing magic value to the margin after every allocation and + validating it, so that memory corruptions (out-of-bounds writes) are detected. + */ + #define VMA_DEBUG_DETECT_CORRUPTION (0) +#endif + +#ifndef VMA_DEBUG_GLOBAL_MUTEX + /** + Set this to 1 for debugging purposes only, to enable single mutex protecting all + entry calls to the library. Can be useful for debugging multithreading issues. + */ + #define VMA_DEBUG_GLOBAL_MUTEX (0) +#endif + +#ifndef VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY + /** + Minimum value for VkPhysicalDeviceLimits::bufferImageGranularity. + Set to more than 1 for debugging purposes only. Must be power of two. + */ + #define VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY (1) +#endif + +#ifndef VMA_DEBUG_DONT_EXCEED_MAX_MEMORY_ALLOCATION_COUNT + /* + Set this to 1 to make VMA never exceed VkPhysicalDeviceLimits::maxMemoryAllocationCount + and return error instead of leaving up to Vulkan implementation what to do in such cases. + */ + #define VMA_DEBUG_DONT_EXCEED_MAX_MEMORY_ALLOCATION_COUNT (0) +#endif + +#ifndef VMA_SMALL_HEAP_MAX_SIZE + /// Maximum size of a memory heap in Vulkan to consider it "small". + #define VMA_SMALL_HEAP_MAX_SIZE (1024ull * 1024 * 1024) +#endif + +#ifndef VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE + /// Default size of a block allocated as single VkDeviceMemory from a "large" heap. + #define VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE (256ull * 1024 * 1024) +#endif + +/* +Mapping hysteresis is a logic that launches when vmaMapMemory/vmaUnmapMemory is called +or a persistently mapped allocation is created and destroyed several times in a row. +It keeps additional +1 mapping of a device memory block to prevent calling actual +vkMapMemory/vkUnmapMemory too many times, which may improve performance and help +tools like RenderDoc. +*/ +#ifndef VMA_MAPPING_HYSTERESIS_ENABLED + #define VMA_MAPPING_HYSTERESIS_ENABLED 1 +#endif + +#define VMA_VALIDATE(cond) do { if(!(cond)) { \ + VMA_ASSERT(0 && "Validation failed: " #cond); \ + return false; \ + } } while(false) + +/******************************************************************************* +END OF CONFIGURATION +*/ +#endif // _VMA_CONFIGURATION + + +static const uint8_t VMA_ALLOCATION_FILL_PATTERN_CREATED = 0xDC; +static const uint8_t VMA_ALLOCATION_FILL_PATTERN_DESTROYED = 0xEF; +// Decimal 2139416166, float NaN, little-endian binary 66 E6 84 7F. +static const uint32_t VMA_CORRUPTION_DETECTION_MAGIC_VALUE = 0x7F84E666; + +// Copy of some Vulkan definitions so we don't need to check their existence just to handle few constants. 
+static const uint32_t VK_MEMORY_PROPERTY_DEVICE_COHERENT_BIT_AMD_COPY = 0x00000040; +static const uint32_t VK_MEMORY_PROPERTY_DEVICE_UNCACHED_BIT_AMD_COPY = 0x00000080; +static const uint32_t VK_BUFFER_USAGE_SHADER_DEVICE_ADDRESS_BIT_COPY = 0x00020000; +static const uint32_t VK_IMAGE_CREATE_DISJOINT_BIT_COPY = 0x00000200; +static const int32_t VK_IMAGE_TILING_DRM_FORMAT_MODIFIER_EXT_COPY = 1000158000; +static const uint32_t VMA_ALLOCATION_INTERNAL_STRATEGY_MIN_OFFSET = 0x10000000u; +static const uint32_t VMA_ALLOCATION_TRY_COUNT = 32; +static const uint32_t VMA_VENDOR_ID_AMD = 4098; + +// This one is tricky. Vulkan specification defines this code as available since +// Vulkan 1.0, but doesn't actually define it in Vulkan SDK earlier than 1.2.131. +// See pull request #207. +#define VK_ERROR_UNKNOWN_COPY ((VkResult)-13) + + +#if VMA_STATS_STRING_ENABLED +// Correspond to values of enum VmaSuballocationType. +static const char* VMA_SUBALLOCATION_TYPE_NAMES[] = +{ + "FREE", + "UNKNOWN", + "BUFFER", + "IMAGE_UNKNOWN", + "IMAGE_LINEAR", + "IMAGE_OPTIMAL", +}; +#endif + +static VkAllocationCallbacks VmaEmptyAllocationCallbacks = + { VMA_NULL, VMA_NULL, VMA_NULL, VMA_NULL, VMA_NULL, VMA_NULL }; + + +#ifndef _VMA_ENUM_DECLARATIONS + +enum VmaSuballocationType +{ + VMA_SUBALLOCATION_TYPE_FREE = 0, + VMA_SUBALLOCATION_TYPE_UNKNOWN = 1, + VMA_SUBALLOCATION_TYPE_BUFFER = 2, + VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN = 3, + VMA_SUBALLOCATION_TYPE_IMAGE_LINEAR = 4, + VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL = 5, + VMA_SUBALLOCATION_TYPE_MAX_ENUM = 0x7FFFFFFF +}; + +enum VMA_CACHE_OPERATION +{ + VMA_CACHE_FLUSH, + VMA_CACHE_INVALIDATE +}; + +enum class VmaAllocationRequestType +{ + Normal, + TLSF, + // Used by "Linear" algorithm. + UpperAddress, + EndOf1st, + EndOf2nd, +}; + +#endif // _VMA_ENUM_DECLARATIONS + +#ifndef _VMA_FORWARD_DECLARATIONS +// Opaque handle used by allocation algorithms to identify single allocation in any conforming way. +VK_DEFINE_NON_DISPATCHABLE_HANDLE(VmaAllocHandle); + +struct VmaMutexLock; +struct VmaMutexLockRead; +struct VmaMutexLockWrite; + +template +struct AtomicTransactionalIncrement; + +template +struct VmaStlAllocator; + +template +class VmaVector; + +template +class VmaSmallVector; + +template +class VmaPoolAllocator; + +template +struct VmaListItem; + +template +class VmaRawList; + +template +class VmaList; + +template +class VmaIntrusiveLinkedList; + +#if VMA_STATS_STRING_ENABLED +class VmaStringBuilder; +class VmaJsonWriter; +#endif + +class VmaDeviceMemoryBlock; + +struct VmaDedicatedAllocationListItemTraits; +class VmaDedicatedAllocationList; + +struct VmaSuballocation; +struct VmaSuballocationOffsetLess; +struct VmaSuballocationOffsetGreater; +struct VmaSuballocationItemSizeLess; + +typedef VmaList> VmaSuballocationList; + +struct VmaAllocationRequest; + +class VmaBlockMetadata; +class VmaBlockMetadata_Linear; +class VmaBlockMetadata_TLSF; + +class VmaBlockVector; + +struct VmaPoolListItemTraits; + +struct VmaCurrentBudgetData; + +class VmaAllocationObjectAllocator; + +#endif // _VMA_FORWARD_DECLARATIONS + + +#ifndef _VMA_FUNCTIONS + +/* +Returns number of bits set to 1 in (v). + +On specific platforms and compilers you can use intrinsics like: + +Visual Studio: + return __popcnt(v); +GCC, Clang: + return static_cast(__builtin_popcount(v)); + +Define macro VMA_COUNT_BITS_SET to provide your optimized implementation. +But you need to check in runtime whether user's CPU supports these, as some old processors don't. 
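+
+For example, if all target CPUs are known to support the intrinsic, it can be selected
+by defining the macro before including this header:
+
+    #define VMA_COUNT_BITS_SET(v) ((uint32_t)__builtin_popcount(v))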
+*/ +static inline uint32_t VmaCountBitsSet(uint32_t v) +{ +#if VMA_CPP20 + return std::popcount(v); +#else + uint32_t c = v - ((v >> 1) & 0x55555555); + c = ((c >> 2) & 0x33333333) + (c & 0x33333333); + c = ((c >> 4) + c) & 0x0F0F0F0F; + c = ((c >> 8) + c) & 0x00FF00FF; + c = ((c >> 16) + c) & 0x0000FFFF; + return c; +#endif +} + +static inline uint8_t VmaBitScanLSB(uint64_t mask) +{ +#if defined(_MSC_VER) && defined(_WIN64) + unsigned long pos; + if (_BitScanForward64(&pos, mask)) + return static_cast(pos); + return UINT8_MAX; +#elif VMA_CPP20 + if(mask) + return static_cast(std::countr_zero(mask)); + return UINT8_MAX; +#elif defined __GNUC__ || defined __clang__ + return static_cast(__builtin_ffsll(mask)) - 1U; +#else + uint8_t pos = 0; + uint64_t bit = 1; + do + { + if (mask & bit) + return pos; + bit <<= 1; + } while (pos++ < 63); + return UINT8_MAX; +#endif +} + +static inline uint8_t VmaBitScanLSB(uint32_t mask) +{ +#ifdef _MSC_VER + unsigned long pos; + if (_BitScanForward(&pos, mask)) + return static_cast(pos); + return UINT8_MAX; +#elif VMA_CPP20 + if(mask) + return static_cast(std::countr_zero(mask)); + return UINT8_MAX; +#elif defined __GNUC__ || defined __clang__ + return static_cast(__builtin_ffs(mask)) - 1U; +#else + uint8_t pos = 0; + uint32_t bit = 1; + do + { + if (mask & bit) + return pos; + bit <<= 1; + } while (pos++ < 31); + return UINT8_MAX; +#endif +} + +static inline uint8_t VmaBitScanMSB(uint64_t mask) +{ +#if defined(_MSC_VER) && defined(_WIN64) + unsigned long pos; + if (_BitScanReverse64(&pos, mask)) + return static_cast(pos); +#elif VMA_CPP20 + if(mask) + return 63 - static_cast(std::countl_zero(mask)); +#elif defined __GNUC__ || defined __clang__ + if (mask) + return 63 - static_cast(__builtin_clzll(mask)); +#else + uint8_t pos = 63; + uint64_t bit = 1ULL << 63; + do + { + if (mask & bit) + return pos; + bit >>= 1; + } while (pos-- > 0); +#endif + return UINT8_MAX; +} + +static inline uint8_t VmaBitScanMSB(uint32_t mask) +{ +#ifdef _MSC_VER + unsigned long pos; + if (_BitScanReverse(&pos, mask)) + return static_cast(pos); +#elif VMA_CPP20 + if(mask) + return 31 - static_cast(std::countl_zero(mask)); +#elif defined __GNUC__ || defined __clang__ + if (mask) + return 31 - static_cast(__builtin_clz(mask)); +#else + uint8_t pos = 31; + uint32_t bit = 1UL << 31; + do + { + if (mask & bit) + return pos; + bit >>= 1; + } while (pos-- > 0); +#endif + return UINT8_MAX; +} + +/* +Returns true if given number is a power of two. +T must be unsigned integer number or signed integer but always nonnegative. +For 0 returns true. +*/ +template +inline bool VmaIsPow2(T x) +{ + return (x & (x - 1)) == 0; +} + +// Aligns given value up to nearest multiply of align value. For example: VmaAlignUp(11, 8) = 16. +// Use types like uint32_t, uint64_t as T. +template +static inline T VmaAlignUp(T val, T alignment) +{ + VMA_HEAVY_ASSERT(VmaIsPow2(alignment)); + return (val + alignment - 1) & ~(alignment - 1); +} + +// Aligns given value down to nearest multiply of align value. For example: VmaAlignDown(11, 8) = 8. +// Use types like uint32_t, uint64_t as T. +template +static inline T VmaAlignDown(T val, T alignment) +{ + VMA_HEAVY_ASSERT(VmaIsPow2(alignment)); + return val & ~(alignment - 1); +} + +// Division with mathematical rounding to nearest number. +template +static inline T VmaRoundDiv(T x, T y) +{ + return (x + (y / (T)2)) / y; +} + +// Divide by 'y' and round up to nearest integer. 
+template +static inline T VmaDivideRoundingUp(T x, T y) +{ + return (x + y - (T)1) / y; +} + +// Returns smallest power of 2 greater or equal to v. +static inline uint32_t VmaNextPow2(uint32_t v) +{ + v--; + v |= v >> 1; + v |= v >> 2; + v |= v >> 4; + v |= v >> 8; + v |= v >> 16; + v++; + return v; +} + +static inline uint64_t VmaNextPow2(uint64_t v) +{ + v--; + v |= v >> 1; + v |= v >> 2; + v |= v >> 4; + v |= v >> 8; + v |= v >> 16; + v |= v >> 32; + v++; + return v; +} + +// Returns largest power of 2 less or equal to v. +static inline uint32_t VmaPrevPow2(uint32_t v) +{ + v |= v >> 1; + v |= v >> 2; + v |= v >> 4; + v |= v >> 8; + v |= v >> 16; + v = v ^ (v >> 1); + return v; +} + +static inline uint64_t VmaPrevPow2(uint64_t v) +{ + v |= v >> 1; + v |= v >> 2; + v |= v >> 4; + v |= v >> 8; + v |= v >> 16; + v |= v >> 32; + v = v ^ (v >> 1); + return v; +} + +static inline bool VmaStrIsEmpty(const char* pStr) +{ + return pStr == VMA_NULL || *pStr == '\0'; +} + +/* +Returns true if two memory blocks occupy overlapping pages. +ResourceA must be in less memory offset than ResourceB. + +Algorithm is based on "Vulkan 1.0.39 - A Specification (with all registered Vulkan extensions)" +chapter 11.6 "Resource Memory Association", paragraph "Buffer-Image Granularity". +*/ +static inline bool VmaBlocksOnSamePage( + VkDeviceSize resourceAOffset, + VkDeviceSize resourceASize, + VkDeviceSize resourceBOffset, + VkDeviceSize pageSize) +{ + VMA_ASSERT(resourceAOffset + resourceASize <= resourceBOffset && resourceASize > 0 && pageSize > 0); + VkDeviceSize resourceAEnd = resourceAOffset + resourceASize - 1; + VkDeviceSize resourceAEndPage = resourceAEnd & ~(pageSize - 1); + VkDeviceSize resourceBStart = resourceBOffset; + VkDeviceSize resourceBStartPage = resourceBStart & ~(pageSize - 1); + return resourceAEndPage == resourceBStartPage; +} + +/* +Returns true if given suballocation types could conflict and must respect +VkPhysicalDeviceLimits::bufferImageGranularity. They conflict if one is buffer +or linear image and another one is optimal image. If type is unknown, behave +conservatively. 
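+
+For example (matching the switch below):
+
+    VmaIsBufferImageGranularityConflict(VMA_SUBALLOCATION_TYPE_BUFFER, VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL) // true
+    VmaIsBufferImageGranularityConflict(VMA_SUBALLOCATION_TYPE_BUFFER, VMA_SUBALLOCATION_TYPE_IMAGE_LINEAR)  // false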
+*/ +static inline bool VmaIsBufferImageGranularityConflict( + VmaSuballocationType suballocType1, + VmaSuballocationType suballocType2) +{ + if (suballocType1 > suballocType2) + { + std::swap(suballocType1, suballocType2); + } + + switch (suballocType1) + { + case VMA_SUBALLOCATION_TYPE_FREE: + return false; + case VMA_SUBALLOCATION_TYPE_UNKNOWN: + return true; + case VMA_SUBALLOCATION_TYPE_BUFFER: + return + suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN || + suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL; + case VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN: + return + suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN || + suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_LINEAR || + suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL; + case VMA_SUBALLOCATION_TYPE_IMAGE_LINEAR: + return + suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL; + case VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL: + return false; + default: + VMA_ASSERT(0); + return true; + } +} + +static void VmaWriteMagicValue(void* pData, VkDeviceSize offset) +{ +#if VMA_DEBUG_MARGIN > 0 && VMA_DEBUG_DETECT_CORRUPTION + uint32_t* pDst = (uint32_t*)((char*)pData + offset); + const size_t numberCount = VMA_DEBUG_MARGIN / sizeof(uint32_t); + for (size_t i = 0; i < numberCount; ++i, ++pDst) + { + *pDst = VMA_CORRUPTION_DETECTION_MAGIC_VALUE; + } +#else + // no-op +#endif +} + +static bool VmaValidateMagicValue(const void* pData, VkDeviceSize offset) +{ +#if VMA_DEBUG_MARGIN > 0 && VMA_DEBUG_DETECT_CORRUPTION + const uint32_t* pSrc = (const uint32_t*)((const char*)pData + offset); + const size_t numberCount = VMA_DEBUG_MARGIN / sizeof(uint32_t); + for (size_t i = 0; i < numberCount; ++i, ++pSrc) + { + if (*pSrc != VMA_CORRUPTION_DETECTION_MAGIC_VALUE) + { + return false; + } + } +#endif + return true; +} + +/* +Fills structure with parameters of an example buffer to be used for transfers +during GPU memory defragmentation. +*/ +static void VmaFillGpuDefragmentationBufferCreateInfo(VkBufferCreateInfo& outBufCreateInfo) +{ + memset(&outBufCreateInfo, 0, sizeof(outBufCreateInfo)); + outBufCreateInfo.sType = VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO; + outBufCreateInfo.usage = VK_BUFFER_USAGE_TRANSFER_SRC_BIT | VK_BUFFER_USAGE_TRANSFER_DST_BIT; + outBufCreateInfo.size = (VkDeviceSize)VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE; // Example size. +} + + +/* +Performs binary search and returns iterator to first element that is greater or +equal to (key), according to comparison (cmp). + +Cmp should return true if first argument is less than second argument. + +Returned value is the found element, if present in the collection or place where +new element with value (key) should be inserted. +*/ +template +static IterT VmaBinaryFindFirstNotLess(IterT beg, IterT end, const KeyT& key, const CmpLess& cmp) +{ + size_t down = 0, up = size_t(end - beg); + while (down < up) + { + const size_t mid = down + (up - down) / 2; // Overflow-safe midpoint calculation + if (cmp(*(beg + mid), key)) + { + down = mid + 1; + } + else + { + up = mid; + } + } + return beg + down; +} + +template +IterT VmaBinaryFindSorted(const IterT& beg, const IterT& end, const KeyT& value, const CmpLess& cmp) +{ + IterT it = VmaBinaryFindFirstNotLess( + beg, end, value, cmp); + if (it == end || + (!cmp(*it, value) && !cmp(value, *it))) + { + return it; + } + return end; +} + +/* +Returns true if all pointers in the array are not-null and unique. +Warning! O(n^2) complexity. Use only inside VMA_HEAVY_ASSERT. +T must be pointer type, e.g. VmaAllocation, VmaPool. 
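+
+Typical use, with placeholder names for the array being checked:
+
+    VMA_HEAVY_ASSERT(VmaValidatePointerArray(allocationCount, pAllocations));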
+*/ +template +static bool VmaValidatePointerArray(uint32_t count, const T* arr) +{ + for (uint32_t i = 0; i < count; ++i) + { + const T iPtr = arr[i]; + if (iPtr == VMA_NULL) + { + return false; + } + for (uint32_t j = i + 1; j < count; ++j) + { + if (iPtr == arr[j]) + { + return false; + } + } + } + return true; +} + +template +static inline void VmaPnextChainPushFront(MainT* mainStruct, NewT* newStruct) +{ + newStruct->pNext = mainStruct->pNext; + mainStruct->pNext = newStruct; +} +// Finds structure with s->sType == sType in mainStruct->pNext chain. +// Returns pointer to it. If not found, returns null. +template +static inline const FindT* VmaPnextChainFind(const MainT* mainStruct, VkStructureType sType) +{ + for(const VkBaseInStructure* s = (const VkBaseInStructure*)mainStruct->pNext; + s != VMA_NULL; s = s->pNext) + { + if(s->sType == sType) + { + return (const FindT*)s; + } + } + return VMA_NULL; +} + +// An abstraction over buffer or image `usage` flags, depending on available extensions. +struct VmaBufferImageUsage +{ +#if VMA_KHR_MAINTENANCE5 + typedef uint64_t BaseType; // VkFlags64 +#else + typedef uint32_t BaseType; // VkFlags32 +#endif + + static const VmaBufferImageUsage UNKNOWN; + + BaseType Value; + + VmaBufferImageUsage() { *this = UNKNOWN; } + explicit VmaBufferImageUsage(BaseType usage) : Value(usage) { } + VmaBufferImageUsage(const VkBufferCreateInfo &createInfo, bool useKhrMaintenance5); + explicit VmaBufferImageUsage(const VkImageCreateInfo &createInfo); + + bool operator==(const VmaBufferImageUsage& rhs) const { return Value == rhs.Value; } + bool operator!=(const VmaBufferImageUsage& rhs) const { return Value != rhs.Value; } + + bool Contains(BaseType flag) const { return (Value & flag) != 0; } + bool ContainsDeviceAccess() const + { + // This relies on values of VK_IMAGE_USAGE_TRANSFER* being the same as VK_BUFFER_IMAGE_TRANSFER*. + return (Value & ~BaseType(VK_BUFFER_USAGE_TRANSFER_DST_BIT | VK_BUFFER_USAGE_TRANSFER_SRC_BIT)) != 0; + } +}; + +const VmaBufferImageUsage VmaBufferImageUsage::UNKNOWN = VmaBufferImageUsage(0); + +VmaBufferImageUsage::VmaBufferImageUsage(const VkBufferCreateInfo &createInfo, + bool useKhrMaintenance5) +{ +#if VMA_KHR_MAINTENANCE5 + if(useKhrMaintenance5) + { + // If VkBufferCreateInfo::pNext chain contains VkBufferUsageFlags2CreateInfoKHR, + // take usage from it and ignore VkBufferCreateInfo::usage, per specification + // of the VK_KHR_maintenance5 extension. + const VkBufferUsageFlags2CreateInfoKHR* const usageFlags2 = + VmaPnextChainFind(&createInfo, VK_STRUCTURE_TYPE_BUFFER_USAGE_FLAGS_2_CREATE_INFO_KHR); + if(usageFlags2) + { + this->Value = usageFlags2->usage; + return; + } + } +#endif + + this->Value = (BaseType)createInfo.usage; +} + +VmaBufferImageUsage::VmaBufferImageUsage(const VkImageCreateInfo &createInfo) +{ + // Maybe in the future there will be VK_KHR_maintenanceN extension with structure + // VkImageUsageFlags2CreateInfoKHR, like the one for buffers... + + this->Value = (BaseType)createInfo.usage; +} + +// This is the main algorithm that guides the selection of a memory type best for an allocation - +// converts usage to required/preferred/not preferred flags. 
+static bool FindMemoryPreferences( + bool isIntegratedGPU, + const VmaAllocationCreateInfo& allocCreateInfo, + VmaBufferImageUsage bufImgUsage, + VkMemoryPropertyFlags& outRequiredFlags, + VkMemoryPropertyFlags& outPreferredFlags, + VkMemoryPropertyFlags& outNotPreferredFlags) +{ + outRequiredFlags = allocCreateInfo.requiredFlags; + outPreferredFlags = allocCreateInfo.preferredFlags; + outNotPreferredFlags = 0; + + switch(allocCreateInfo.usage) + { + case VMA_MEMORY_USAGE_UNKNOWN: + break; + case VMA_MEMORY_USAGE_GPU_ONLY: + if(!isIntegratedGPU || (outPreferredFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) == 0) + { + outPreferredFlags |= VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT; + } + break; + case VMA_MEMORY_USAGE_CPU_ONLY: + outRequiredFlags |= VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_COHERENT_BIT; + break; + case VMA_MEMORY_USAGE_CPU_TO_GPU: + outRequiredFlags |= VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT; + if(!isIntegratedGPU || (outPreferredFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) == 0) + { + outPreferredFlags |= VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT; + } + break; + case VMA_MEMORY_USAGE_GPU_TO_CPU: + outRequiredFlags |= VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT; + outPreferredFlags |= VK_MEMORY_PROPERTY_HOST_CACHED_BIT; + break; + case VMA_MEMORY_USAGE_CPU_COPY: + outNotPreferredFlags |= VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT; + break; + case VMA_MEMORY_USAGE_GPU_LAZILY_ALLOCATED: + outRequiredFlags |= VK_MEMORY_PROPERTY_LAZILY_ALLOCATED_BIT; + break; + case VMA_MEMORY_USAGE_AUTO: + case VMA_MEMORY_USAGE_AUTO_PREFER_DEVICE: + case VMA_MEMORY_USAGE_AUTO_PREFER_HOST: + { + if(bufImgUsage == VmaBufferImageUsage::UNKNOWN) + { + VMA_ASSERT(0 && "VMA_MEMORY_USAGE_AUTO* values can only be used with functions like vmaCreateBuffer, vmaCreateImage so that the details of the created resource are known." + " Maybe you use VkBufferUsageFlags2CreateInfoKHR but forgot to use VMA_ALLOCATOR_CREATE_KHR_MAINTENANCE5_BIT?" ); + return false; + } + + const bool deviceAccess = bufImgUsage.ContainsDeviceAccess(); + const bool hostAccessSequentialWrite = (allocCreateInfo.flags & VMA_ALLOCATION_CREATE_HOST_ACCESS_SEQUENTIAL_WRITE_BIT) != 0; + const bool hostAccessRandom = (allocCreateInfo.flags & VMA_ALLOCATION_CREATE_HOST_ACCESS_RANDOM_BIT) != 0; + const bool hostAccessAllowTransferInstead = (allocCreateInfo.flags & VMA_ALLOCATION_CREATE_HOST_ACCESS_ALLOW_TRANSFER_INSTEAD_BIT) != 0; + const bool preferDevice = allocCreateInfo.usage == VMA_MEMORY_USAGE_AUTO_PREFER_DEVICE; + const bool preferHost = allocCreateInfo.usage == VMA_MEMORY_USAGE_AUTO_PREFER_HOST; + + // CPU random access - e.g. a buffer written to or transferred from GPU to read back on CPU. + if(hostAccessRandom) + { + // Prefer cached. Cannot require it, because some platforms don't have it (e.g. Raspberry Pi - see #362)! + outPreferredFlags |= VK_MEMORY_PROPERTY_HOST_CACHED_BIT; + + if (!isIntegratedGPU && deviceAccess && hostAccessAllowTransferInstead && !preferHost) + { + // Nice if it will end up in HOST_VISIBLE, but more importantly prefer DEVICE_LOCAL. + // Omitting HOST_VISIBLE here is intentional. + // In case there is DEVICE_LOCAL | HOST_VISIBLE | HOST_CACHED, it will pick that one. + // Otherwise, this will give same weight to DEVICE_LOCAL as HOST_VISIBLE | HOST_CACHED and select the former if occurs first on the list. + outPreferredFlags |= VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT; + } + else + { + // Always CPU memory. 
+ outRequiredFlags |= VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT; + } + } + // CPU sequential write - may be CPU or host-visible GPU memory, uncached and write-combined. + else if(hostAccessSequentialWrite) + { + // Want uncached and write-combined. + outNotPreferredFlags |= VK_MEMORY_PROPERTY_HOST_CACHED_BIT; + + if(!isIntegratedGPU && deviceAccess && hostAccessAllowTransferInstead && !preferHost) + { + outPreferredFlags |= VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT | VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT; + } + else + { + outRequiredFlags |= VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT; + // Direct GPU access, CPU sequential write (e.g. a dynamic uniform buffer updated every frame) + if(deviceAccess) + { + // Could go to CPU memory or GPU BAR/unified. Up to the user to decide. If no preference, choose GPU memory. + if(preferHost) + outNotPreferredFlags |= VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT; + else + outPreferredFlags |= VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT; + } + // GPU no direct access, CPU sequential write (e.g. an upload buffer to be transferred to the GPU) + else + { + // Could go to CPU memory or GPU BAR/unified. Up to the user to decide. If no preference, choose CPU memory. + if(preferDevice) + outPreferredFlags |= VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT; + else + outNotPreferredFlags |= VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT; + } + } + } + // No CPU access + else + { + // if(deviceAccess) + // + // GPU access, no CPU access (e.g. a color attachment image) - prefer GPU memory, + // unless there is a clear preference from the user not to do so. + // + // else: + // + // No direct GPU access, no CPU access, just transfers. + // It may be staging copy intended for e.g. preserving image for next frame (then better GPU memory) or + // a "swap file" copy to free some GPU memory (then better CPU memory). + // Up to the user to decide. If no preferece, assume the former and choose GPU memory. + + if(preferHost) + outNotPreferredFlags |= VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT; + else + outPreferredFlags |= VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT; + } + break; + } + default: + VMA_ASSERT(0); + } + + // Avoid DEVICE_COHERENT unless explicitly requested. 
+ if(((allocCreateInfo.requiredFlags | allocCreateInfo.preferredFlags) & + (VK_MEMORY_PROPERTY_DEVICE_COHERENT_BIT_AMD_COPY | VK_MEMORY_PROPERTY_DEVICE_UNCACHED_BIT_AMD_COPY)) == 0) + { + outNotPreferredFlags |= VK_MEMORY_PROPERTY_DEVICE_UNCACHED_BIT_AMD_COPY; + } + + return true; +} + +//////////////////////////////////////////////////////////////////////////////// +// Memory allocation + +static void* VmaMalloc(const VkAllocationCallbacks* pAllocationCallbacks, size_t size, size_t alignment) +{ + void* result = VMA_NULL; + if ((pAllocationCallbacks != VMA_NULL) && + (pAllocationCallbacks->pfnAllocation != VMA_NULL)) + { + result = (*pAllocationCallbacks->pfnAllocation)( + pAllocationCallbacks->pUserData, + size, + alignment, + VK_SYSTEM_ALLOCATION_SCOPE_OBJECT); + } + else + { + result = VMA_SYSTEM_ALIGNED_MALLOC(size, alignment); + } + VMA_ASSERT(result != VMA_NULL && "CPU memory allocation failed."); + return result; +} + +static void VmaFree(const VkAllocationCallbacks* pAllocationCallbacks, void* ptr) +{ + if ((pAllocationCallbacks != VMA_NULL) && + (pAllocationCallbacks->pfnFree != VMA_NULL)) + { + (*pAllocationCallbacks->pfnFree)(pAllocationCallbacks->pUserData, ptr); + } + else + { + VMA_SYSTEM_ALIGNED_FREE(ptr); + } +} + +template +static T* VmaAllocate(const VkAllocationCallbacks* pAllocationCallbacks) +{ + return (T*)VmaMalloc(pAllocationCallbacks, sizeof(T), VMA_ALIGN_OF(T)); +} + +template +static T* VmaAllocateArray(const VkAllocationCallbacks* pAllocationCallbacks, size_t count) +{ + return (T*)VmaMalloc(pAllocationCallbacks, sizeof(T) * count, VMA_ALIGN_OF(T)); +} + +#define vma_new(allocator, type) new(VmaAllocate(allocator))(type) + +#define vma_new_array(allocator, type, count) new(VmaAllocateArray((allocator), (count)))(type) + +template +static void vma_delete(const VkAllocationCallbacks* pAllocationCallbacks, T* ptr) +{ + ptr->~T(); + VmaFree(pAllocationCallbacks, ptr); +} + +template +static void vma_delete_array(const VkAllocationCallbacks* pAllocationCallbacks, T* ptr, size_t count) +{ + if (ptr != VMA_NULL) + { + for (size_t i = count; i--; ) + { + ptr[i].~T(); + } + VmaFree(pAllocationCallbacks, ptr); + } +} + +static char* VmaCreateStringCopy(const VkAllocationCallbacks* allocs, const char* srcStr) +{ + if (srcStr != VMA_NULL) + { + const size_t len = strlen(srcStr); + char* const result = vma_new_array(allocs, char, len + 1); + memcpy(result, srcStr, len + 1); + return result; + } + return VMA_NULL; +} + +#if VMA_STATS_STRING_ENABLED +static char* VmaCreateStringCopy(const VkAllocationCallbacks* allocs, const char* srcStr, size_t strLen) +{ + if (srcStr != VMA_NULL) + { + char* const result = vma_new_array(allocs, char, strLen + 1); + memcpy(result, srcStr, strLen); + result[strLen] = '\0'; + return result; + } + return VMA_NULL; +} +#endif // VMA_STATS_STRING_ENABLED + +static void VmaFreeString(const VkAllocationCallbacks* allocs, char* str) +{ + if (str != VMA_NULL) + { + const size_t len = strlen(str); + vma_delete_array(allocs, str, len + 1); + } +} + +template +size_t VmaVectorInsertSorted(VectorT& vector, const typename VectorT::value_type& value) +{ + const size_t indexToInsert = VmaBinaryFindFirstNotLess( + vector.data(), + vector.data() + vector.size(), + value, + CmpLess()) - vector.data(); + VmaVectorInsert(vector, indexToInsert, value); + return indexToInsert; +} + +template +bool VmaVectorRemoveSorted(VectorT& vector, const typename VectorT::value_type& value) +{ + CmpLess comparator; + typename VectorT::iterator it = VmaBinaryFindFirstNotLess( + 
vector.begin(), + vector.end(), + value, + comparator); + if ((it != vector.end()) && !comparator(*it, value) && !comparator(value, *it)) + { + size_t indexToRemove = it - vector.begin(); + VmaVectorRemove(vector, indexToRemove); + return true; + } + return false; +} +#endif // _VMA_FUNCTIONS + +#ifndef _VMA_STATISTICS_FUNCTIONS + +static void VmaClearStatistics(VmaStatistics& outStats) +{ + outStats.blockCount = 0; + outStats.allocationCount = 0; + outStats.blockBytes = 0; + outStats.allocationBytes = 0; +} + +static void VmaAddStatistics(VmaStatistics& inoutStats, const VmaStatistics& src) +{ + inoutStats.blockCount += src.blockCount; + inoutStats.allocationCount += src.allocationCount; + inoutStats.blockBytes += src.blockBytes; + inoutStats.allocationBytes += src.allocationBytes; +} + +static void VmaClearDetailedStatistics(VmaDetailedStatistics& outStats) +{ + VmaClearStatistics(outStats.statistics); + outStats.unusedRangeCount = 0; + outStats.allocationSizeMin = VK_WHOLE_SIZE; + outStats.allocationSizeMax = 0; + outStats.unusedRangeSizeMin = VK_WHOLE_SIZE; + outStats.unusedRangeSizeMax = 0; +} + +static void VmaAddDetailedStatisticsAllocation(VmaDetailedStatistics& inoutStats, VkDeviceSize size) +{ + inoutStats.statistics.allocationCount++; + inoutStats.statistics.allocationBytes += size; + inoutStats.allocationSizeMin = VMA_MIN(inoutStats.allocationSizeMin, size); + inoutStats.allocationSizeMax = VMA_MAX(inoutStats.allocationSizeMax, size); +} + +static void VmaAddDetailedStatisticsUnusedRange(VmaDetailedStatistics& inoutStats, VkDeviceSize size) +{ + inoutStats.unusedRangeCount++; + inoutStats.unusedRangeSizeMin = VMA_MIN(inoutStats.unusedRangeSizeMin, size); + inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, size); +} + +static void VmaAddDetailedStatistics(VmaDetailedStatistics& inoutStats, const VmaDetailedStatistics& src) +{ + VmaAddStatistics(inoutStats.statistics, src.statistics); + inoutStats.unusedRangeCount += src.unusedRangeCount; + inoutStats.allocationSizeMin = VMA_MIN(inoutStats.allocationSizeMin, src.allocationSizeMin); + inoutStats.allocationSizeMax = VMA_MAX(inoutStats.allocationSizeMax, src.allocationSizeMax); + inoutStats.unusedRangeSizeMin = VMA_MIN(inoutStats.unusedRangeSizeMin, src.unusedRangeSizeMin); + inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, src.unusedRangeSizeMax); +} + +#endif // _VMA_STATISTICS_FUNCTIONS + +#ifndef _VMA_MUTEX_LOCK +// Helper RAII class to lock a mutex in constructor and unlock it in destructor (at the end of scope). +struct VmaMutexLock +{ + VMA_CLASS_NO_COPY_NO_MOVE(VmaMutexLock) +public: + VmaMutexLock(VMA_MUTEX& mutex, bool useMutex = true) : + m_pMutex(useMutex ? &mutex : VMA_NULL) + { + if (m_pMutex) { m_pMutex->Lock(); } + } + ~VmaMutexLock() { if (m_pMutex) { m_pMutex->Unlock(); } } + +private: + VMA_MUTEX* m_pMutex; +}; + +// Helper RAII class to lock a RW mutex in constructor and unlock it in destructor (at the end of scope), for reading. +struct VmaMutexLockRead +{ + VMA_CLASS_NO_COPY_NO_MOVE(VmaMutexLockRead) +public: + VmaMutexLockRead(VMA_RW_MUTEX& mutex, bool useMutex) : + m_pMutex(useMutex ? &mutex : VMA_NULL) + { + if (m_pMutex) { m_pMutex->LockRead(); } + } + ~VmaMutexLockRead() { if (m_pMutex) { m_pMutex->UnlockRead(); } } + +private: + VMA_RW_MUTEX* m_pMutex; +}; + +// Helper RAII class to lock a RW mutex in constructor and unlock it in destructor (at the end of scope), for writing. 
+struct VmaMutexLockWrite
+{
+    VMA_CLASS_NO_COPY_NO_MOVE(VmaMutexLockWrite)
+public:
+    VmaMutexLockWrite(VMA_RW_MUTEX& mutex, bool useMutex)
+        : m_pMutex(useMutex ? &mutex : VMA_NULL)
+    {
+        if (m_pMutex) { m_pMutex->LockWrite(); }
+    }
+    ~VmaMutexLockWrite() { if (m_pMutex) { m_pMutex->UnlockWrite(); } }
+
+private:
+    VMA_RW_MUTEX* m_pMutex;
+};
+
+#if VMA_DEBUG_GLOBAL_MUTEX
+    static VMA_MUTEX gDebugGlobalMutex;
+    #define VMA_DEBUG_GLOBAL_MUTEX_LOCK VmaMutexLock debugGlobalMutexLock(gDebugGlobalMutex, true);
+#else
+    #define VMA_DEBUG_GLOBAL_MUTEX_LOCK
+#endif
+#endif // _VMA_MUTEX_LOCK
+
+#ifndef _VMA_ATOMIC_TRANSACTIONAL_INCREMENT
+// An object that increments given atomic but decrements it back in the destructor unless Commit() is called.
+template<typename AtomicT>
+struct AtomicTransactionalIncrement
+{
+public:
+    using T = decltype(AtomicT().load());
+
+    ~AtomicTransactionalIncrement()
+    {
+        if(m_Atomic)
+            --(*m_Atomic);
+    }
+
+    void Commit() { m_Atomic = VMA_NULL; }
+    T Increment(AtomicT* atomic)
+    {
+        m_Atomic = atomic;
+        return m_Atomic->fetch_add(1);
+    }
+
+private:
+    AtomicT* m_Atomic = VMA_NULL;
+};
+#endif // _VMA_ATOMIC_TRANSACTIONAL_INCREMENT
+
+#ifndef _VMA_STL_ALLOCATOR
+// STL-compatible allocator.
+template<typename T>
+struct VmaStlAllocator
+{
+    const VkAllocationCallbacks* const m_pCallbacks;
+    typedef T value_type;
+
+    VmaStlAllocator(const VkAllocationCallbacks* pCallbacks) : m_pCallbacks(pCallbacks) {}
+    template<typename U>
+    VmaStlAllocator(const VmaStlAllocator<U>& src) : m_pCallbacks(src.m_pCallbacks) {}
+    VmaStlAllocator(const VmaStlAllocator&) = default;
+    VmaStlAllocator& operator=(const VmaStlAllocator&) = delete;
+
+    T* allocate(size_t n) { return VmaAllocateArray<T>(m_pCallbacks, n); }
+    void deallocate(T* p, size_t n) { VmaFree(m_pCallbacks, p); }
+
+    template<typename U>
+    bool operator==(const VmaStlAllocator<U>& rhs) const
+    {
+        return m_pCallbacks == rhs.m_pCallbacks;
+    }
+    template<typename U>
+    bool operator!=(const VmaStlAllocator<U>& rhs) const
+    {
+        return m_pCallbacks != rhs.m_pCallbacks;
+    }
+};
+#endif // _VMA_STL_ALLOCATOR
+
+#ifndef _VMA_VECTOR
+/* Class with interface compatible with subset of std::vector.
+T must be POD because constructors and destructors are not called and memcpy is
+used for these objects. */
+template<typename T, typename AllocatorT>
+class VmaVector
+{
+public:
+    typedef T value_type;
+    typedef T* iterator;
+    typedef const T* const_iterator;
+
+    VmaVector(const AllocatorT& allocator);
+    VmaVector(size_t count, const AllocatorT& allocator);
+    // This version of the constructor is here for compatibility with pre-C++14 std::vector.
+    // value is unused.
+ VmaVector(size_t count, const T& value, const AllocatorT& allocator) : VmaVector(count, allocator) {} + VmaVector(const VmaVector& src); + VmaVector& operator=(const VmaVector& rhs); + ~VmaVector() { VmaFree(m_Allocator.m_pCallbacks, m_pArray); } + + bool empty() const { return m_Count == 0; } + size_t size() const { return m_Count; } + T* data() { return m_pArray; } + T& front() { VMA_HEAVY_ASSERT(m_Count > 0); return m_pArray[0]; } + T& back() { VMA_HEAVY_ASSERT(m_Count > 0); return m_pArray[m_Count - 1]; } + const T* data() const { return m_pArray; } + const T& front() const { VMA_HEAVY_ASSERT(m_Count > 0); return m_pArray[0]; } + const T& back() const { VMA_HEAVY_ASSERT(m_Count > 0); return m_pArray[m_Count - 1]; } + + iterator begin() { return m_pArray; } + iterator end() { return m_pArray + m_Count; } + const_iterator cbegin() const { return m_pArray; } + const_iterator cend() const { return m_pArray + m_Count; } + const_iterator begin() const { return cbegin(); } + const_iterator end() const { return cend(); } + + void pop_front() { VMA_HEAVY_ASSERT(m_Count > 0); remove(0); } + void pop_back() { VMA_HEAVY_ASSERT(m_Count > 0); resize(size() - 1); } + void push_front(const T& src) { insert(0, src); } + + void push_back(const T& src); + void reserve(size_t newCapacity, bool freeMemory = false); + void resize(size_t newCount); + void clear() { resize(0); } + void shrink_to_fit(); + void insert(size_t index, const T& src); + void remove(size_t index); + + T& operator[](size_t index) { VMA_HEAVY_ASSERT(index < m_Count); return m_pArray[index]; } + const T& operator[](size_t index) const { VMA_HEAVY_ASSERT(index < m_Count); return m_pArray[index]; } + +private: + AllocatorT m_Allocator; + T* m_pArray; + size_t m_Count; + size_t m_Capacity; +}; + +#ifndef _VMA_VECTOR_FUNCTIONS +template +VmaVector::VmaVector(const AllocatorT& allocator) + : m_Allocator(allocator), + m_pArray(VMA_NULL), + m_Count(0), + m_Capacity(0) {} + +template +VmaVector::VmaVector(size_t count, const AllocatorT& allocator) + : m_Allocator(allocator), + m_pArray(count ? (T*)VmaAllocateArray(allocator.m_pCallbacks, count) : VMA_NULL), + m_Count(count), + m_Capacity(count) {} + +template +VmaVector::VmaVector(const VmaVector& src) + : m_Allocator(src.m_Allocator), + m_pArray(src.m_Count ? (T*)VmaAllocateArray(src.m_Allocator.m_pCallbacks, src.m_Count) : VMA_NULL), + m_Count(src.m_Count), + m_Capacity(src.m_Count) +{ + if (m_Count != 0) + { + memcpy(m_pArray, src.m_pArray, m_Count * sizeof(T)); + } +} + +template +VmaVector& VmaVector::operator=(const VmaVector& rhs) +{ + if (&rhs != this) + { + resize(rhs.m_Count); + if (m_Count != 0) + { + memcpy(m_pArray, rhs.m_pArray, m_Count * sizeof(T)); + } + } + return *this; +} + +template +void VmaVector::push_back(const T& src) +{ + const size_t newIndex = size(); + resize(newIndex + 1); + m_pArray[newIndex] = src; +} + +template +void VmaVector::reserve(size_t newCapacity, bool freeMemory) +{ + newCapacity = VMA_MAX(newCapacity, m_Count); + + if ((newCapacity < m_Capacity) && !freeMemory) + { + newCapacity = m_Capacity; + } + + if (newCapacity != m_Capacity) + { + T* const newArray = newCapacity ? 
VmaAllocateArray(m_Allocator, newCapacity) : VMA_NULL; + if (m_Count != 0) + { + memcpy(newArray, m_pArray, m_Count * sizeof(T)); + } + VmaFree(m_Allocator.m_pCallbacks, m_pArray); + m_Capacity = newCapacity; + m_pArray = newArray; + } +} + +template +void VmaVector::resize(size_t newCount) +{ + size_t newCapacity = m_Capacity; + if (newCount > m_Capacity) + { + newCapacity = VMA_MAX(newCount, VMA_MAX(m_Capacity * 3 / 2, (size_t)8)); + } + + if (newCapacity != m_Capacity) + { + T* const newArray = newCapacity ? VmaAllocateArray(m_Allocator.m_pCallbacks, newCapacity) : VMA_NULL; + const size_t elementsToCopy = VMA_MIN(m_Count, newCount); + if (elementsToCopy != 0) + { + memcpy(newArray, m_pArray, elementsToCopy * sizeof(T)); + } + VmaFree(m_Allocator.m_pCallbacks, m_pArray); + m_Capacity = newCapacity; + m_pArray = newArray; + } + + m_Count = newCount; +} + +template +void VmaVector::shrink_to_fit() +{ + if (m_Capacity > m_Count) + { + T* newArray = VMA_NULL; + if (m_Count > 0) + { + newArray = VmaAllocateArray(m_Allocator.m_pCallbacks, m_Count); + memcpy(newArray, m_pArray, m_Count * sizeof(T)); + } + VmaFree(m_Allocator.m_pCallbacks, m_pArray); + m_Capacity = m_Count; + m_pArray = newArray; + } +} + +template +void VmaVector::insert(size_t index, const T& src) +{ + VMA_HEAVY_ASSERT(index <= m_Count); + const size_t oldCount = size(); + resize(oldCount + 1); + if (index < oldCount) + { + memmove(m_pArray + (index + 1), m_pArray + index, (oldCount - index) * sizeof(T)); + } + m_pArray[index] = src; +} + +template +void VmaVector::remove(size_t index) +{ + VMA_HEAVY_ASSERT(index < m_Count); + const size_t oldCount = size(); + if (index < oldCount - 1) + { + memmove(m_pArray + index, m_pArray + (index + 1), (oldCount - index - 1) * sizeof(T)); + } + resize(oldCount - 1); +} +#endif // _VMA_VECTOR_FUNCTIONS + +template +static void VmaVectorInsert(VmaVector& vec, size_t index, const T& item) +{ + vec.insert(index, item); +} + +template +static void VmaVectorRemove(VmaVector& vec, size_t index) +{ + vec.remove(index); +} +#endif // _VMA_VECTOR + +#ifndef _VMA_SMALL_VECTOR +/* +This is a vector (a variable-sized array), optimized for the case when the array is small. + +It contains some number of elements in-place, which allows it to avoid heap allocation +when the actual number of elements is below that threshold. This allows normal "small" +cases to be fast without losing generality for large inputs. +*/ +template +class VmaSmallVector +{ +public: + typedef T value_type; + typedef T* iterator; + + VmaSmallVector(const AllocatorT& allocator); + VmaSmallVector(size_t count, const AllocatorT& allocator); + template + VmaSmallVector(const VmaSmallVector&) = delete; + template + VmaSmallVector& operator=(const VmaSmallVector&) = delete; + ~VmaSmallVector() = default; + + bool empty() const { return m_Count == 0; } + size_t size() const { return m_Count; } + T* data() { return m_Count > N ? m_DynamicArray.data() : m_StaticArray; } + T& front() { VMA_HEAVY_ASSERT(m_Count > 0); return data()[0]; } + T& back() { VMA_HEAVY_ASSERT(m_Count > 0); return data()[m_Count - 1]; } + const T* data() const { return m_Count > N ? 
m_DynamicArray.data() : m_StaticArray; } + const T& front() const { VMA_HEAVY_ASSERT(m_Count > 0); return data()[0]; } + const T& back() const { VMA_HEAVY_ASSERT(m_Count > 0); return data()[m_Count - 1]; } + + iterator begin() { return data(); } + iterator end() { return data() + m_Count; } + + void pop_front() { VMA_HEAVY_ASSERT(m_Count > 0); remove(0); } + void pop_back() { VMA_HEAVY_ASSERT(m_Count > 0); resize(size() - 1); } + void push_front(const T& src) { insert(0, src); } + + void push_back(const T& src); + void resize(size_t newCount, bool freeMemory = false); + void clear(bool freeMemory = false); + void insert(size_t index, const T& src); + void remove(size_t index); + + T& operator[](size_t index) { VMA_HEAVY_ASSERT(index < m_Count); return data()[index]; } + const T& operator[](size_t index) const { VMA_HEAVY_ASSERT(index < m_Count); return data()[index]; } + +private: + size_t m_Count; + T m_StaticArray[N]; // Used when m_Size <= N + VmaVector m_DynamicArray; // Used when m_Size > N +}; + +#ifndef _VMA_SMALL_VECTOR_FUNCTIONS +template +VmaSmallVector::VmaSmallVector(const AllocatorT& allocator) + : m_Count(0), + m_DynamicArray(allocator) {} + +template +VmaSmallVector::VmaSmallVector(size_t count, const AllocatorT& allocator) + : m_Count(count), + m_DynamicArray(count > N ? count : 0, allocator) {} + +template +void VmaSmallVector::push_back(const T& src) +{ + const size_t newIndex = size(); + resize(newIndex + 1); + data()[newIndex] = src; +} + +template +void VmaSmallVector::resize(size_t newCount, bool freeMemory) +{ + if (newCount > N && m_Count > N) + { + // Any direction, staying in m_DynamicArray + m_DynamicArray.resize(newCount); + if (freeMemory) + { + m_DynamicArray.shrink_to_fit(); + } + } + else if (newCount > N && m_Count <= N) + { + // Growing, moving from m_StaticArray to m_DynamicArray + m_DynamicArray.resize(newCount); + if (m_Count > 0) + { + memcpy(m_DynamicArray.data(), m_StaticArray, m_Count * sizeof(T)); + } + } + else if (newCount <= N && m_Count > N) + { + // Shrinking, moving from m_DynamicArray to m_StaticArray + if (newCount > 0) + { + memcpy(m_StaticArray, m_DynamicArray.data(), newCount * sizeof(T)); + } + m_DynamicArray.resize(0); + if (freeMemory) + { + m_DynamicArray.shrink_to_fit(); + } + } + else + { + // Any direction, staying in m_StaticArray - nothing to do here + } + m_Count = newCount; +} + +template +void VmaSmallVector::clear(bool freeMemory) +{ + m_DynamicArray.clear(); + if (freeMemory) + { + m_DynamicArray.shrink_to_fit(); + } + m_Count = 0; +} + +template +void VmaSmallVector::insert(size_t index, const T& src) +{ + VMA_HEAVY_ASSERT(index <= m_Count); + const size_t oldCount = size(); + resize(oldCount + 1); + T* const dataPtr = data(); + if (index < oldCount) + { + // I know, this could be more optimal for case where memmove can be memcpy directly from m_StaticArray to m_DynamicArray. + memmove(dataPtr + (index + 1), dataPtr + index, (oldCount - index) * sizeof(T)); + } + dataPtr[index] = src; +} + +template +void VmaSmallVector::remove(size_t index) +{ + VMA_HEAVY_ASSERT(index < m_Count); + const size_t oldCount = size(); + if (index < oldCount - 1) + { + // I know, this could be more optimal for case where memmove can be memcpy directly from m_DynamicArray to m_StaticArray. 
+ T* const dataPtr = data(); + memmove(dataPtr + index, dataPtr + (index + 1), (oldCount - index - 1) * sizeof(T)); + } + resize(oldCount - 1); +} +#endif // _VMA_SMALL_VECTOR_FUNCTIONS +#endif // _VMA_SMALL_VECTOR + +#ifndef _VMA_POOL_ALLOCATOR +/* +Allocator for objects of type T using a list of arrays (pools) to speed up +allocation. Number of elements that can be allocated is not bounded because +allocator can create multiple blocks. +*/ +template +class VmaPoolAllocator +{ + VMA_CLASS_NO_COPY_NO_MOVE(VmaPoolAllocator) +public: + VmaPoolAllocator(const VkAllocationCallbacks* pAllocationCallbacks, uint32_t firstBlockCapacity); + ~VmaPoolAllocator(); + template T* Alloc(Types&&... args); + void Free(T* ptr); + +private: + union Item + { + uint32_t NextFreeIndex; + alignas(T) char Value[sizeof(T)]; + }; + struct ItemBlock + { + Item* pItems; + uint32_t Capacity; + uint32_t FirstFreeIndex; + }; + + const VkAllocationCallbacks* m_pAllocationCallbacks; + const uint32_t m_FirstBlockCapacity; + VmaVector> m_ItemBlocks; + + ItemBlock& CreateNewBlock(); +}; + +#ifndef _VMA_POOL_ALLOCATOR_FUNCTIONS +template +VmaPoolAllocator::VmaPoolAllocator(const VkAllocationCallbacks* pAllocationCallbacks, uint32_t firstBlockCapacity) + : m_pAllocationCallbacks(pAllocationCallbacks), + m_FirstBlockCapacity(firstBlockCapacity), + m_ItemBlocks(VmaStlAllocator(pAllocationCallbacks)) +{ + VMA_ASSERT(m_FirstBlockCapacity > 1); +} + +template +VmaPoolAllocator::~VmaPoolAllocator() +{ + for (size_t i = m_ItemBlocks.size(); i--;) + vma_delete_array(m_pAllocationCallbacks, m_ItemBlocks[i].pItems, m_ItemBlocks[i].Capacity); + m_ItemBlocks.clear(); +} + +template +template T* VmaPoolAllocator::Alloc(Types&&... args) +{ + for (size_t i = m_ItemBlocks.size(); i--; ) + { + ItemBlock& block = m_ItemBlocks[i]; + // This block has some free items: Use first one. + if (block.FirstFreeIndex != UINT32_MAX) + { + Item* const pItem = &block.pItems[block.FirstFreeIndex]; + block.FirstFreeIndex = pItem->NextFreeIndex; + T* result = (T*)&pItem->Value; + new(result)T(std::forward(args)...); // Explicit constructor call. + return result; + } + } + + // No block has free item: Create new one and use it. + ItemBlock& newBlock = CreateNewBlock(); + Item* const pItem = &newBlock.pItems[0]; + newBlock.FirstFreeIndex = pItem->NextFreeIndex; + T* result = (T*)&pItem->Value; + new(result) T(std::forward(args)...); // Explicit constructor call. + return result; +} + +template +void VmaPoolAllocator::Free(T* ptr) +{ + // Search all memory blocks to find ptr. + for (size_t i = m_ItemBlocks.size(); i--; ) + { + ItemBlock& block = m_ItemBlocks[i]; + + // Casting to union. + Item* pItemPtr; + memcpy(&pItemPtr, &ptr, sizeof(pItemPtr)); + + // Check if pItemPtr is in address range of this block. + if ((pItemPtr >= block.pItems) && (pItemPtr < block.pItems + block.Capacity)) + { + ptr->~T(); // Explicit destructor call. + const uint32_t index = static_cast(pItemPtr - block.pItems); + pItemPtr->NextFreeIndex = block.FirstFreeIndex; + block.FirstFreeIndex = index; + return; + } + } + VMA_ASSERT(0 && "Pointer doesn't belong to this memory pool."); +} + +template +typename VmaPoolAllocator::ItemBlock& VmaPoolAllocator::CreateNewBlock() +{ + const uint32_t newBlockCapacity = m_ItemBlocks.empty() ? 
+ m_FirstBlockCapacity : m_ItemBlocks.back().Capacity * 3 / 2; + + const ItemBlock newBlock = + { + vma_new_array(m_pAllocationCallbacks, Item, newBlockCapacity), + newBlockCapacity, + 0 + }; + + m_ItemBlocks.push_back(newBlock); + + // Setup singly-linked list of all free items in this block. + for (uint32_t i = 0; i < newBlockCapacity - 1; ++i) + newBlock.pItems[i].NextFreeIndex = i + 1; + newBlock.pItems[newBlockCapacity - 1].NextFreeIndex = UINT32_MAX; + return m_ItemBlocks.back(); +} +#endif // _VMA_POOL_ALLOCATOR_FUNCTIONS +#endif // _VMA_POOL_ALLOCATOR + +#ifndef _VMA_RAW_LIST +template +struct VmaListItem +{ + VmaListItem* pPrev; + VmaListItem* pNext; + T Value; +}; + +// Doubly linked list. +template +class VmaRawList +{ + VMA_CLASS_NO_COPY_NO_MOVE(VmaRawList) +public: + typedef VmaListItem ItemType; + + VmaRawList(const VkAllocationCallbacks* pAllocationCallbacks); + // Intentionally not calling Clear, because that would be unnecessary + // computations to return all items to m_ItemAllocator as free. + ~VmaRawList() = default; + + size_t GetCount() const { return m_Count; } + bool IsEmpty() const { return m_Count == 0; } + + ItemType* Front() { return m_pFront; } + ItemType* Back() { return m_pBack; } + const ItemType* Front() const { return m_pFront; } + const ItemType* Back() const { return m_pBack; } + + ItemType* PushFront(); + ItemType* PushBack(); + ItemType* PushFront(const T& value); + ItemType* PushBack(const T& value); + void PopFront(); + void PopBack(); + + // Item can be null - it means PushBack. + ItemType* InsertBefore(ItemType* pItem); + // Item can be null - it means PushFront. + ItemType* InsertAfter(ItemType* pItem); + ItemType* InsertBefore(ItemType* pItem, const T& value); + ItemType* InsertAfter(ItemType* pItem, const T& value); + + void Clear(); + void Remove(ItemType* pItem); + +private: + const VkAllocationCallbacks* const m_pAllocationCallbacks; + VmaPoolAllocator m_ItemAllocator; + ItemType* m_pFront; + ItemType* m_pBack; + size_t m_Count; +}; + +#ifndef _VMA_RAW_LIST_FUNCTIONS +template +VmaRawList::VmaRawList(const VkAllocationCallbacks* pAllocationCallbacks) + : m_pAllocationCallbacks(pAllocationCallbacks), + m_ItemAllocator(pAllocationCallbacks, 128), + m_pFront(VMA_NULL), + m_pBack(VMA_NULL), + m_Count(0) {} + +template +VmaListItem* VmaRawList::PushFront() +{ + ItemType* const pNewItem = m_ItemAllocator.Alloc(); + pNewItem->pPrev = VMA_NULL; + if (IsEmpty()) + { + pNewItem->pNext = VMA_NULL; + m_pFront = pNewItem; + m_pBack = pNewItem; + m_Count = 1; + } + else + { + pNewItem->pNext = m_pFront; + m_pFront->pPrev = pNewItem; + m_pFront = pNewItem; + ++m_Count; + } + return pNewItem; +} + +template +VmaListItem* VmaRawList::PushBack() +{ + ItemType* const pNewItem = m_ItemAllocator.Alloc(); + pNewItem->pNext = VMA_NULL; + if(IsEmpty()) + { + pNewItem->pPrev = VMA_NULL; + m_pFront = pNewItem; + m_pBack = pNewItem; + m_Count = 1; + } + else + { + pNewItem->pPrev = m_pBack; + m_pBack->pNext = pNewItem; + m_pBack = pNewItem; + ++m_Count; + } + return pNewItem; +} + +template +VmaListItem* VmaRawList::PushFront(const T& value) +{ + ItemType* const pNewItem = PushFront(); + pNewItem->Value = value; + return pNewItem; +} + +template +VmaListItem* VmaRawList::PushBack(const T& value) +{ + ItemType* const pNewItem = PushBack(); + pNewItem->Value = value; + return pNewItem; +} + +template +void VmaRawList::PopFront() +{ + VMA_HEAVY_ASSERT(m_Count > 0); + ItemType* const pFrontItem = m_pFront; + ItemType* const pNextItem = pFrontItem->pNext; + if (pNextItem != 
VMA_NULL) + { + pNextItem->pPrev = VMA_NULL; + } + m_pFront = pNextItem; + m_ItemAllocator.Free(pFrontItem); + --m_Count; +} + +template +void VmaRawList::PopBack() +{ + VMA_HEAVY_ASSERT(m_Count > 0); + ItemType* const pBackItem = m_pBack; + ItemType* const pPrevItem = pBackItem->pPrev; + if(pPrevItem != VMA_NULL) + { + pPrevItem->pNext = VMA_NULL; + } + m_pBack = pPrevItem; + m_ItemAllocator.Free(pBackItem); + --m_Count; +} + +template +void VmaRawList::Clear() +{ + if (IsEmpty() == false) + { + ItemType* pItem = m_pBack; + while (pItem != VMA_NULL) + { + ItemType* const pPrevItem = pItem->pPrev; + m_ItemAllocator.Free(pItem); + pItem = pPrevItem; + } + m_pFront = VMA_NULL; + m_pBack = VMA_NULL; + m_Count = 0; + } +} + +template +void VmaRawList::Remove(ItemType* pItem) +{ + VMA_HEAVY_ASSERT(pItem != VMA_NULL); + VMA_HEAVY_ASSERT(m_Count > 0); + + if(pItem->pPrev != VMA_NULL) + { + pItem->pPrev->pNext = pItem->pNext; + } + else + { + VMA_HEAVY_ASSERT(m_pFront == pItem); + m_pFront = pItem->pNext; + } + + if(pItem->pNext != VMA_NULL) + { + pItem->pNext->pPrev = pItem->pPrev; + } + else + { + VMA_HEAVY_ASSERT(m_pBack == pItem); + m_pBack = pItem->pPrev; + } + + m_ItemAllocator.Free(pItem); + --m_Count; +} + +template +VmaListItem* VmaRawList::InsertBefore(ItemType* pItem) +{ + if(pItem != VMA_NULL) + { + ItemType* const prevItem = pItem->pPrev; + ItemType* const newItem = m_ItemAllocator.Alloc(); + newItem->pPrev = prevItem; + newItem->pNext = pItem; + pItem->pPrev = newItem; + if(prevItem != VMA_NULL) + { + prevItem->pNext = newItem; + } + else + { + VMA_HEAVY_ASSERT(m_pFront == pItem); + m_pFront = newItem; + } + ++m_Count; + return newItem; + } + else + return PushBack(); +} + +template +VmaListItem* VmaRawList::InsertAfter(ItemType* pItem) +{ + if(pItem != VMA_NULL) + { + ItemType* const nextItem = pItem->pNext; + ItemType* const newItem = m_ItemAllocator.Alloc(); + newItem->pNext = nextItem; + newItem->pPrev = pItem; + pItem->pNext = newItem; + if(nextItem != VMA_NULL) + { + nextItem->pPrev = newItem; + } + else + { + VMA_HEAVY_ASSERT(m_pBack == pItem); + m_pBack = newItem; + } + ++m_Count; + return newItem; + } + else + return PushFront(); +} + +template +VmaListItem* VmaRawList::InsertBefore(ItemType* pItem, const T& value) +{ + ItemType* const newItem = InsertBefore(pItem); + newItem->Value = value; + return newItem; +} + +template +VmaListItem* VmaRawList::InsertAfter(ItemType* pItem, const T& value) +{ + ItemType* const newItem = InsertAfter(pItem); + newItem->Value = value; + return newItem; +} +#endif // _VMA_RAW_LIST_FUNCTIONS +#endif // _VMA_RAW_LIST + +#ifndef _VMA_LIST +template +class VmaList +{ + VMA_CLASS_NO_COPY_NO_MOVE(VmaList) +public: + class reverse_iterator; + class const_iterator; + class const_reverse_iterator; + + class iterator + { + friend class const_iterator; + friend class VmaList; + public: + iterator() : m_pList(VMA_NULL), m_pItem(VMA_NULL) {} + iterator(const reverse_iterator& src) : m_pList(src.m_pList), m_pItem(src.m_pItem) {} + + T& operator*() const { VMA_HEAVY_ASSERT(m_pItem != VMA_NULL); return m_pItem->Value; } + T* operator->() const { VMA_HEAVY_ASSERT(m_pItem != VMA_NULL); return &m_pItem->Value; } + + bool operator==(const iterator& rhs) const { VMA_HEAVY_ASSERT(m_pList == rhs.m_pList); return m_pItem == rhs.m_pItem; } + bool operator!=(const iterator& rhs) const { VMA_HEAVY_ASSERT(m_pList == rhs.m_pList); return m_pItem != rhs.m_pItem; } + + iterator operator++(int) { iterator result = *this; ++*this; return result; } + iterator operator--(int) 
{ iterator result = *this; --*this; return result; } + + iterator& operator++() { VMA_HEAVY_ASSERT(m_pItem != VMA_NULL); m_pItem = m_pItem->pNext; return *this; } + iterator& operator--(); + + private: + VmaRawList* m_pList; + VmaListItem* m_pItem; + + iterator(VmaRawList* pList, VmaListItem* pItem) : m_pList(pList), m_pItem(pItem) {} + }; + class reverse_iterator + { + friend class const_reverse_iterator; + friend class VmaList; + public: + reverse_iterator() : m_pList(VMA_NULL), m_pItem(VMA_NULL) {} + reverse_iterator(const iterator& src) : m_pList(src.m_pList), m_pItem(src.m_pItem) {} + + T& operator*() const { VMA_HEAVY_ASSERT(m_pItem != VMA_NULL); return m_pItem->Value; } + T* operator->() const { VMA_HEAVY_ASSERT(m_pItem != VMA_NULL); return &m_pItem->Value; } + + bool operator==(const reverse_iterator& rhs) const { VMA_HEAVY_ASSERT(m_pList == rhs.m_pList); return m_pItem == rhs.m_pItem; } + bool operator!=(const reverse_iterator& rhs) const { VMA_HEAVY_ASSERT(m_pList == rhs.m_pList); return m_pItem != rhs.m_pItem; } + + reverse_iterator operator++(int) { reverse_iterator result = *this; ++* this; return result; } + reverse_iterator operator--(int) { reverse_iterator result = *this; --* this; return result; } + + reverse_iterator& operator++() { VMA_HEAVY_ASSERT(m_pItem != VMA_NULL); m_pItem = m_pItem->pPrev; return *this; } + reverse_iterator& operator--(); + + private: + VmaRawList* m_pList; + VmaListItem* m_pItem; + + reverse_iterator(VmaRawList* pList, VmaListItem* pItem) : m_pList(pList), m_pItem(pItem) {} + }; + class const_iterator + { + friend class VmaList; + public: + const_iterator() : m_pList(VMA_NULL), m_pItem(VMA_NULL) {} + const_iterator(const iterator& src) : m_pList(src.m_pList), m_pItem(src.m_pItem) {} + const_iterator(const reverse_iterator& src) : m_pList(src.m_pList), m_pItem(src.m_pItem) {} + + iterator drop_const() { return { const_cast*>(m_pList), const_cast*>(m_pItem) }; } + + const T& operator*() const { VMA_HEAVY_ASSERT(m_pItem != VMA_NULL); return m_pItem->Value; } + const T* operator->() const { VMA_HEAVY_ASSERT(m_pItem != VMA_NULL); return &m_pItem->Value; } + + bool operator==(const const_iterator& rhs) const { VMA_HEAVY_ASSERT(m_pList == rhs.m_pList); return m_pItem == rhs.m_pItem; } + bool operator!=(const const_iterator& rhs) const { VMA_HEAVY_ASSERT(m_pList == rhs.m_pList); return m_pItem != rhs.m_pItem; } + + const_iterator operator++(int) { const_iterator result = *this; ++* this; return result; } + const_iterator operator--(int) { const_iterator result = *this; --* this; return result; } + + const_iterator& operator++() { VMA_HEAVY_ASSERT(m_pItem != VMA_NULL); m_pItem = m_pItem->pNext; return *this; } + const_iterator& operator--(); + + private: + const VmaRawList* m_pList; + const VmaListItem* m_pItem; + + const_iterator(const VmaRawList* pList, const VmaListItem* pItem) : m_pList(pList), m_pItem(pItem) {} + }; + class const_reverse_iterator + { + friend class VmaList; + public: + const_reverse_iterator() : m_pList(VMA_NULL), m_pItem(VMA_NULL) {} + const_reverse_iterator(const reverse_iterator& src) : m_pList(src.m_pList), m_pItem(src.m_pItem) {} + const_reverse_iterator(const iterator& src) : m_pList(src.m_pList), m_pItem(src.m_pItem) {} + + reverse_iterator drop_const() { return { const_cast*>(m_pList), const_cast*>(m_pItem) }; } + + const T& operator*() const { VMA_HEAVY_ASSERT(m_pItem != VMA_NULL); return m_pItem->Value; } + const T* operator->() const { VMA_HEAVY_ASSERT(m_pItem != VMA_NULL); return &m_pItem->Value; } + + bool 
operator==(const const_reverse_iterator& rhs) const { VMA_HEAVY_ASSERT(m_pList == rhs.m_pList); return m_pItem == rhs.m_pItem; } + bool operator!=(const const_reverse_iterator& rhs) const { VMA_HEAVY_ASSERT(m_pList == rhs.m_pList); return m_pItem != rhs.m_pItem; } + + const_reverse_iterator operator++(int) { const_reverse_iterator result = *this; ++* this; return result; } + const_reverse_iterator operator--(int) { const_reverse_iterator result = *this; --* this; return result; } + + const_reverse_iterator& operator++() { VMA_HEAVY_ASSERT(m_pItem != VMA_NULL); m_pItem = m_pItem->pPrev; return *this; } + const_reverse_iterator& operator--(); + + private: + const VmaRawList* m_pList; + const VmaListItem* m_pItem; + + const_reverse_iterator(const VmaRawList* pList, const VmaListItem* pItem) : m_pList(pList), m_pItem(pItem) {} + }; + + VmaList(const AllocatorT& allocator) : m_RawList(allocator.m_pCallbacks) {} + + bool empty() const { return m_RawList.IsEmpty(); } + size_t size() const { return m_RawList.GetCount(); } + + iterator begin() { return iterator(&m_RawList, m_RawList.Front()); } + iterator end() { return iterator(&m_RawList, VMA_NULL); } + + const_iterator cbegin() const { return const_iterator(&m_RawList, m_RawList.Front()); } + const_iterator cend() const { return const_iterator(&m_RawList, VMA_NULL); } + + const_iterator begin() const { return cbegin(); } + const_iterator end() const { return cend(); } + + reverse_iterator rbegin() { return reverse_iterator(&m_RawList, m_RawList.Back()); } + reverse_iterator rend() { return reverse_iterator(&m_RawList, VMA_NULL); } + + const_reverse_iterator crbegin() const { return const_reverse_iterator(&m_RawList, m_RawList.Back()); } + const_reverse_iterator crend() const { return const_reverse_iterator(&m_RawList, VMA_NULL); } + + const_reverse_iterator rbegin() const { return crbegin(); } + const_reverse_iterator rend() const { return crend(); } + + void push_back(const T& value) { m_RawList.PushBack(value); } + iterator insert(iterator it, const T& value) { return iterator(&m_RawList, m_RawList.InsertBefore(it.m_pItem, value)); } + + void clear() { m_RawList.Clear(); } + void erase(iterator it) { m_RawList.Remove(it.m_pItem); } + +private: + VmaRawList m_RawList; +}; + +#ifndef _VMA_LIST_FUNCTIONS +template +typename VmaList::iterator& VmaList::iterator::operator--() +{ + if (m_pItem != VMA_NULL) + { + m_pItem = m_pItem->pPrev; + } + else + { + VMA_HEAVY_ASSERT(!m_pList->IsEmpty()); + m_pItem = m_pList->Back(); + } + return *this; +} + +template +typename VmaList::reverse_iterator& VmaList::reverse_iterator::operator--() +{ + if (m_pItem != VMA_NULL) + { + m_pItem = m_pItem->pNext; + } + else + { + VMA_HEAVY_ASSERT(!m_pList->IsEmpty()); + m_pItem = m_pList->Front(); + } + return *this; +} + +template +typename VmaList::const_iterator& VmaList::const_iterator::operator--() +{ + if (m_pItem != VMA_NULL) + { + m_pItem = m_pItem->pPrev; + } + else + { + VMA_HEAVY_ASSERT(!m_pList->IsEmpty()); + m_pItem = m_pList->Back(); + } + return *this; +} + +template +typename VmaList::const_reverse_iterator& VmaList::const_reverse_iterator::operator--() +{ + if (m_pItem != VMA_NULL) + { + m_pItem = m_pItem->pNext; + } + else + { + VMA_HEAVY_ASSERT(!m_pList->IsEmpty()); + m_pItem = m_pList->Back(); + } + return *this; +} +#endif // _VMA_LIST_FUNCTIONS +#endif // _VMA_LIST + +#ifndef _VMA_INTRUSIVE_LINKED_LIST +/* +Expected interface of ItemTypeTraits: +struct MyItemTypeTraits +{ + typedef MyItem ItemType; + static ItemType* GetPrev(const ItemType* item) { 
return item->myPrevPtr; } + static ItemType* GetNext(const ItemType* item) { return item->myNextPtr; } + static ItemType*& AccessPrev(ItemType* item) { return item->myPrevPtr; } + static ItemType*& AccessNext(ItemType* item) { return item->myNextPtr; } +}; +*/ +template +class VmaIntrusiveLinkedList +{ +public: + typedef typename ItemTypeTraits::ItemType ItemType; + static ItemType* GetPrev(const ItemType* item) { return ItemTypeTraits::GetPrev(item); } + static ItemType* GetNext(const ItemType* item) { return ItemTypeTraits::GetNext(item); } + + // Movable, not copyable. + VmaIntrusiveLinkedList() = default; + VmaIntrusiveLinkedList(VmaIntrusiveLinkedList && src); + VmaIntrusiveLinkedList(const VmaIntrusiveLinkedList&) = delete; + VmaIntrusiveLinkedList& operator=(VmaIntrusiveLinkedList&& src); + VmaIntrusiveLinkedList& operator=(const VmaIntrusiveLinkedList&) = delete; + ~VmaIntrusiveLinkedList() { VMA_HEAVY_ASSERT(IsEmpty()); } + + size_t GetCount() const { return m_Count; } + bool IsEmpty() const { return m_Count == 0; } + ItemType* Front() { return m_Front; } + ItemType* Back() { return m_Back; } + const ItemType* Front() const { return m_Front; } + const ItemType* Back() const { return m_Back; } + + void PushBack(ItemType* item); + void PushFront(ItemType* item); + ItemType* PopBack(); + ItemType* PopFront(); + + // MyItem can be null - it means PushBack. + void InsertBefore(ItemType* existingItem, ItemType* newItem); + // MyItem can be null - it means PushFront. + void InsertAfter(ItemType* existingItem, ItemType* newItem); + void Remove(ItemType* item); + void RemoveAll(); + +private: + ItemType* m_Front = VMA_NULL; + ItemType* m_Back = VMA_NULL; + size_t m_Count = 0; +}; + +#ifndef _VMA_INTRUSIVE_LINKED_LIST_FUNCTIONS +template +VmaIntrusiveLinkedList::VmaIntrusiveLinkedList(VmaIntrusiveLinkedList&& src) + : m_Front(src.m_Front), m_Back(src.m_Back), m_Count(src.m_Count) +{ + src.m_Front = src.m_Back = VMA_NULL; + src.m_Count = 0; +} + +template +VmaIntrusiveLinkedList& VmaIntrusiveLinkedList::operator=(VmaIntrusiveLinkedList&& src) +{ + if (&src != this) + { + VMA_HEAVY_ASSERT(IsEmpty()); + m_Front = src.m_Front; + m_Back = src.m_Back; + m_Count = src.m_Count; + src.m_Front = src.m_Back = VMA_NULL; + src.m_Count = 0; + } + return *this; +} + +template +void VmaIntrusiveLinkedList::PushBack(ItemType* item) +{ + VMA_HEAVY_ASSERT(ItemTypeTraits::GetPrev(item) == VMA_NULL && ItemTypeTraits::GetNext(item) == VMA_NULL); + if (IsEmpty()) + { + m_Front = item; + m_Back = item; + m_Count = 1; + } + else + { + ItemTypeTraits::AccessPrev(item) = m_Back; + ItemTypeTraits::AccessNext(m_Back) = item; + m_Back = item; + ++m_Count; + } +} + +template +void VmaIntrusiveLinkedList::PushFront(ItemType* item) +{ + VMA_HEAVY_ASSERT(ItemTypeTraits::GetPrev(item) == VMA_NULL && ItemTypeTraits::GetNext(item) == VMA_NULL); + if (IsEmpty()) + { + m_Front = item; + m_Back = item; + m_Count = 1; + } + else + { + ItemTypeTraits::AccessNext(item) = m_Front; + ItemTypeTraits::AccessPrev(m_Front) = item; + m_Front = item; + ++m_Count; + } +} + +template +typename VmaIntrusiveLinkedList::ItemType* VmaIntrusiveLinkedList::PopBack() +{ + VMA_HEAVY_ASSERT(m_Count > 0); + ItemType* const backItem = m_Back; + ItemType* const prevItem = ItemTypeTraits::GetPrev(backItem); + if (prevItem != VMA_NULL) + { + ItemTypeTraits::AccessNext(prevItem) = VMA_NULL; + } + m_Back = prevItem; + --m_Count; + ItemTypeTraits::AccessPrev(backItem) = VMA_NULL; + ItemTypeTraits::AccessNext(backItem) = VMA_NULL; + return backItem; +} + 
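+// Illustrative sketch of an item type and its traits for this list (names are
+// hypothetical; the real item types in this header follow the same pattern
+// with their own prev/next members):
+//
+//     struct MyItem
+//     {
+//         MyItem* pPrev = VMA_NULL;
+//         MyItem* pNext = VMA_NULL;
+//     };
+//     struct MyItemTraits
+//     {
+//         typedef MyItem ItemType;
+//         static ItemType* GetPrev(const ItemType* item) { return item->pPrev; }
+//         static ItemType* GetNext(const ItemType* item) { return item->pNext; }
+//         static ItemType*& AccessPrev(ItemType* item) { return item->pPrev; }
+//         static ItemType*& AccessNext(ItemType* item) { return item->pNext; }
+//     };
+//
+//     VmaIntrusiveLinkedList<MyItemTraits> list;
+//     MyItem a, b;
+//     list.PushBack(&a);
+//     list.InsertAfter(&a, &b);  // the list never allocates - items are external
+//     list.RemoveAll();          // detach everything; the destructor asserts IsEmpty()
+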
+template +typename VmaIntrusiveLinkedList::ItemType* VmaIntrusiveLinkedList::PopFront() +{ + VMA_HEAVY_ASSERT(m_Count > 0); + ItemType* const frontItem = m_Front; + ItemType* const nextItem = ItemTypeTraits::GetNext(frontItem); + if (nextItem != VMA_NULL) + { + ItemTypeTraits::AccessPrev(nextItem) = VMA_NULL; + } + m_Front = nextItem; + --m_Count; + ItemTypeTraits::AccessPrev(frontItem) = VMA_NULL; + ItemTypeTraits::AccessNext(frontItem) = VMA_NULL; + return frontItem; +} + +template +void VmaIntrusiveLinkedList::InsertBefore(ItemType* existingItem, ItemType* newItem) +{ + VMA_HEAVY_ASSERT(newItem != VMA_NULL && ItemTypeTraits::GetPrev(newItem) == VMA_NULL && ItemTypeTraits::GetNext(newItem) == VMA_NULL); + if (existingItem != VMA_NULL) + { + ItemType* const prevItem = ItemTypeTraits::GetPrev(existingItem); + ItemTypeTraits::AccessPrev(newItem) = prevItem; + ItemTypeTraits::AccessNext(newItem) = existingItem; + ItemTypeTraits::AccessPrev(existingItem) = newItem; + if (prevItem != VMA_NULL) + { + ItemTypeTraits::AccessNext(prevItem) = newItem; + } + else + { + VMA_HEAVY_ASSERT(m_Front == existingItem); + m_Front = newItem; + } + ++m_Count; + } + else + PushBack(newItem); +} + +template +void VmaIntrusiveLinkedList::InsertAfter(ItemType* existingItem, ItemType* newItem) +{ + VMA_HEAVY_ASSERT(newItem != VMA_NULL && ItemTypeTraits::GetPrev(newItem) == VMA_NULL && ItemTypeTraits::GetNext(newItem) == VMA_NULL); + if (existingItem != VMA_NULL) + { + ItemType* const nextItem = ItemTypeTraits::GetNext(existingItem); + ItemTypeTraits::AccessNext(newItem) = nextItem; + ItemTypeTraits::AccessPrev(newItem) = existingItem; + ItemTypeTraits::AccessNext(existingItem) = newItem; + if (nextItem != VMA_NULL) + { + ItemTypeTraits::AccessPrev(nextItem) = newItem; + } + else + { + VMA_HEAVY_ASSERT(m_Back == existingItem); + m_Back = newItem; + } + ++m_Count; + } + else + return PushFront(newItem); +} + +template +void VmaIntrusiveLinkedList::Remove(ItemType* item) +{ + VMA_HEAVY_ASSERT(item != VMA_NULL && m_Count > 0); + if (ItemTypeTraits::GetPrev(item) != VMA_NULL) + { + ItemTypeTraits::AccessNext(ItemTypeTraits::AccessPrev(item)) = ItemTypeTraits::GetNext(item); + } + else + { + VMA_HEAVY_ASSERT(m_Front == item); + m_Front = ItemTypeTraits::GetNext(item); + } + + if (ItemTypeTraits::GetNext(item) != VMA_NULL) + { + ItemTypeTraits::AccessPrev(ItemTypeTraits::AccessNext(item)) = ItemTypeTraits::GetPrev(item); + } + else + { + VMA_HEAVY_ASSERT(m_Back == item); + m_Back = ItemTypeTraits::GetPrev(item); + } + ItemTypeTraits::AccessPrev(item) = VMA_NULL; + ItemTypeTraits::AccessNext(item) = VMA_NULL; + --m_Count; +} + +template +void VmaIntrusiveLinkedList::RemoveAll() +{ + if (!IsEmpty()) + { + ItemType* item = m_Back; + while (item != VMA_NULL) + { + ItemType* const prevItem = ItemTypeTraits::AccessPrev(item); + ItemTypeTraits::AccessPrev(item) = VMA_NULL; + ItemTypeTraits::AccessNext(item) = VMA_NULL; + item = prevItem; + } + m_Front = VMA_NULL; + m_Back = VMA_NULL; + m_Count = 0; + } +} +#endif // _VMA_INTRUSIVE_LINKED_LIST_FUNCTIONS +#endif // _VMA_INTRUSIVE_LINKED_LIST + +#if !defined(_VMA_STRING_BUILDER) && VMA_STATS_STRING_ENABLED +class VmaStringBuilder +{ +public: + VmaStringBuilder(const VkAllocationCallbacks* allocationCallbacks) : m_Data(VmaStlAllocator(allocationCallbacks)) {} + ~VmaStringBuilder() = default; + + size_t GetLength() const { return m_Data.size(); } + const char* GetData() const { return m_Data.data(); } + void AddNewLine() { Add('\n'); } + void Add(char ch) { m_Data.push_back(ch); } + 
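+    // Illustrative usage sketch (values are hypothetical). Note that GetData()
+    // is not necessarily null-terminated, so pair it with GetLength():
+    //
+    //     VmaStringBuilder sb(allocationCallbacks);
+    //     sb.Add("blockCount = ");
+    //     sb.AddNumber(3u);
+    //     sb.AddNewLine();
+    //     printf("%.*s", (int)sb.GetLength(), sb.GetData());
+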
+ void Add(const char* pStr); + void AddNumber(uint32_t num); + void AddNumber(uint64_t num); + void AddPointer(const void* ptr); + +private: + VmaVector> m_Data; +}; + +#ifndef _VMA_STRING_BUILDER_FUNCTIONS +void VmaStringBuilder::Add(const char* pStr) +{ + const size_t strLen = strlen(pStr); + if (strLen > 0) + { + const size_t oldCount = m_Data.size(); + m_Data.resize(oldCount + strLen); + memcpy(m_Data.data() + oldCount, pStr, strLen); + } +} + +void VmaStringBuilder::AddNumber(uint32_t num) +{ + char buf[11]; + buf[10] = '\0'; + char* p = &buf[10]; + do + { + *--p = '0' + (char)(num % 10); + num /= 10; + } while (num); + Add(p); +} + +void VmaStringBuilder::AddNumber(uint64_t num) +{ + char buf[21]; + buf[20] = '\0'; + char* p = &buf[20]; + do + { + *--p = '0' + (char)(num % 10); + num /= 10; + } while (num); + Add(p); +} + +void VmaStringBuilder::AddPointer(const void* ptr) +{ + char buf[21]; + VmaPtrToStr(buf, sizeof(buf), ptr); + Add(buf); +} +#endif //_VMA_STRING_BUILDER_FUNCTIONS +#endif // _VMA_STRING_BUILDER + +#if !defined(_VMA_JSON_WRITER) && VMA_STATS_STRING_ENABLED +/* +Allows to conveniently build a correct JSON document to be written to the +VmaStringBuilder passed to the constructor. +*/ +class VmaJsonWriter +{ + VMA_CLASS_NO_COPY_NO_MOVE(VmaJsonWriter) +public: + // sb - string builder to write the document to. Must remain alive for the whole lifetime of this object. + VmaJsonWriter(const VkAllocationCallbacks* pAllocationCallbacks, VmaStringBuilder& sb); + ~VmaJsonWriter(); + + // Begins object by writing "{". + // Inside an object, you must call pairs of WriteString and a value, e.g.: + // j.BeginObject(true); j.WriteString("A"); j.WriteNumber(1); j.WriteString("B"); j.WriteNumber(2); j.EndObject(); + // Will write: { "A": 1, "B": 2 } + void BeginObject(bool singleLine = false); + // Ends object by writing "}". + void EndObject(); + + // Begins array by writing "[". + // Inside an array, you can write a sequence of any values. + void BeginArray(bool singleLine = false); + // Ends array by writing "[". + void EndArray(); + + // Writes a string value inside "". + // pStr can contain any ANSI characters, including '"', new line etc. - they will be properly escaped. + void WriteString(const char* pStr); + + // Begins writing a string value. + // Call BeginString, ContinueString, ContinueString, ..., EndString instead of + // WriteString to conveniently build the string content incrementally, made of + // parts including numbers. + void BeginString(const char* pStr = VMA_NULL); + // Posts next part of an open string. + void ContinueString(const char* pStr); + // Posts next part of an open string. The number is converted to decimal characters. + void ContinueString(uint32_t n); + void ContinueString(uint64_t n); + // Posts next part of an open string. Pointer value is converted to characters + // using "%p" formatting - shown as hexadecimal number, e.g.: 000000081276Ad00 + void ContinueString_Pointer(const void* ptr); + // Ends writing a string value by writing '"'. + void EndString(const char* pStr = VMA_NULL); + + // Writes a number value. + void WriteNumber(uint32_t n); + void WriteNumber(uint64_t n); + // Writes a boolean value - false or true. + void WriteBool(bool b); + // Writes a null value. 
+ void WriteNull(); + +private: + enum COLLECTION_TYPE + { + COLLECTION_TYPE_OBJECT, + COLLECTION_TYPE_ARRAY, + }; + struct StackItem + { + COLLECTION_TYPE type; + uint32_t valueCount; + bool singleLineMode; + }; + + static const char* const INDENT; + + VmaStringBuilder& m_SB; + VmaVector< StackItem, VmaStlAllocator > m_Stack; + bool m_InsideString; + + void BeginValue(bool isString); + void WriteIndent(bool oneLess = false); +}; +const char* const VmaJsonWriter::INDENT = " "; + +#ifndef _VMA_JSON_WRITER_FUNCTIONS +VmaJsonWriter::VmaJsonWriter(const VkAllocationCallbacks* pAllocationCallbacks, VmaStringBuilder& sb) + : m_SB(sb), + m_Stack(VmaStlAllocator(pAllocationCallbacks)), + m_InsideString(false) {} + +VmaJsonWriter::~VmaJsonWriter() +{ + VMA_ASSERT(!m_InsideString); + VMA_ASSERT(m_Stack.empty()); +} + +void VmaJsonWriter::BeginObject(bool singleLine) +{ + VMA_ASSERT(!m_InsideString); + + BeginValue(false); + m_SB.Add('{'); + + StackItem item; + item.type = COLLECTION_TYPE_OBJECT; + item.valueCount = 0; + item.singleLineMode = singleLine; + m_Stack.push_back(item); +} + +void VmaJsonWriter::EndObject() +{ + VMA_ASSERT(!m_InsideString); + + WriteIndent(true); + m_SB.Add('}'); + + VMA_ASSERT(!m_Stack.empty() && m_Stack.back().type == COLLECTION_TYPE_OBJECT); + m_Stack.pop_back(); +} + +void VmaJsonWriter::BeginArray(bool singleLine) +{ + VMA_ASSERT(!m_InsideString); + + BeginValue(false); + m_SB.Add('['); + + StackItem item; + item.type = COLLECTION_TYPE_ARRAY; + item.valueCount = 0; + item.singleLineMode = singleLine; + m_Stack.push_back(item); +} + +void VmaJsonWriter::EndArray() +{ + VMA_ASSERT(!m_InsideString); + + WriteIndent(true); + m_SB.Add(']'); + + VMA_ASSERT(!m_Stack.empty() && m_Stack.back().type == COLLECTION_TYPE_ARRAY); + m_Stack.pop_back(); +} + +void VmaJsonWriter::WriteString(const char* pStr) +{ + BeginString(pStr); + EndString(); +} + +void VmaJsonWriter::BeginString(const char* pStr) +{ + VMA_ASSERT(!m_InsideString); + + BeginValue(true); + m_SB.Add('"'); + m_InsideString = true; + if (pStr != VMA_NULL && pStr[0] != '\0') + { + ContinueString(pStr); + } +} + +void VmaJsonWriter::ContinueString(const char* pStr) +{ + VMA_ASSERT(m_InsideString); + + const size_t strLen = strlen(pStr); + for (size_t i = 0; i < strLen; ++i) + { + char ch = pStr[i]; + if (ch == '\\') + { + m_SB.Add("\\\\"); + } + else if (ch == '"') + { + m_SB.Add("\\\""); + } + else if ((uint8_t)ch >= 32) + { + m_SB.Add(ch); + } + else switch (ch) + { + case '\b': + m_SB.Add("\\b"); + break; + case '\f': + m_SB.Add("\\f"); + break; + case '\n': + m_SB.Add("\\n"); + break; + case '\r': + m_SB.Add("\\r"); + break; + case '\t': + m_SB.Add("\\t"); + break; + default: + VMA_ASSERT(0 && "Character not currently supported."); + } + } +} + +void VmaJsonWriter::ContinueString(uint32_t n) +{ + VMA_ASSERT(m_InsideString); + m_SB.AddNumber(n); +} + +void VmaJsonWriter::ContinueString(uint64_t n) +{ + VMA_ASSERT(m_InsideString); + m_SB.AddNumber(n); +} + +void VmaJsonWriter::ContinueString_Pointer(const void* ptr) +{ + VMA_ASSERT(m_InsideString); + m_SB.AddPointer(ptr); +} + +void VmaJsonWriter::EndString(const char* pStr) +{ + VMA_ASSERT(m_InsideString); + if (pStr != VMA_NULL && pStr[0] != '\0') + { + ContinueString(pStr); + } + m_SB.Add('"'); + m_InsideString = false; +} + +void VmaJsonWriter::WriteNumber(uint32_t n) +{ + VMA_ASSERT(!m_InsideString); + BeginValue(false); + m_SB.AddNumber(n); +} + +void VmaJsonWriter::WriteNumber(uint64_t n) +{ + VMA_ASSERT(!m_InsideString); + BeginValue(false); + 
m_SB.AddNumber(n); +} + +void VmaJsonWriter::WriteBool(bool b) +{ + VMA_ASSERT(!m_InsideString); + BeginValue(false); + m_SB.Add(b ? "true" : "false"); +} + +void VmaJsonWriter::WriteNull() +{ + VMA_ASSERT(!m_InsideString); + BeginValue(false); + m_SB.Add("null"); +} + +void VmaJsonWriter::BeginValue(bool isString) +{ + if (!m_Stack.empty()) + { + StackItem& currItem = m_Stack.back(); + if (currItem.type == COLLECTION_TYPE_OBJECT && + currItem.valueCount % 2 == 0) + { + VMA_ASSERT(isString); + } + + if (currItem.type == COLLECTION_TYPE_OBJECT && + currItem.valueCount % 2 != 0) + { + m_SB.Add(": "); + } + else if (currItem.valueCount > 0) + { + m_SB.Add(", "); + WriteIndent(); + } + else + { + WriteIndent(); + } + ++currItem.valueCount; + } +} + +void VmaJsonWriter::WriteIndent(bool oneLess) +{ + if (!m_Stack.empty() && !m_Stack.back().singleLineMode) + { + m_SB.AddNewLine(); + + size_t count = m_Stack.size(); + if (count > 0 && oneLess) + { + --count; + } + for (size_t i = 0; i < count; ++i) + { + m_SB.Add(INDENT); + } + } +} +#endif // _VMA_JSON_WRITER_FUNCTIONS + +static void VmaPrintDetailedStatistics(VmaJsonWriter& json, const VmaDetailedStatistics& stat) +{ + json.BeginObject(); + + json.WriteString("BlockCount"); + json.WriteNumber(stat.statistics.blockCount); + json.WriteString("BlockBytes"); + json.WriteNumber(stat.statistics.blockBytes); + json.WriteString("AllocationCount"); + json.WriteNumber(stat.statistics.allocationCount); + json.WriteString("AllocationBytes"); + json.WriteNumber(stat.statistics.allocationBytes); + json.WriteString("UnusedRangeCount"); + json.WriteNumber(stat.unusedRangeCount); + + if (stat.statistics.allocationCount > 1) + { + json.WriteString("AllocationSizeMin"); + json.WriteNumber(stat.allocationSizeMin); + json.WriteString("AllocationSizeMax"); + json.WriteNumber(stat.allocationSizeMax); + } + if (stat.unusedRangeCount > 1) + { + json.WriteString("UnusedRangeSizeMin"); + json.WriteNumber(stat.unusedRangeSizeMin); + json.WriteString("UnusedRangeSizeMax"); + json.WriteNumber(stat.unusedRangeSizeMax); + } + json.EndObject(); +} +#endif // _VMA_JSON_WRITER + +#ifndef _VMA_MAPPING_HYSTERESIS + +class VmaMappingHysteresis +{ + VMA_CLASS_NO_COPY_NO_MOVE(VmaMappingHysteresis) +public: + VmaMappingHysteresis() = default; + + uint32_t GetExtraMapping() const { return m_ExtraMapping; } + + // Call when Map was called. + // Returns true if switched to extra +1 mapping reference count. + bool PostMap() + { +#if VMA_MAPPING_HYSTERESIS_ENABLED + if(m_ExtraMapping == 0) + { + ++m_MajorCounter; + if(m_MajorCounter >= COUNTER_MIN_EXTRA_MAPPING) + { + m_ExtraMapping = 1; + m_MajorCounter = 0; + m_MinorCounter = 0; + return true; + } + } + else // m_ExtraMapping == 1 + PostMinorCounter(); +#endif // #if VMA_MAPPING_HYSTERESIS_ENABLED + return false; + } + + // Call when Unmap was called. + void PostUnmap() + { +#if VMA_MAPPING_HYSTERESIS_ENABLED + if(m_ExtraMapping == 0) + ++m_MajorCounter; + else // m_ExtraMapping == 1 + PostMinorCounter(); +#endif // #if VMA_MAPPING_HYSTERESIS_ENABLED + } + + // Call when allocation was made from the memory block. + void PostAlloc() + { +#if VMA_MAPPING_HYSTERESIS_ENABLED + if(m_ExtraMapping == 1) + ++m_MajorCounter; + else // m_ExtraMapping == 0 + PostMinorCounter(); +#endif // #if VMA_MAPPING_HYSTERESIS_ENABLED + } + + // Call when allocation was freed from the memory block. + // Returns true if switched to extra -1 mapping reference count. 
+ bool PostFree() + { +#if VMA_MAPPING_HYSTERESIS_ENABLED + if(m_ExtraMapping == 1) + { + ++m_MajorCounter; + if(m_MajorCounter >= COUNTER_MIN_EXTRA_MAPPING && + m_MajorCounter > m_MinorCounter + 1) + { + m_ExtraMapping = 0; + m_MajorCounter = 0; + m_MinorCounter = 0; + return true; + } + } + else // m_ExtraMapping == 0 + PostMinorCounter(); +#endif // #if VMA_MAPPING_HYSTERESIS_ENABLED + return false; + } + +private: + static const int32_t COUNTER_MIN_EXTRA_MAPPING = 7; + + uint32_t m_MinorCounter = 0; + uint32_t m_MajorCounter = 0; + uint32_t m_ExtraMapping = 0; // 0 or 1. + + void PostMinorCounter() + { + if(m_MinorCounter < m_MajorCounter) + { + ++m_MinorCounter; + } + else if(m_MajorCounter > 0) + { + --m_MajorCounter; + --m_MinorCounter; + } + } +}; + +#endif // _VMA_MAPPING_HYSTERESIS + +#if VMA_EXTERNAL_MEMORY_WIN32 +class VmaWin32Handle +{ +public: + VmaWin32Handle() noexcept : m_hHandle(VMA_NULL) { } + explicit VmaWin32Handle(HANDLE hHandle) noexcept : m_hHandle(hHandle) { } + ~VmaWin32Handle() noexcept { if (m_hHandle != VMA_NULL) { ::CloseHandle(m_hHandle); } } + VMA_CLASS_NO_COPY_NO_MOVE(VmaWin32Handle) + +public: + // Strengthened + VkResult GetHandle(VkDevice device, VkDeviceMemory memory, PFN_vkGetMemoryWin32HandleKHR pvkGetMemoryWin32HandleKHR, HANDLE hTargetProcess, bool useMutex, HANDLE* pHandle) noexcept + { + *pHandle = VMA_NULL; + // Try to get handle first. + if (m_hHandle != VMA_NULL) + { + *pHandle = Duplicate(hTargetProcess); + return VK_SUCCESS; + } + + VkResult res = VK_SUCCESS; + // If failed, try to create it. + { + VmaMutexLockWrite lock(m_Mutex, useMutex); + if (m_hHandle == VMA_NULL) + { + res = Create(device, memory, pvkGetMemoryWin32HandleKHR, &m_hHandle); + } + } + + *pHandle = Duplicate(hTargetProcess); + return res; + } + + operator bool() const noexcept { return m_hHandle != VMA_NULL; } +private: + // Not atomic + static VkResult Create(VkDevice device, VkDeviceMemory memory, PFN_vkGetMemoryWin32HandleKHR pvkGetMemoryWin32HandleKHR, HANDLE* pHandle) noexcept + { + VkResult res = VK_ERROR_FEATURE_NOT_PRESENT; + if (pvkGetMemoryWin32HandleKHR != VMA_NULL) + { + VkMemoryGetWin32HandleInfoKHR handleInfo{ }; + handleInfo.sType = VK_STRUCTURE_TYPE_MEMORY_GET_WIN32_HANDLE_INFO_KHR; + handleInfo.memory = memory; + handleInfo.handleType = VK_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_WIN32_BIT_KHR; + res = pvkGetMemoryWin32HandleKHR(device, &handleInfo, pHandle); + } + return res; + } + HANDLE Duplicate(HANDLE hTargetProcess = VMA_NULL) const noexcept + { + if (!m_hHandle) + return m_hHandle; + + HANDLE hCurrentProcess = ::GetCurrentProcess(); + HANDLE hDupHandle = VMA_NULL; + if (!::DuplicateHandle(hCurrentProcess, m_hHandle, hTargetProcess ? hTargetProcess : hCurrentProcess, &hDupHandle, 0, FALSE, DUPLICATE_SAME_ACCESS)) + { + VMA_ASSERT(0 && "Failed to duplicate handle."); + } + return hDupHandle; + } +private: + HANDLE m_hHandle; + VMA_RW_MUTEX m_Mutex; // Protects access m_Handle +}; +#else +class VmaWin32Handle +{ + // ABI compatibility + void* placeholder = VMA_NULL; + VMA_RW_MUTEX placeholder2; +}; +#endif // VMA_EXTERNAL_MEMORY_WIN32 + + +#ifndef _VMA_DEVICE_MEMORY_BLOCK +/* +Represents a single block of device memory (`VkDeviceMemory`) with all the +data about its regions (aka suballocations, #VmaAllocation), assigned and free. + +Thread-safety: +- Access to m_pMetadata must be externally synchronized. +- Map, Unmap, Bind* are synchronized internally. 
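+
+Map()/Unmap() are reference-counted per block, so independent callers can
+bracket their access with matching calls (illustrative sketch; hAllocator and
+pBlock are assumed to be valid):
+
+    void* pData = VMA_NULL;
+    if (pBlock->Map(hAllocator, 1, &pData) == VK_SUCCESS)
+    {
+        // ... use pData ...
+        pBlock->Unmap(hAllocator, 1); // decrements the same mapping counter
+    }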
+*/ +class VmaDeviceMemoryBlock +{ + VMA_CLASS_NO_COPY_NO_MOVE(VmaDeviceMemoryBlock) +public: + VmaBlockMetadata* m_pMetadata; + + VmaDeviceMemoryBlock(VmaAllocator hAllocator); + ~VmaDeviceMemoryBlock(); + + // Always call after construction. + void Init( + VmaAllocator hAllocator, + VmaPool hParentPool, + uint32_t newMemoryTypeIndex, + VkDeviceMemory newMemory, + VkDeviceSize newSize, + uint32_t id, + uint32_t algorithm, + VkDeviceSize bufferImageGranularity); + // Always call before destruction. + void Destroy(VmaAllocator allocator); + + VmaPool GetParentPool() const { return m_hParentPool; } + VkDeviceMemory GetDeviceMemory() const { return m_hMemory; } + uint32_t GetMemoryTypeIndex() const { return m_MemoryTypeIndex; } + uint32_t GetId() const { return m_Id; } + void* GetMappedData() const { return m_pMappedData; } + uint32_t GetMapRefCount() const { return m_MapCount; } + + // Call when allocation/free was made from m_pMetadata. + // Used for m_MappingHysteresis. + void PostAlloc(VmaAllocator hAllocator); + void PostFree(VmaAllocator hAllocator); + + // Validates all data structures inside this object. If not valid, returns false. + bool Validate() const; + VkResult CheckCorruption(VmaAllocator hAllocator); + + // ppData can be null. + VkResult Map(VmaAllocator hAllocator, uint32_t count, void** ppData); + void Unmap(VmaAllocator hAllocator, uint32_t count); + + VkResult WriteMagicValueAfterAllocation(VmaAllocator hAllocator, VkDeviceSize allocOffset, VkDeviceSize allocSize); + VkResult ValidateMagicValueAfterAllocation(VmaAllocator hAllocator, VkDeviceSize allocOffset, VkDeviceSize allocSize); + + VkResult BindBufferMemory( + const VmaAllocator hAllocator, + const VmaAllocation hAllocation, + VkDeviceSize allocationLocalOffset, + VkBuffer hBuffer, + const void* pNext); + VkResult BindImageMemory( + const VmaAllocator hAllocator, + const VmaAllocation hAllocation, + VkDeviceSize allocationLocalOffset, + VkImage hImage, + const void* pNext); +#if VMA_EXTERNAL_MEMORY_WIN32 + VkResult CreateWin32Handle( + const VmaAllocator hAllocator, + PFN_vkGetMemoryWin32HandleKHR pvkGetMemoryWin32HandleKHR, + HANDLE hTargetProcess, + HANDLE* pHandle)noexcept; +#endif // VMA_EXTERNAL_MEMORY_WIN32 +private: + VmaPool m_hParentPool; // VK_NULL_HANDLE if not belongs to custom pool. + uint32_t m_MemoryTypeIndex; + uint32_t m_Id; + VkDeviceMemory m_hMemory; + + /* + Protects access to m_hMemory so it is not used by multiple threads simultaneously, e.g. vkMapMemory, vkBindBufferMemory. + Also protects m_MapCount, m_pMappedData. + Allocations, deallocations, any change in m_pMetadata is protected by parent's VmaBlockVector::m_Mutex. + */ + VMA_MUTEX m_MapAndBindMutex; + VmaMappingHysteresis m_MappingHysteresis; + uint32_t m_MapCount; + void* m_pMappedData; + + VmaWin32Handle m_Handle; +}; +#endif // _VMA_DEVICE_MEMORY_BLOCK + +#ifndef _VMA_ALLOCATION_T +struct VmaAllocationExtraData +{ + void* m_pMappedData = VMA_NULL; // Not null means memory is mapped. + VmaWin32Handle m_Handle; +}; + +struct VmaAllocation_T +{ + friend struct VmaDedicatedAllocationListItemTraits; + + enum FLAGS + { + FLAG_PERSISTENT_MAP = 0x01, + FLAG_MAPPING_ALLOWED = 0x02, + }; + +public: + enum ALLOCATION_TYPE + { + ALLOCATION_TYPE_NONE, + ALLOCATION_TYPE_BLOCK, + ALLOCATION_TYPE_DEDICATED, + }; + + // This struct is allocated using VmaPoolAllocator. 
+ VmaAllocation_T(bool mappingAllowed); + ~VmaAllocation_T(); + + void InitBlockAllocation( + VmaDeviceMemoryBlock* block, + VmaAllocHandle allocHandle, + VkDeviceSize alignment, + VkDeviceSize size, + uint32_t memoryTypeIndex, + VmaSuballocationType suballocationType, + bool mapped); + // pMappedData not null means allocation is created with MAPPED flag. + void InitDedicatedAllocation( + VmaAllocator allocator, + VmaPool hParentPool, + uint32_t memoryTypeIndex, + VkDeviceMemory hMemory, + VmaSuballocationType suballocationType, + void* pMappedData, + VkDeviceSize size); + void Destroy(VmaAllocator allocator); + + ALLOCATION_TYPE GetType() const { return (ALLOCATION_TYPE)m_Type; } + VkDeviceSize GetAlignment() const { return m_Alignment; } + VkDeviceSize GetSize() const { return m_Size; } + void* GetUserData() const { return m_pUserData; } + const char* GetName() const { return m_pName; } + VmaSuballocationType GetSuballocationType() const { return (VmaSuballocationType)m_SuballocationType; } + + VmaDeviceMemoryBlock* GetBlock() const { VMA_ASSERT(m_Type == ALLOCATION_TYPE_BLOCK); return m_BlockAllocation.m_Block; } + uint32_t GetMemoryTypeIndex() const { return m_MemoryTypeIndex; } + bool IsPersistentMap() const { return (m_Flags & FLAG_PERSISTENT_MAP) != 0; } + bool IsMappingAllowed() const { return (m_Flags & FLAG_MAPPING_ALLOWED) != 0; } + + void SetUserData(VmaAllocator hAllocator, void* pUserData) { m_pUserData = pUserData; } + void SetName(VmaAllocator hAllocator, const char* pName); + void FreeName(VmaAllocator hAllocator); + uint8_t SwapBlockAllocation(VmaAllocator hAllocator, VmaAllocation allocation); + VmaAllocHandle GetAllocHandle() const; + VkDeviceSize GetOffset() const; + VmaPool GetParentPool() const; + VkDeviceMemory GetMemory() const; + void* GetMappedData() const; + + void BlockAllocMap(); + void BlockAllocUnmap(); + VkResult DedicatedAllocMap(VmaAllocator hAllocator, void** ppData); + void DedicatedAllocUnmap(VmaAllocator hAllocator); + +#if VMA_STATS_STRING_ENABLED + VmaBufferImageUsage GetBufferImageUsage() const { return m_BufferImageUsage; } + void InitBufferUsage(const VkBufferCreateInfo &createInfo, bool useKhrMaintenance5) + { + VMA_ASSERT(m_BufferImageUsage == VmaBufferImageUsage::UNKNOWN); + m_BufferImageUsage = VmaBufferImageUsage(createInfo, useKhrMaintenance5); + } + void InitImageUsage(const VkImageCreateInfo &createInfo) + { + VMA_ASSERT(m_BufferImageUsage == VmaBufferImageUsage::UNKNOWN); + m_BufferImageUsage = VmaBufferImageUsage(createInfo); + } + void PrintParameters(class VmaJsonWriter& json) const; +#endif + +#if VMA_EXTERNAL_MEMORY_WIN32 + VkResult GetWin32Handle(VmaAllocator hAllocator, HANDLE hTargetProcess, HANDLE* hHandle) noexcept; +#endif // VMA_EXTERNAL_MEMORY_WIN32 + +private: + // Allocation out of VmaDeviceMemoryBlock. + struct BlockAllocation + { + VmaDeviceMemoryBlock* m_Block; + VmaAllocHandle m_AllocHandle; + }; + // Allocation for an object that has its own private VkDeviceMemory. + struct DedicatedAllocation + { + VmaPool m_hParentPool; // VK_NULL_HANDLE if not belongs to custom pool. + VkDeviceMemory m_hMemory; + VmaAllocationExtraData* m_ExtraData; + VmaAllocation_T* m_Prev; + VmaAllocation_T* m_Next; + }; + union + { + // Allocation out of VmaDeviceMemoryBlock. + BlockAllocation m_BlockAllocation; + // Allocation for an object that has its own private VkDeviceMemory. 
+ DedicatedAllocation m_DedicatedAllocation; + }; + + VkDeviceSize m_Alignment; + VkDeviceSize m_Size; + void* m_pUserData; + char* m_pName; + uint32_t m_MemoryTypeIndex; + uint8_t m_Type; // ALLOCATION_TYPE + uint8_t m_SuballocationType; // VmaSuballocationType + // Reference counter for vmaMapMemory()/vmaUnmapMemory(). + uint8_t m_MapCount; + uint8_t m_Flags; // enum FLAGS +#if VMA_STATS_STRING_ENABLED + VmaBufferImageUsage m_BufferImageUsage; // 0 if unknown. +#endif + + void EnsureExtraData(VmaAllocator hAllocator); +}; +#endif // _VMA_ALLOCATION_T + +#ifndef _VMA_DEDICATED_ALLOCATION_LIST_ITEM_TRAITS +struct VmaDedicatedAllocationListItemTraits +{ + typedef VmaAllocation_T ItemType; + + static ItemType* GetPrev(const ItemType* item) + { + VMA_HEAVY_ASSERT(item->GetType() == VmaAllocation_T::ALLOCATION_TYPE_DEDICATED); + return item->m_DedicatedAllocation.m_Prev; + } + static ItemType* GetNext(const ItemType* item) + { + VMA_HEAVY_ASSERT(item->GetType() == VmaAllocation_T::ALLOCATION_TYPE_DEDICATED); + return item->m_DedicatedAllocation.m_Next; + } + static ItemType*& AccessPrev(ItemType* item) + { + VMA_HEAVY_ASSERT(item->GetType() == VmaAllocation_T::ALLOCATION_TYPE_DEDICATED); + return item->m_DedicatedAllocation.m_Prev; + } + static ItemType*& AccessNext(ItemType* item) + { + VMA_HEAVY_ASSERT(item->GetType() == VmaAllocation_T::ALLOCATION_TYPE_DEDICATED); + return item->m_DedicatedAllocation.m_Next; + } +}; +#endif // _VMA_DEDICATED_ALLOCATION_LIST_ITEM_TRAITS + +#ifndef _VMA_DEDICATED_ALLOCATION_LIST +/* +Stores linked list of VmaAllocation_T objects. +Thread-safe, synchronized internally. +*/ +class VmaDedicatedAllocationList +{ + VMA_CLASS_NO_COPY_NO_MOVE(VmaDedicatedAllocationList) +public: + VmaDedicatedAllocationList() {} + ~VmaDedicatedAllocationList(); + + void Init(bool useMutex) { m_UseMutex = useMutex; } + bool Validate(); + + void AddDetailedStatistics(VmaDetailedStatistics& inoutStats); + void AddStatistics(VmaStatistics& inoutStats); +#if VMA_STATS_STRING_ENABLED + // Writes JSON array with the list of allocations. 
+ void BuildStatsString(VmaJsonWriter& json); +#endif + + bool IsEmpty(); + void Register(VmaAllocation alloc); + void Unregister(VmaAllocation alloc); + +private: + typedef VmaIntrusiveLinkedList DedicatedAllocationLinkedList; + + bool m_UseMutex = true; + VMA_RW_MUTEX m_Mutex; + DedicatedAllocationLinkedList m_AllocationList; +}; + +#ifndef _VMA_DEDICATED_ALLOCATION_LIST_FUNCTIONS + +VmaDedicatedAllocationList::~VmaDedicatedAllocationList() +{ + VMA_HEAVY_ASSERT(Validate()); + + if (!m_AllocationList.IsEmpty()) + { + VMA_ASSERT_LEAK(false && "Unfreed dedicated allocations found!"); + } +} + +bool VmaDedicatedAllocationList::Validate() +{ + const size_t declaredCount = m_AllocationList.GetCount(); + size_t actualCount = 0; + VmaMutexLockRead lock(m_Mutex, m_UseMutex); + for (VmaAllocation alloc = m_AllocationList.Front(); + alloc != VMA_NULL; alloc = m_AllocationList.GetNext(alloc)) + { + ++actualCount; + } + VMA_VALIDATE(actualCount == declaredCount); + + return true; +} + +void VmaDedicatedAllocationList::AddDetailedStatistics(VmaDetailedStatistics& inoutStats) +{ + for(auto* item = m_AllocationList.Front(); item != VMA_NULL; item = DedicatedAllocationLinkedList::GetNext(item)) + { + const VkDeviceSize size = item->GetSize(); + inoutStats.statistics.blockCount++; + inoutStats.statistics.blockBytes += size; + VmaAddDetailedStatisticsAllocation(inoutStats, item->GetSize()); + } +} + +void VmaDedicatedAllocationList::AddStatistics(VmaStatistics& inoutStats) +{ + VmaMutexLockRead lock(m_Mutex, m_UseMutex); + + const uint32_t allocCount = (uint32_t)m_AllocationList.GetCount(); + inoutStats.blockCount += allocCount; + inoutStats.allocationCount += allocCount; + + for(auto* item = m_AllocationList.Front(); item != VMA_NULL; item = DedicatedAllocationLinkedList::GetNext(item)) + { + const VkDeviceSize size = item->GetSize(); + inoutStats.blockBytes += size; + inoutStats.allocationBytes += size; + } +} + +#if VMA_STATS_STRING_ENABLED +void VmaDedicatedAllocationList::BuildStatsString(VmaJsonWriter& json) +{ + VmaMutexLockRead lock(m_Mutex, m_UseMutex); + json.BeginArray(); + for (VmaAllocation alloc = m_AllocationList.Front(); + alloc != VMA_NULL; alloc = m_AllocationList.GetNext(alloc)) + { + json.BeginObject(true); + alloc->PrintParameters(json); + json.EndObject(); + } + json.EndArray(); +} +#endif // VMA_STATS_STRING_ENABLED + +bool VmaDedicatedAllocationList::IsEmpty() +{ + VmaMutexLockRead lock(m_Mutex, m_UseMutex); + return m_AllocationList.IsEmpty(); +} + +void VmaDedicatedAllocationList::Register(VmaAllocation alloc) +{ + VmaMutexLockWrite lock(m_Mutex, m_UseMutex); + m_AllocationList.PushBack(alloc); +} + +void VmaDedicatedAllocationList::Unregister(VmaAllocation alloc) +{ + VmaMutexLockWrite lock(m_Mutex, m_UseMutex); + m_AllocationList.Remove(alloc); +} +#endif // _VMA_DEDICATED_ALLOCATION_LIST_FUNCTIONS +#endif // _VMA_DEDICATED_ALLOCATION_LIST + +#ifndef _VMA_SUBALLOCATION +/* +Represents a region of VmaDeviceMemoryBlock that is either assigned and returned as +allocated memory block or free. +*/ +struct VmaSuballocation +{ + VkDeviceSize offset; + VkDeviceSize size; + void* userData; + VmaSuballocationType type; +}; + +// Comparator for offsets. 
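+// A minimal sketch (not part of the library) of the pattern used by
+// VmaDedicatedAllocationList above: an intrusive doubly-linked list whose prev/next
+// pointers live inside the item and are reached through a traits type, with a mutex
+// taken only around mutation and traversal. SketchItem, SketchItemTraits and
+// SketchRegistry are hypothetical names; std::mutex stands in for VMA_RW_MUTEX.
+#include <mutex>
+
+struct SketchItem
+{
+    VkDeviceSize size = 0;
+    SketchItem* prev = nullptr;
+    SketchItem* next = nullptr;
+};
+
+struct SketchItemTraits
+{
+    static SketchItem*& AccessPrev(SketchItem* item) { return item->prev; }
+    static SketchItem*& AccessNext(SketchItem* item) { return item->next; }
+};
+
+class SketchRegistry
+{
+public:
+    void Register(SketchItem* item)
+    {
+        std::lock_guard<std::mutex> lock(m_Mutex);
+        SketchItemTraits::AccessPrev(item) = m_Back;
+        SketchItemTraits::AccessNext(item) = nullptr;
+        if (m_Back != nullptr)
+            SketchItemTraits::AccessNext(m_Back) = item;
+        else
+            m_Front = item;
+        m_Back = item;
+    }
+    void Unregister(SketchItem* item)
+    {
+        std::lock_guard<std::mutex> lock(m_Mutex);
+        SketchItem* const prev = SketchItemTraits::AccessPrev(item);
+        SketchItem* const next = SketchItemTraits::AccessNext(item);
+        if (prev != nullptr) SketchItemTraits::AccessNext(prev) = next; else m_Front = next;
+        if (next != nullptr) SketchItemTraits::AccessPrev(next) = prev; else m_Back = prev;
+    }
+    VkDeviceSize TotalBytes()
+    {
+        std::lock_guard<std::mutex> lock(m_Mutex);
+        VkDeviceSize total = 0;
+        for (SketchItem* it = m_Front; it != nullptr; it = SketchItemTraits::AccessNext(it))
+            total += it->size;
+        return total;
+    }
+private:
+    std::mutex m_Mutex;
+    SketchItem* m_Front = nullptr;
+    SketchItem* m_Back = nullptr;
+};
+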
+struct VmaSuballocationOffsetLess +{ + bool operator()(const VmaSuballocation& lhs, const VmaSuballocation& rhs) const + { + return lhs.offset < rhs.offset; + } +}; + +struct VmaSuballocationOffsetGreater +{ + bool operator()(const VmaSuballocation& lhs, const VmaSuballocation& rhs) const + { + return lhs.offset > rhs.offset; + } +}; + +struct VmaSuballocationItemSizeLess +{ + bool operator()(const VmaSuballocationList::iterator lhs, + const VmaSuballocationList::iterator rhs) const + { + return lhs->size < rhs->size; + } + + bool operator()(const VmaSuballocationList::iterator lhs, + VkDeviceSize rhsSize) const + { + return lhs->size < rhsSize; + } +}; +#endif // _VMA_SUBALLOCATION + +#ifndef _VMA_ALLOCATION_REQUEST +/* +Parameters of planned allocation inside a VmaDeviceMemoryBlock. +item points to a FREE suballocation. +*/ +struct VmaAllocationRequest +{ + VmaAllocHandle allocHandle; + VkDeviceSize size; + VmaSuballocationList::iterator item; + void* customData; + uint64_t algorithmData; + VmaAllocationRequestType type; +}; +#endif // _VMA_ALLOCATION_REQUEST + +#ifndef _VMA_BLOCK_METADATA +/* +Data structure used for bookkeeping of allocations and unused ranges of memory +in a single VkDeviceMemory block. +*/ +class VmaBlockMetadata +{ + VMA_CLASS_NO_COPY_NO_MOVE(VmaBlockMetadata) +public: + // pAllocationCallbacks, if not null, must be owned externally - alive and unchanged for the whole lifetime of this object. + VmaBlockMetadata(const VkAllocationCallbacks* pAllocationCallbacks, + VkDeviceSize bufferImageGranularity, bool isVirtual); + virtual ~VmaBlockMetadata() = default; + + virtual void Init(VkDeviceSize size) { m_Size = size; } + bool IsVirtual() const { return m_IsVirtual; } + VkDeviceSize GetSize() const { return m_Size; } + + // Validates all data structures inside this object. If not valid, returns false. + virtual bool Validate() const = 0; + virtual size_t GetAllocationCount() const = 0; + virtual size_t GetFreeRegionsCount() const = 0; + virtual VkDeviceSize GetSumFreeSize() const = 0; + // Returns true if this block is empty - contains only single free suballocation. + virtual bool IsEmpty() const = 0; + virtual void GetAllocationInfo(VmaAllocHandle allocHandle, VmaVirtualAllocationInfo& outInfo) = 0; + virtual VkDeviceSize GetAllocationOffset(VmaAllocHandle allocHandle) const = 0; + virtual void* GetAllocationUserData(VmaAllocHandle allocHandle) const = 0; + + virtual VmaAllocHandle GetAllocationListBegin() const = 0; + virtual VmaAllocHandle GetNextAllocation(VmaAllocHandle prevAlloc) const = 0; + virtual VkDeviceSize GetNextFreeRegionSize(VmaAllocHandle alloc) const = 0; + + // Shouldn't modify blockCount. + virtual void AddDetailedStatistics(VmaDetailedStatistics& inoutStats) const = 0; + virtual void AddStatistics(VmaStatistics& inoutStats) const = 0; + +#if VMA_STATS_STRING_ENABLED + virtual void PrintDetailedMap(class VmaJsonWriter& json) const = 0; +#endif + + // Tries to find a place for suballocation with given parameters inside this block. + // If succeeded, fills pAllocationRequest and returns true. + // If failed, returns false. + virtual bool CreateAllocationRequest( + VkDeviceSize allocSize, + VkDeviceSize allocAlignment, + bool upperAddress, + VmaSuballocationType allocType, + // Always one of VMA_ALLOCATION_CREATE_STRATEGY_* or VMA_ALLOCATION_INTERNAL_STRATEGY_* flags. + uint32_t strategy, + VmaAllocationRequest* pAllocationRequest) = 0; + + virtual VkResult CheckCorruption(const void* pBlockData) = 0; + + // Makes actual allocation based on request. 
Request must already be checked and valid. + virtual void Alloc( + const VmaAllocationRequest& request, + VmaSuballocationType type, + void* userData) = 0; + + // Frees suballocation assigned to given memory region. + virtual void Free(VmaAllocHandle allocHandle) = 0; + + // Frees all allocations. + // Careful! Don't call it if there are VmaAllocation objects owned by userData of cleared allocations! + virtual void Clear() = 0; + + virtual void SetAllocationUserData(VmaAllocHandle allocHandle, void* userData) = 0; + virtual void DebugLogAllAllocations() const = 0; + +protected: + const VkAllocationCallbacks* GetAllocationCallbacks() const { return m_pAllocationCallbacks; } + VkDeviceSize GetBufferImageGranularity() const { return m_BufferImageGranularity; } + VkDeviceSize GetDebugMargin() const { return VkDeviceSize(IsVirtual() ? 0 : VMA_DEBUG_MARGIN); } + + void DebugLogAllocation(VkDeviceSize offset, VkDeviceSize size, void* userData) const; +#if VMA_STATS_STRING_ENABLED + // mapRefCount == UINT32_MAX means unspecified. + void PrintDetailedMap_Begin(class VmaJsonWriter& json, + VkDeviceSize unusedBytes, + size_t allocationCount, + size_t unusedRangeCount) const; + void PrintDetailedMap_Allocation(class VmaJsonWriter& json, + VkDeviceSize offset, VkDeviceSize size, void* userData) const; + void PrintDetailedMap_UnusedRange(class VmaJsonWriter& json, + VkDeviceSize offset, + VkDeviceSize size) const; + void PrintDetailedMap_End(class VmaJsonWriter& json) const; +#endif + +private: + VkDeviceSize m_Size; + const VkAllocationCallbacks* m_pAllocationCallbacks; + const VkDeviceSize m_BufferImageGranularity; + const bool m_IsVirtual; +}; + +#ifndef _VMA_BLOCK_METADATA_FUNCTIONS +VmaBlockMetadata::VmaBlockMetadata(const VkAllocationCallbacks* pAllocationCallbacks, + VkDeviceSize bufferImageGranularity, bool isVirtual) + : m_Size(0), + m_pAllocationCallbacks(pAllocationCallbacks), + m_BufferImageGranularity(bufferImageGranularity), + m_IsVirtual(isVirtual) {} + +void VmaBlockMetadata::DebugLogAllocation(VkDeviceSize offset, VkDeviceSize size, void* userData) const +{ + if (IsVirtual()) + { + VMA_LEAK_LOG_FORMAT("UNFREED VIRTUAL ALLOCATION; Offset: %" PRIu64 "; Size: %" PRIu64 "; UserData: %p", offset, size, userData); + } + else + { + VMA_ASSERT(userData != VMA_NULL); + VmaAllocation allocation = reinterpret_cast(userData); + + userData = allocation->GetUserData(); + const char* name = allocation->GetName(); + +#if VMA_STATS_STRING_ENABLED + VMA_LEAK_LOG_FORMAT("UNFREED ALLOCATION; Offset: %" PRIu64 "; Size: %" PRIu64 "; UserData: %p; Name: %s; Type: %s; Usage: %" PRIu64, + offset, size, userData, name ? name : "vma_empty", + VMA_SUBALLOCATION_TYPE_NAMES[allocation->GetSuballocationType()], + (uint64_t)allocation->GetBufferImageUsage().Value); +#else + VMA_LEAK_LOG_FORMAT("UNFREED ALLOCATION; Offset: %" PRIu64 "; Size: %" PRIu64 "; UserData: %p; Name: %s; Type: %u", + offset, size, userData, name ? 
name : "vma_empty", + (unsigned)allocation->GetSuballocationType()); +#endif // VMA_STATS_STRING_ENABLED + } + +} + +#if VMA_STATS_STRING_ENABLED +void VmaBlockMetadata::PrintDetailedMap_Begin(class VmaJsonWriter& json, + VkDeviceSize unusedBytes, size_t allocationCount, size_t unusedRangeCount) const +{ + json.WriteString("TotalBytes"); + json.WriteNumber(GetSize()); + + json.WriteString("UnusedBytes"); + json.WriteNumber(unusedBytes); + + json.WriteString("Allocations"); + json.WriteNumber((uint64_t)allocationCount); + + json.WriteString("UnusedRanges"); + json.WriteNumber((uint64_t)unusedRangeCount); + + json.WriteString("Suballocations"); + json.BeginArray(); +} + +void VmaBlockMetadata::PrintDetailedMap_Allocation(class VmaJsonWriter& json, + VkDeviceSize offset, VkDeviceSize size, void* userData) const +{ + json.BeginObject(true); + + json.WriteString("Offset"); + json.WriteNumber(offset); + + if (IsVirtual()) + { + json.WriteString("Size"); + json.WriteNumber(size); + if (userData) + { + json.WriteString("CustomData"); + json.BeginString(); + json.ContinueString_Pointer(userData); + json.EndString(); + } + } + else + { + ((VmaAllocation)userData)->PrintParameters(json); + } + + json.EndObject(); +} + +void VmaBlockMetadata::PrintDetailedMap_UnusedRange(class VmaJsonWriter& json, + VkDeviceSize offset, VkDeviceSize size) const +{ + json.BeginObject(true); + + json.WriteString("Offset"); + json.WriteNumber(offset); + + json.WriteString("Type"); + json.WriteString(VMA_SUBALLOCATION_TYPE_NAMES[VMA_SUBALLOCATION_TYPE_FREE]); + + json.WriteString("Size"); + json.WriteNumber(size); + + json.EndObject(); +} + +void VmaBlockMetadata::PrintDetailedMap_End(class VmaJsonWriter& json) const +{ + json.EndArray(); +} +#endif // VMA_STATS_STRING_ENABLED +#endif // _VMA_BLOCK_METADATA_FUNCTIONS +#endif // _VMA_BLOCK_METADATA + +#ifndef _VMA_BLOCK_BUFFER_IMAGE_GRANULARITY +// Before deleting object of this class remember to call 'Destroy()' +class VmaBlockBufferImageGranularity final +{ +public: + struct ValidationContext + { + const VkAllocationCallbacks* allocCallbacks; + uint16_t* pageAllocs; + }; + + VmaBlockBufferImageGranularity(VkDeviceSize bufferImageGranularity); + ~VmaBlockBufferImageGranularity(); + + bool IsEnabled() const { return m_BufferImageGranularity > MAX_LOW_BUFFER_IMAGE_GRANULARITY; } + + void Init(const VkAllocationCallbacks* pAllocationCallbacks, VkDeviceSize size); + // Before destroying object you must call free it's memory + void Destroy(const VkAllocationCallbacks* pAllocationCallbacks); + + void RoundupAllocRequest(VmaSuballocationType allocType, + VkDeviceSize& inOutAllocSize, + VkDeviceSize& inOutAllocAlignment) const; + + bool CheckConflictAndAlignUp(VkDeviceSize& inOutAllocOffset, + VkDeviceSize allocSize, + VkDeviceSize blockOffset, + VkDeviceSize blockSize, + VmaSuballocationType allocType) const; + + void AllocPages(uint8_t allocType, VkDeviceSize offset, VkDeviceSize size); + void FreePages(VkDeviceSize offset, VkDeviceSize size); + void Clear(); + + ValidationContext StartValidation(const VkAllocationCallbacks* pAllocationCallbacks, + bool isVirutal) const; + bool Validate(ValidationContext& ctx, VkDeviceSize offset, VkDeviceSize size) const; + bool FinishValidation(ValidationContext& ctx) const; + +private: + static const uint16_t MAX_LOW_BUFFER_IMAGE_GRANULARITY = 256; + + struct RegionInfo + { + uint8_t allocType; + uint16_t allocCount; + }; + + VkDeviceSize m_BufferImageGranularity; + uint32_t m_RegionCount; + RegionInfo* m_RegionInfo; + + uint32_t 
GetStartPage(VkDeviceSize offset) const { return OffsetToPageIndex(offset & ~(m_BufferImageGranularity - 1)); } + uint32_t GetEndPage(VkDeviceSize offset, VkDeviceSize size) const { return OffsetToPageIndex((offset + size - 1) & ~(m_BufferImageGranularity - 1)); } + + uint32_t OffsetToPageIndex(VkDeviceSize offset) const; + void AllocPage(RegionInfo& page, uint8_t allocType); +}; + +#ifndef _VMA_BLOCK_BUFFER_IMAGE_GRANULARITY_FUNCTIONS +VmaBlockBufferImageGranularity::VmaBlockBufferImageGranularity(VkDeviceSize bufferImageGranularity) + : m_BufferImageGranularity(bufferImageGranularity), + m_RegionCount(0), + m_RegionInfo(VMA_NULL) {} + +VmaBlockBufferImageGranularity::~VmaBlockBufferImageGranularity() +{ + VMA_ASSERT(m_RegionInfo == VMA_NULL && "Free not called before destroying object!"); +} + +void VmaBlockBufferImageGranularity::Init(const VkAllocationCallbacks* pAllocationCallbacks, VkDeviceSize size) +{ + if (IsEnabled()) + { + m_RegionCount = static_cast(VmaDivideRoundingUp(size, m_BufferImageGranularity)); + m_RegionInfo = vma_new_array(pAllocationCallbacks, RegionInfo, m_RegionCount); + memset(m_RegionInfo, 0, m_RegionCount * sizeof(RegionInfo)); + } +} + +void VmaBlockBufferImageGranularity::Destroy(const VkAllocationCallbacks* pAllocationCallbacks) +{ + if (m_RegionInfo) + { + vma_delete_array(pAllocationCallbacks, m_RegionInfo, m_RegionCount); + m_RegionInfo = VMA_NULL; + } +} + +void VmaBlockBufferImageGranularity::RoundupAllocRequest(VmaSuballocationType allocType, + VkDeviceSize& inOutAllocSize, + VkDeviceSize& inOutAllocAlignment) const +{ + if (m_BufferImageGranularity > 1 && + m_BufferImageGranularity <= MAX_LOW_BUFFER_IMAGE_GRANULARITY) + { + if (allocType == VMA_SUBALLOCATION_TYPE_UNKNOWN || + allocType == VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN || + allocType == VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL) + { + inOutAllocAlignment = VMA_MAX(inOutAllocAlignment, m_BufferImageGranularity); + inOutAllocSize = VmaAlignUp(inOutAllocSize, m_BufferImageGranularity); + } + } +} + +bool VmaBlockBufferImageGranularity::CheckConflictAndAlignUp(VkDeviceSize& inOutAllocOffset, + VkDeviceSize allocSize, + VkDeviceSize blockOffset, + VkDeviceSize blockSize, + VmaSuballocationType allocType) const +{ + if (IsEnabled()) + { + uint32_t startPage = GetStartPage(inOutAllocOffset); + if (m_RegionInfo[startPage].allocCount > 0 && + VmaIsBufferImageGranularityConflict(static_cast(m_RegionInfo[startPage].allocType), allocType)) + { + inOutAllocOffset = VmaAlignUp(inOutAllocOffset, m_BufferImageGranularity); + if (blockSize < allocSize + inOutAllocOffset - blockOffset) + return true; + ++startPage; + } + uint32_t endPage = GetEndPage(inOutAllocOffset, allocSize); + if (endPage != startPage && + m_RegionInfo[endPage].allocCount > 0 && + VmaIsBufferImageGranularityConflict(static_cast(m_RegionInfo[endPage].allocType), allocType)) + { + return true; + } + } + return false; +} + +void VmaBlockBufferImageGranularity::AllocPages(uint8_t allocType, VkDeviceSize offset, VkDeviceSize size) +{ + if (IsEnabled()) + { + uint32_t startPage = GetStartPage(offset); + AllocPage(m_RegionInfo[startPage], allocType); + + uint32_t endPage = GetEndPage(offset, size); + if (startPage != endPage) + AllocPage(m_RegionInfo[endPage], allocType); + } +} + +void VmaBlockBufferImageGranularity::FreePages(VkDeviceSize offset, VkDeviceSize size) +{ + if (IsEnabled()) + { + uint32_t startPage = GetStartPage(offset); + --m_RegionInfo[startPage].allocCount; + if (m_RegionInfo[startPage].allocCount == 0) + 
m_RegionInfo[startPage].allocType = VMA_SUBALLOCATION_TYPE_FREE; + uint32_t endPage = GetEndPage(offset, size); + if (startPage != endPage) + { + --m_RegionInfo[endPage].allocCount; + if (m_RegionInfo[endPage].allocCount == 0) + m_RegionInfo[endPage].allocType = VMA_SUBALLOCATION_TYPE_FREE; + } + } +} + +void VmaBlockBufferImageGranularity::Clear() +{ + if (m_RegionInfo) + memset(m_RegionInfo, 0, m_RegionCount * sizeof(RegionInfo)); +} + +VmaBlockBufferImageGranularity::ValidationContext VmaBlockBufferImageGranularity::StartValidation( + const VkAllocationCallbacks* pAllocationCallbacks, bool isVirutal) const +{ + ValidationContext ctx{ pAllocationCallbacks, VMA_NULL }; + if (!isVirutal && IsEnabled()) + { + ctx.pageAllocs = vma_new_array(pAllocationCallbacks, uint16_t, m_RegionCount); + memset(ctx.pageAllocs, 0, m_RegionCount * sizeof(uint16_t)); + } + return ctx; +} + +bool VmaBlockBufferImageGranularity::Validate(ValidationContext& ctx, + VkDeviceSize offset, VkDeviceSize size) const +{ + if (IsEnabled()) + { + uint32_t start = GetStartPage(offset); + ++ctx.pageAllocs[start]; + VMA_VALIDATE(m_RegionInfo[start].allocCount > 0); + + uint32_t end = GetEndPage(offset, size); + if (start != end) + { + ++ctx.pageAllocs[end]; + VMA_VALIDATE(m_RegionInfo[end].allocCount > 0); + } + } + return true; +} + +bool VmaBlockBufferImageGranularity::FinishValidation(ValidationContext& ctx) const +{ + // Check proper page structure + if (IsEnabled()) + { + VMA_ASSERT(ctx.pageAllocs != VMA_NULL && "Validation context not initialized!"); + + for (uint32_t page = 0; page < m_RegionCount; ++page) + { + VMA_VALIDATE(ctx.pageAllocs[page] == m_RegionInfo[page].allocCount); + } + vma_delete_array(ctx.allocCallbacks, ctx.pageAllocs, m_RegionCount); + ctx.pageAllocs = VMA_NULL; + } + return true; +} + +uint32_t VmaBlockBufferImageGranularity::OffsetToPageIndex(VkDeviceSize offset) const +{ + return static_cast(offset >> VMA_BITSCAN_MSB(m_BufferImageGranularity)); +} + +void VmaBlockBufferImageGranularity::AllocPage(RegionInfo& page, uint8_t allocType) +{ + // When current alloc type is free then it can be overridden by new type + if (page.allocCount == 0 || (page.allocCount > 0 && page.allocType == VMA_SUBALLOCATION_TYPE_FREE)) + page.allocType = allocType; + + ++page.allocCount; +} +#endif // _VMA_BLOCK_BUFFER_IMAGE_GRANULARITY_FUNCTIONS +#endif // _VMA_BLOCK_BUFFER_IMAGE_GRANULARITY + +#ifndef _VMA_BLOCK_METADATA_LINEAR +/* +Allocations and their references in internal data structure look like this: + +if(m_2ndVectorMode == SECOND_VECTOR_EMPTY): + + 0 +-------+ + | | + | | + | | + +-------+ + | Alloc | 1st[m_1stNullItemsBeginCount] + +-------+ + | Alloc | 1st[m_1stNullItemsBeginCount + 1] + +-------+ + | ... | + +-------+ + | Alloc | 1st[1st.size() - 1] + +-------+ + | | + | | + | | +GetSize() +-------+ + +if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER): + + 0 +-------+ + | Alloc | 2nd[0] + +-------+ + | Alloc | 2nd[1] + +-------+ + | ... | + +-------+ + | Alloc | 2nd[2nd.size() - 1] + +-------+ + | | + | | + | | + +-------+ + | Alloc | 1st[m_1stNullItemsBeginCount] + +-------+ + | Alloc | 1st[m_1stNullItemsBeginCount + 1] + +-------+ + | ... | + +-------+ + | Alloc | 1st[1st.size() - 1] + +-------+ + | | +GetSize() +-------+ + +if(m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK): + + 0 +-------+ + | | + | | + | | + +-------+ + | Alloc | 1st[m_1stNullItemsBeginCount] + +-------+ + | Alloc | 1st[m_1stNullItemsBeginCount + 1] + +-------+ + | ... 
| + +-------+ + | Alloc | 1st[1st.size() - 1] + +-------+ + | | + | | + | | + +-------+ + | Alloc | 2nd[2nd.size() - 1] + +-------+ + | ... | + +-------+ + | Alloc | 2nd[1] + +-------+ + | Alloc | 2nd[0] +GetSize() +-------+ + +*/ +class VmaBlockMetadata_Linear : public VmaBlockMetadata +{ + VMA_CLASS_NO_COPY_NO_MOVE(VmaBlockMetadata_Linear) +public: + VmaBlockMetadata_Linear(const VkAllocationCallbacks* pAllocationCallbacks, + VkDeviceSize bufferImageGranularity, bool isVirtual); + virtual ~VmaBlockMetadata_Linear() = default; + + VkDeviceSize GetSumFreeSize() const override { return m_SumFreeSize; } + bool IsEmpty() const override { return GetAllocationCount() == 0; } + VkDeviceSize GetAllocationOffset(VmaAllocHandle allocHandle) const override { return (VkDeviceSize)allocHandle - 1; } + + void Init(VkDeviceSize size) override; + bool Validate() const override; + size_t GetAllocationCount() const override; + size_t GetFreeRegionsCount() const override; + + void AddDetailedStatistics(VmaDetailedStatistics& inoutStats) const override; + void AddStatistics(VmaStatistics& inoutStats) const override; + +#if VMA_STATS_STRING_ENABLED + void PrintDetailedMap(class VmaJsonWriter& json) const override; +#endif + + bool CreateAllocationRequest( + VkDeviceSize allocSize, + VkDeviceSize allocAlignment, + bool upperAddress, + VmaSuballocationType allocType, + uint32_t strategy, + VmaAllocationRequest* pAllocationRequest) override; + + VkResult CheckCorruption(const void* pBlockData) override; + + void Alloc( + const VmaAllocationRequest& request, + VmaSuballocationType type, + void* userData) override; + + void Free(VmaAllocHandle allocHandle) override; + void GetAllocationInfo(VmaAllocHandle allocHandle, VmaVirtualAllocationInfo& outInfo) override; + void* GetAllocationUserData(VmaAllocHandle allocHandle) const override; + VmaAllocHandle GetAllocationListBegin() const override; + VmaAllocHandle GetNextAllocation(VmaAllocHandle prevAlloc) const override; + VkDeviceSize GetNextFreeRegionSize(VmaAllocHandle alloc) const override; + void Clear() override; + void SetAllocationUserData(VmaAllocHandle allocHandle, void* userData) override; + void DebugLogAllAllocations() const override; + +private: + /* + There are two suballocation vectors, used in ping-pong way. + The one with index m_1stVectorIndex is called 1st. + The one with index (m_1stVectorIndex ^ 1) is called 2nd. + 2nd can be non-empty only when 1st is not empty. + When 2nd is not empty, m_2ndVectorMode indicates its mode of operation. + */ + typedef VmaVector> SuballocationVectorType; + + enum SECOND_VECTOR_MODE + { + SECOND_VECTOR_EMPTY, + /* + Suballocations in 2nd vector are created later than the ones in 1st, but they + all have smaller offset. + */ + SECOND_VECTOR_RING_BUFFER, + /* + Suballocations in 2nd vector are upper side of double stack. + They all have offsets higher than those in 1st vector. + Top of this stack means smaller offsets, but higher indices in this vector. + */ + SECOND_VECTOR_DOUBLE_STACK, + }; + + VkDeviceSize m_SumFreeSize; + SuballocationVectorType m_Suballocations0, m_Suballocations1; + uint32_t m_1stVectorIndex; + SECOND_VECTOR_MODE m_2ndVectorMode; + // Number of items in 1st vector with hAllocation = null at the beginning. + size_t m_1stNullItemsBeginCount; + // Number of other items in 1st vector with hAllocation = null somewhere in the middle. + size_t m_1stNullItemsMiddleCount; + // Number of items in 2nd vector with hAllocation = null. 
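+    // A minimal sketch (not part of the library) of the "double stack" case pictured
+    // in the diagrams above, reduced to plain offsets: one stack grows up from 0
+    // (the 1st vector), the other grows down from the block size (the 2nd vector in
+    // SECOND_VECTOR_DOUBLE_STACK), and a request fails when the two would meet.
+    // DoubleStackSketch and its members are hypothetical names; alignment is assumed > 0.
+    struct DoubleStackSketch
+    {
+        VkDeviceSize lowerTop = 0;     // end of the last lower-stack allocation
+        VkDeviceSize upperBottom = 0;  // start of the lowest upper-stack allocation
+
+        void Init(VkDeviceSize blockSize) { lowerTop = 0; upperBottom = blockSize; }
+
+        bool AllocLower(VkDeviceSize size, VkDeviceSize alignment, VkDeviceSize* pOffset)
+        {
+            // Align the next free offset up and check it still ends below the upper stack.
+            const VkDeviceSize offset = (lowerTop + alignment - 1) / alignment * alignment;
+            if (offset + size > upperBottom)
+                return false;
+            lowerTop = offset + size;
+            *pOffset = offset;
+            return true;
+        }
+
+        bool AllocUpper(VkDeviceSize size, VkDeviceSize alignment, VkDeviceSize* pOffset)
+        {
+            // Place the allocation just below the current upper stack, aligned down,
+            // and check it does not run into the lower stack.
+            if (upperBottom < size)
+                return false;
+            const VkDeviceSize offset = (upperBottom - size) / alignment * alignment;
+            if (offset < lowerTop)
+                return false;
+            upperBottom = offset;
+            *pOffset = offset;
+            return true;
+        }
+    };
+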
+    size_t m_2ndNullItemsCount;
+
+    SuballocationVectorType& AccessSuballocations1st() { return m_1stVectorIndex ? m_Suballocations1 : m_Suballocations0; }
+    SuballocationVectorType& AccessSuballocations2nd() { return m_1stVectorIndex ? m_Suballocations0 : m_Suballocations1; }
+    const SuballocationVectorType& AccessSuballocations1st() const { return m_1stVectorIndex ? m_Suballocations1 : m_Suballocations0; }
+    const SuballocationVectorType& AccessSuballocations2nd() const { return m_1stVectorIndex ? m_Suballocations0 : m_Suballocations1; }
+
+    VmaSuballocation& FindSuballocation(VkDeviceSize offset) const;
+    bool ShouldCompact1st() const;
+    void CleanupAfterFree();
+
+    bool CreateAllocationRequest_LowerAddress(
+        VkDeviceSize allocSize,
+        VkDeviceSize allocAlignment,
+        VmaSuballocationType allocType,
+        uint32_t strategy,
+        VmaAllocationRequest* pAllocationRequest);
+    bool CreateAllocationRequest_UpperAddress(
+        VkDeviceSize allocSize,
+        VkDeviceSize allocAlignment,
+        VmaSuballocationType allocType,
+        uint32_t strategy,
+        VmaAllocationRequest* pAllocationRequest);
+};
+
+#ifndef _VMA_BLOCK_METADATA_LINEAR_FUNCTIONS
+VmaBlockMetadata_Linear::VmaBlockMetadata_Linear(const VkAllocationCallbacks* pAllocationCallbacks,
+    VkDeviceSize bufferImageGranularity, bool isVirtual)
+    : VmaBlockMetadata(pAllocationCallbacks, bufferImageGranularity, isVirtual),
+    m_SumFreeSize(0),
+    m_Suballocations0(VmaStlAllocator<VmaSuballocation>(pAllocationCallbacks)),
+    m_Suballocations1(VmaStlAllocator<VmaSuballocation>(pAllocationCallbacks)),
+    m_1stVectorIndex(0),
+    m_2ndVectorMode(SECOND_VECTOR_EMPTY),
+    m_1stNullItemsBeginCount(0),
+    m_1stNullItemsMiddleCount(0),
+    m_2ndNullItemsCount(0) {}
+
+void VmaBlockMetadata_Linear::Init(VkDeviceSize size)
+{
+    VmaBlockMetadata::Init(size);
+    m_SumFreeSize = size;
+}
+
+bool VmaBlockMetadata_Linear::Validate() const
+{
+    const SuballocationVectorType& suballocations1st = AccessSuballocations1st();
+    const SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
+
+    VMA_VALIDATE(suballocations2nd.empty() == (m_2ndVectorMode == SECOND_VECTOR_EMPTY));
+    VMA_VALIDATE(!suballocations1st.empty() ||
+        suballocations2nd.empty() ||
+        m_2ndVectorMode != SECOND_VECTOR_RING_BUFFER);
+
+    if (!suballocations1st.empty())
+    {
+        // Null item at the beginning should be accounted into m_1stNullItemsBeginCount.
+        VMA_VALIDATE(suballocations1st[m_1stNullItemsBeginCount].type != VMA_SUBALLOCATION_TYPE_FREE);
+        // Null item at the end should be just pop_back().
+        VMA_VALIDATE(suballocations1st.back().type != VMA_SUBALLOCATION_TYPE_FREE);
+    }
+    if (!suballocations2nd.empty())
+    {
+        // Null item at the end should be just pop_back().
+ VMA_VALIDATE(suballocations2nd.back().type != VMA_SUBALLOCATION_TYPE_FREE); + } + + VMA_VALIDATE(m_1stNullItemsBeginCount + m_1stNullItemsMiddleCount <= suballocations1st.size()); + VMA_VALIDATE(m_2ndNullItemsCount <= suballocations2nd.size()); + + VkDeviceSize sumUsedSize = 0; + const size_t suballoc1stCount = suballocations1st.size(); + const VkDeviceSize debugMargin = GetDebugMargin(); + VkDeviceSize offset = 0; + + if (m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER) + { + const size_t suballoc2ndCount = suballocations2nd.size(); + size_t nullItem2ndCount = 0; + for (size_t i = 0; i < suballoc2ndCount; ++i) + { + const VmaSuballocation& suballoc = suballocations2nd[i]; + const bool currFree = (suballoc.type == VMA_SUBALLOCATION_TYPE_FREE); + + VmaAllocation const alloc = (VmaAllocation)suballoc.userData; + if (!IsVirtual()) + { + VMA_VALIDATE(currFree == (alloc == VK_NULL_HANDLE)); + } + VMA_VALIDATE(suballoc.offset >= offset); + + if (!currFree) + { + if (!IsVirtual()) + { + VMA_VALIDATE((VkDeviceSize)alloc->GetAllocHandle() == suballoc.offset + 1); + VMA_VALIDATE(alloc->GetSize() == suballoc.size); + } + sumUsedSize += suballoc.size; + } + else + { + ++nullItem2ndCount; + } + + offset = suballoc.offset + suballoc.size + debugMargin; + } + + VMA_VALIDATE(nullItem2ndCount == m_2ndNullItemsCount); + } + + for (size_t i = 0; i < m_1stNullItemsBeginCount; ++i) + { + const VmaSuballocation& suballoc = suballocations1st[i]; + VMA_VALIDATE(suballoc.type == VMA_SUBALLOCATION_TYPE_FREE && + suballoc.userData == VMA_NULL); + } + + size_t nullItem1stCount = m_1stNullItemsBeginCount; + + for (size_t i = m_1stNullItemsBeginCount; i < suballoc1stCount; ++i) + { + const VmaSuballocation& suballoc = suballocations1st[i]; + const bool currFree = (suballoc.type == VMA_SUBALLOCATION_TYPE_FREE); + + VmaAllocation const alloc = (VmaAllocation)suballoc.userData; + if (!IsVirtual()) + { + VMA_VALIDATE(currFree == (alloc == VK_NULL_HANDLE)); + } + VMA_VALIDATE(suballoc.offset >= offset); + VMA_VALIDATE(i >= m_1stNullItemsBeginCount || currFree); + + if (!currFree) + { + if (!IsVirtual()) + { + VMA_VALIDATE((VkDeviceSize)alloc->GetAllocHandle() == suballoc.offset + 1); + VMA_VALIDATE(alloc->GetSize() == suballoc.size); + } + sumUsedSize += suballoc.size; + } + else + { + ++nullItem1stCount; + } + + offset = suballoc.offset + suballoc.size + debugMargin; + } + VMA_VALIDATE(nullItem1stCount == m_1stNullItemsBeginCount + m_1stNullItemsMiddleCount); + + if (m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK) + { + const size_t suballoc2ndCount = suballocations2nd.size(); + size_t nullItem2ndCount = 0; + for (size_t i = suballoc2ndCount; i--; ) + { + const VmaSuballocation& suballoc = suballocations2nd[i]; + const bool currFree = (suballoc.type == VMA_SUBALLOCATION_TYPE_FREE); + + VmaAllocation const alloc = (VmaAllocation)suballoc.userData; + if (!IsVirtual()) + { + VMA_VALIDATE(currFree == (alloc == VK_NULL_HANDLE)); + } + VMA_VALIDATE(suballoc.offset >= offset); + + if (!currFree) + { + if (!IsVirtual()) + { + VMA_VALIDATE((VkDeviceSize)alloc->GetAllocHandle() == suballoc.offset + 1); + VMA_VALIDATE(alloc->GetSize() == suballoc.size); + } + sumUsedSize += suballoc.size; + } + else + { + ++nullItem2ndCount; + } + + offset = suballoc.offset + suballoc.size + debugMargin; + } + + VMA_VALIDATE(nullItem2ndCount == m_2ndNullItemsCount); + } + + VMA_VALIDATE(offset <= GetSize()); + VMA_VALIDATE(m_SumFreeSize == GetSize() - sumUsedSize); + + return true; +} + +size_t VmaBlockMetadata_Linear::GetAllocationCount() const +{ + 
return AccessSuballocations1st().size() - m_1stNullItemsBeginCount - m_1stNullItemsMiddleCount + + AccessSuballocations2nd().size() - m_2ndNullItemsCount; +} + +size_t VmaBlockMetadata_Linear::GetFreeRegionsCount() const +{ + // Function only used for defragmentation, which is disabled for this algorithm + VMA_ASSERT(0); + return SIZE_MAX; +} + +void VmaBlockMetadata_Linear::AddDetailedStatistics(VmaDetailedStatistics& inoutStats) const +{ + const VkDeviceSize size = GetSize(); + const SuballocationVectorType& suballocations1st = AccessSuballocations1st(); + const SuballocationVectorType& suballocations2nd = AccessSuballocations2nd(); + const size_t suballoc1stCount = suballocations1st.size(); + const size_t suballoc2ndCount = suballocations2nd.size(); + + inoutStats.statistics.blockCount++; + inoutStats.statistics.blockBytes += size; + + VkDeviceSize lastOffset = 0; + + if (m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER) + { + const VkDeviceSize freeSpace2ndTo1stEnd = suballocations1st[m_1stNullItemsBeginCount].offset; + size_t nextAlloc2ndIndex = 0; + while (lastOffset < freeSpace2ndTo1stEnd) + { + // Find next non-null allocation or move nextAllocIndex to the end. + while (nextAlloc2ndIndex < suballoc2ndCount && + suballocations2nd[nextAlloc2ndIndex].userData == VMA_NULL) + { + ++nextAlloc2ndIndex; + } + + // Found non-null allocation. + if (nextAlloc2ndIndex < suballoc2ndCount) + { + const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex]; + + // 1. Process free space before this allocation. + if (lastOffset < suballoc.offset) + { + // There is free space from lastOffset to suballoc.offset. + const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset; + VmaAddDetailedStatisticsUnusedRange(inoutStats, unusedRangeSize); + } + + // 2. Process this allocation. + // There is allocation with suballoc.offset, suballoc.size. + VmaAddDetailedStatisticsAllocation(inoutStats, suballoc.size); + + // 3. Prepare for next iteration. + lastOffset = suballoc.offset + suballoc.size; + ++nextAlloc2ndIndex; + } + // We are at the end. + else + { + // There is free space from lastOffset to freeSpace2ndTo1stEnd. + if (lastOffset < freeSpace2ndTo1stEnd) + { + const VkDeviceSize unusedRangeSize = freeSpace2ndTo1stEnd - lastOffset; + VmaAddDetailedStatisticsUnusedRange(inoutStats, unusedRangeSize); + } + + // End of loop. + lastOffset = freeSpace2ndTo1stEnd; + } + } + } + + size_t nextAlloc1stIndex = m_1stNullItemsBeginCount; + const VkDeviceSize freeSpace1stTo2ndEnd = + m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK ? suballocations2nd.back().offset : size; + while (lastOffset < freeSpace1stTo2ndEnd) + { + // Find next non-null allocation or move nextAllocIndex to the end. + while (nextAlloc1stIndex < suballoc1stCount && + suballocations1st[nextAlloc1stIndex].userData == VMA_NULL) + { + ++nextAlloc1stIndex; + } + + // Found non-null allocation. + if (nextAlloc1stIndex < suballoc1stCount) + { + const VmaSuballocation& suballoc = suballocations1st[nextAlloc1stIndex]; + + // 1. Process free space before this allocation. + if (lastOffset < suballoc.offset) + { + // There is free space from lastOffset to suballoc.offset. + const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset; + VmaAddDetailedStatisticsUnusedRange(inoutStats, unusedRangeSize); + } + + // 2. Process this allocation. + // There is allocation with suballoc.offset, suballoc.size. + VmaAddDetailedStatisticsAllocation(inoutStats, suballoc.size); + + // 3. Prepare for next iteration. 
+ lastOffset = suballoc.offset + suballoc.size; + ++nextAlloc1stIndex; + } + // We are at the end. + else + { + // There is free space from lastOffset to freeSpace1stTo2ndEnd. + if (lastOffset < freeSpace1stTo2ndEnd) + { + const VkDeviceSize unusedRangeSize = freeSpace1stTo2ndEnd - lastOffset; + VmaAddDetailedStatisticsUnusedRange(inoutStats, unusedRangeSize); + } + + // End of loop. + lastOffset = freeSpace1stTo2ndEnd; + } + } + + if (m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK) + { + size_t nextAlloc2ndIndex = suballocations2nd.size() - 1; + while (lastOffset < size) + { + // Find next non-null allocation or move nextAllocIndex to the end. + while (nextAlloc2ndIndex != SIZE_MAX && + suballocations2nd[nextAlloc2ndIndex].userData == VMA_NULL) + { + --nextAlloc2ndIndex; + } + + // Found non-null allocation. + if (nextAlloc2ndIndex != SIZE_MAX) + { + const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex]; + + // 1. Process free space before this allocation. + if (lastOffset < suballoc.offset) + { + // There is free space from lastOffset to suballoc.offset. + const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset; + VmaAddDetailedStatisticsUnusedRange(inoutStats, unusedRangeSize); + } + + // 2. Process this allocation. + // There is allocation with suballoc.offset, suballoc.size. + VmaAddDetailedStatisticsAllocation(inoutStats, suballoc.size); + + // 3. Prepare for next iteration. + lastOffset = suballoc.offset + suballoc.size; + --nextAlloc2ndIndex; + } + // We are at the end. + else + { + // There is free space from lastOffset to size. + if (lastOffset < size) + { + const VkDeviceSize unusedRangeSize = size - lastOffset; + VmaAddDetailedStatisticsUnusedRange(inoutStats, unusedRangeSize); + } + + // End of loop. + lastOffset = size; + } + } + } +} + +void VmaBlockMetadata_Linear::AddStatistics(VmaStatistics& inoutStats) const +{ + const SuballocationVectorType& suballocations1st = AccessSuballocations1st(); + const SuballocationVectorType& suballocations2nd = AccessSuballocations2nd(); + const VkDeviceSize size = GetSize(); + const size_t suballoc1stCount = suballocations1st.size(); + const size_t suballoc2ndCount = suballocations2nd.size(); + + inoutStats.blockCount++; + inoutStats.blockBytes += size; + inoutStats.allocationBytes += size - m_SumFreeSize; + + VkDeviceSize lastOffset = 0; + + if (m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER) + { + const VkDeviceSize freeSpace2ndTo1stEnd = suballocations1st[m_1stNullItemsBeginCount].offset; + size_t nextAlloc2ndIndex = m_1stNullItemsBeginCount; + while (lastOffset < freeSpace2ndTo1stEnd) + { + // Find next non-null allocation or move nextAlloc2ndIndex to the end. + while (nextAlloc2ndIndex < suballoc2ndCount && + suballocations2nd[nextAlloc2ndIndex].userData == VMA_NULL) + { + ++nextAlloc2ndIndex; + } + + // Found non-null allocation. + if (nextAlloc2ndIndex < suballoc2ndCount) + { + const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex]; + + // Process this allocation. + // There is allocation with suballoc.offset, suballoc.size. + ++inoutStats.allocationCount; + + // Prepare for next iteration. + lastOffset = suballoc.offset + suballoc.size; + ++nextAlloc2ndIndex; + } + // We are at the end. + else + { + // End of loop. + lastOffset = freeSpace2ndTo1stEnd; + } + } + } + + size_t nextAlloc1stIndex = m_1stNullItemsBeginCount; + const VkDeviceSize freeSpace1stTo2ndEnd = + m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK ? 
suballocations2nd.back().offset : size; + while (lastOffset < freeSpace1stTo2ndEnd) + { + // Find next non-null allocation or move nextAllocIndex to the end. + while (nextAlloc1stIndex < suballoc1stCount && + suballocations1st[nextAlloc1stIndex].userData == VMA_NULL) + { + ++nextAlloc1stIndex; + } + + // Found non-null allocation. + if (nextAlloc1stIndex < suballoc1stCount) + { + const VmaSuballocation& suballoc = suballocations1st[nextAlloc1stIndex]; + + // Process this allocation. + // There is allocation with suballoc.offset, suballoc.size. + ++inoutStats.allocationCount; + + // Prepare for next iteration. + lastOffset = suballoc.offset + suballoc.size; + ++nextAlloc1stIndex; + } + // We are at the end. + else + { + // End of loop. + lastOffset = freeSpace1stTo2ndEnd; + } + } + + if (m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK) + { + size_t nextAlloc2ndIndex = suballocations2nd.size() - 1; + while (lastOffset < size) + { + // Find next non-null allocation or move nextAlloc2ndIndex to the end. + while (nextAlloc2ndIndex != SIZE_MAX && + suballocations2nd[nextAlloc2ndIndex].userData == VMA_NULL) + { + --nextAlloc2ndIndex; + } + + // Found non-null allocation. + if (nextAlloc2ndIndex != SIZE_MAX) + { + const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex]; + + // Process this allocation. + // There is allocation with suballoc.offset, suballoc.size. + ++inoutStats.allocationCount; + + // Prepare for next iteration. + lastOffset = suballoc.offset + suballoc.size; + --nextAlloc2ndIndex; + } + // We are at the end. + else + { + // End of loop. + lastOffset = size; + } + } + } +} + +#if VMA_STATS_STRING_ENABLED +void VmaBlockMetadata_Linear::PrintDetailedMap(class VmaJsonWriter& json) const +{ + const VkDeviceSize size = GetSize(); + const SuballocationVectorType& suballocations1st = AccessSuballocations1st(); + const SuballocationVectorType& suballocations2nd = AccessSuballocations2nd(); + const size_t suballoc1stCount = suballocations1st.size(); + const size_t suballoc2ndCount = suballocations2nd.size(); + + // FIRST PASS + + size_t unusedRangeCount = 0; + VkDeviceSize usedBytes = 0; + + VkDeviceSize lastOffset = 0; + + size_t alloc2ndCount = 0; + if (m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER) + { + const VkDeviceSize freeSpace2ndTo1stEnd = suballocations1st[m_1stNullItemsBeginCount].offset; + size_t nextAlloc2ndIndex = 0; + while (lastOffset < freeSpace2ndTo1stEnd) + { + // Find next non-null allocation or move nextAlloc2ndIndex to the end. + while (nextAlloc2ndIndex < suballoc2ndCount && + suballocations2nd[nextAlloc2ndIndex].userData == VMA_NULL) + { + ++nextAlloc2ndIndex; + } + + // Found non-null allocation. + if (nextAlloc2ndIndex < suballoc2ndCount) + { + const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex]; + + // 1. Process free space before this allocation. + if (lastOffset < suballoc.offset) + { + // There is free space from lastOffset to suballoc.offset. + ++unusedRangeCount; + } + + // 2. Process this allocation. + // There is allocation with suballoc.offset, suballoc.size. + ++alloc2ndCount; + usedBytes += suballoc.size; + + // 3. Prepare for next iteration. + lastOffset = suballoc.offset + suballoc.size; + ++nextAlloc2ndIndex; + } + // We are at the end. + else + { + if (lastOffset < freeSpace2ndTo1stEnd) + { + // There is free space from lastOffset to freeSpace2ndTo1stEnd. + ++unusedRangeCount; + } + + // End of loop. 
+ lastOffset = freeSpace2ndTo1stEnd; + } + } + } + + size_t nextAlloc1stIndex = m_1stNullItemsBeginCount; + size_t alloc1stCount = 0; + const VkDeviceSize freeSpace1stTo2ndEnd = + m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK ? suballocations2nd.back().offset : size; + while (lastOffset < freeSpace1stTo2ndEnd) + { + // Find next non-null allocation or move nextAllocIndex to the end. + while (nextAlloc1stIndex < suballoc1stCount && + suballocations1st[nextAlloc1stIndex].userData == VMA_NULL) + { + ++nextAlloc1stIndex; + } + + // Found non-null allocation. + if (nextAlloc1stIndex < suballoc1stCount) + { + const VmaSuballocation& suballoc = suballocations1st[nextAlloc1stIndex]; + + // 1. Process free space before this allocation. + if (lastOffset < suballoc.offset) + { + // There is free space from lastOffset to suballoc.offset. + ++unusedRangeCount; + } + + // 2. Process this allocation. + // There is allocation with suballoc.offset, suballoc.size. + ++alloc1stCount; + usedBytes += suballoc.size; + + // 3. Prepare for next iteration. + lastOffset = suballoc.offset + suballoc.size; + ++nextAlloc1stIndex; + } + // We are at the end. + else + { + if (lastOffset < freeSpace1stTo2ndEnd) + { + // There is free space from lastOffset to freeSpace1stTo2ndEnd. + ++unusedRangeCount; + } + + // End of loop. + lastOffset = freeSpace1stTo2ndEnd; + } + } + + if (m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK) + { + size_t nextAlloc2ndIndex = suballocations2nd.size() - 1; + while (lastOffset < size) + { + // Find next non-null allocation or move nextAlloc2ndIndex to the end. + while (nextAlloc2ndIndex != SIZE_MAX && + suballocations2nd[nextAlloc2ndIndex].userData == VMA_NULL) + { + --nextAlloc2ndIndex; + } + + // Found non-null allocation. + if (nextAlloc2ndIndex != SIZE_MAX) + { + const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex]; + + // 1. Process free space before this allocation. + if (lastOffset < suballoc.offset) + { + // There is free space from lastOffset to suballoc.offset. + ++unusedRangeCount; + } + + // 2. Process this allocation. + // There is allocation with suballoc.offset, suballoc.size. + ++alloc2ndCount; + usedBytes += suballoc.size; + + // 3. Prepare for next iteration. + lastOffset = suballoc.offset + suballoc.size; + --nextAlloc2ndIndex; + } + // We are at the end. + else + { + if (lastOffset < size) + { + // There is free space from lastOffset to size. + ++unusedRangeCount; + } + + // End of loop. + lastOffset = size; + } + } + } + + const VkDeviceSize unusedBytes = size - usedBytes; + PrintDetailedMap_Begin(json, unusedBytes, alloc1stCount + alloc2ndCount, unusedRangeCount); + + // SECOND PASS + lastOffset = 0; + + if (m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER) + { + const VkDeviceSize freeSpace2ndTo1stEnd = suballocations1st[m_1stNullItemsBeginCount].offset; + size_t nextAlloc2ndIndex = 0; + while (lastOffset < freeSpace2ndTo1stEnd) + { + // Find next non-null allocation or move nextAlloc2ndIndex to the end. + while (nextAlloc2ndIndex < suballoc2ndCount && + suballocations2nd[nextAlloc2ndIndex].userData == VMA_NULL) + { + ++nextAlloc2ndIndex; + } + + // Found non-null allocation. + if (nextAlloc2ndIndex < suballoc2ndCount) + { + const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex]; + + // 1. Process free space before this allocation. + if (lastOffset < suballoc.offset) + { + // There is free space from lastOffset to suballoc.offset. 
+ const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset; + PrintDetailedMap_UnusedRange(json, lastOffset, unusedRangeSize); + } + + // 2. Process this allocation. + // There is allocation with suballoc.offset, suballoc.size. + PrintDetailedMap_Allocation(json, suballoc.offset, suballoc.size, suballoc.userData); + + // 3. Prepare for next iteration. + lastOffset = suballoc.offset + suballoc.size; + ++nextAlloc2ndIndex; + } + // We are at the end. + else + { + if (lastOffset < freeSpace2ndTo1stEnd) + { + // There is free space from lastOffset to freeSpace2ndTo1stEnd. + const VkDeviceSize unusedRangeSize = freeSpace2ndTo1stEnd - lastOffset; + PrintDetailedMap_UnusedRange(json, lastOffset, unusedRangeSize); + } + + // End of loop. + lastOffset = freeSpace2ndTo1stEnd; + } + } + } + + nextAlloc1stIndex = m_1stNullItemsBeginCount; + while (lastOffset < freeSpace1stTo2ndEnd) + { + // Find next non-null allocation or move nextAllocIndex to the end. + while (nextAlloc1stIndex < suballoc1stCount && + suballocations1st[nextAlloc1stIndex].userData == VMA_NULL) + { + ++nextAlloc1stIndex; + } + + // Found non-null allocation. + if (nextAlloc1stIndex < suballoc1stCount) + { + const VmaSuballocation& suballoc = suballocations1st[nextAlloc1stIndex]; + + // 1. Process free space before this allocation. + if (lastOffset < suballoc.offset) + { + // There is free space from lastOffset to suballoc.offset. + const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset; + PrintDetailedMap_UnusedRange(json, lastOffset, unusedRangeSize); + } + + // 2. Process this allocation. + // There is allocation with suballoc.offset, suballoc.size. + PrintDetailedMap_Allocation(json, suballoc.offset, suballoc.size, suballoc.userData); + + // 3. Prepare for next iteration. + lastOffset = suballoc.offset + suballoc.size; + ++nextAlloc1stIndex; + } + // We are at the end. + else + { + if (lastOffset < freeSpace1stTo2ndEnd) + { + // There is free space from lastOffset to freeSpace1stTo2ndEnd. + const VkDeviceSize unusedRangeSize = freeSpace1stTo2ndEnd - lastOffset; + PrintDetailedMap_UnusedRange(json, lastOffset, unusedRangeSize); + } + + // End of loop. + lastOffset = freeSpace1stTo2ndEnd; + } + } + + if (m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK) + { + size_t nextAlloc2ndIndex = suballocations2nd.size() - 1; + while (lastOffset < size) + { + // Find next non-null allocation or move nextAlloc2ndIndex to the end. + while (nextAlloc2ndIndex != SIZE_MAX && + suballocations2nd[nextAlloc2ndIndex].userData == VMA_NULL) + { + --nextAlloc2ndIndex; + } + + // Found non-null allocation. + if (nextAlloc2ndIndex != SIZE_MAX) + { + const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex]; + + // 1. Process free space before this allocation. + if (lastOffset < suballoc.offset) + { + // There is free space from lastOffset to suballoc.offset. + const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset; + PrintDetailedMap_UnusedRange(json, lastOffset, unusedRangeSize); + } + + // 2. Process this allocation. + // There is allocation with suballoc.offset, suballoc.size. + PrintDetailedMap_Allocation(json, suballoc.offset, suballoc.size, suballoc.userData); + + // 3. Prepare for next iteration. + lastOffset = suballoc.offset + suballoc.size; + --nextAlloc2ndIndex; + } + // We are at the end. + else + { + if (lastOffset < size) + { + // There is free space from lastOffset to size. 
+ const VkDeviceSize unusedRangeSize = size - lastOffset; + PrintDetailedMap_UnusedRange(json, lastOffset, unusedRangeSize); + } + + // End of loop. + lastOffset = size; + } + } + } + + PrintDetailedMap_End(json); +} +#endif // VMA_STATS_STRING_ENABLED + +bool VmaBlockMetadata_Linear::CreateAllocationRequest( + VkDeviceSize allocSize, + VkDeviceSize allocAlignment, + bool upperAddress, + VmaSuballocationType allocType, + uint32_t strategy, + VmaAllocationRequest* pAllocationRequest) +{ + VMA_ASSERT(allocSize > 0); + VMA_ASSERT(allocType != VMA_SUBALLOCATION_TYPE_FREE); + VMA_ASSERT(pAllocationRequest != VMA_NULL); + VMA_HEAVY_ASSERT(Validate()); + + if(allocSize > GetSize()) + return false; + + pAllocationRequest->size = allocSize; + return upperAddress ? + CreateAllocationRequest_UpperAddress( + allocSize, allocAlignment, allocType, strategy, pAllocationRequest) : + CreateAllocationRequest_LowerAddress( + allocSize, allocAlignment, allocType, strategy, pAllocationRequest); +} + +VkResult VmaBlockMetadata_Linear::CheckCorruption(const void* pBlockData) +{ + VMA_ASSERT(!IsVirtual()); + SuballocationVectorType& suballocations1st = AccessSuballocations1st(); + for (size_t i = m_1stNullItemsBeginCount, count = suballocations1st.size(); i < count; ++i) + { + const VmaSuballocation& suballoc = suballocations1st[i]; + if (suballoc.type != VMA_SUBALLOCATION_TYPE_FREE) + { + if (!VmaValidateMagicValue(pBlockData, suballoc.offset + suballoc.size)) + { + VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED AFTER VALIDATED ALLOCATION!"); + return VK_ERROR_UNKNOWN_COPY; + } + } + } + + SuballocationVectorType& suballocations2nd = AccessSuballocations2nd(); + for (size_t i = 0, count = suballocations2nd.size(); i < count; ++i) + { + const VmaSuballocation& suballoc = suballocations2nd[i]; + if (suballoc.type != VMA_SUBALLOCATION_TYPE_FREE) + { + if (!VmaValidateMagicValue(pBlockData, suballoc.offset + suballoc.size)) + { + VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED AFTER VALIDATED ALLOCATION!"); + return VK_ERROR_UNKNOWN_COPY; + } + } + } + + return VK_SUCCESS; +} + +void VmaBlockMetadata_Linear::Alloc( + const VmaAllocationRequest& request, + VmaSuballocationType type, + void* userData) +{ + const VkDeviceSize offset = (VkDeviceSize)request.allocHandle - 1; + const VmaSuballocation newSuballoc = { offset, request.size, userData, type }; + + switch (request.type) + { + case VmaAllocationRequestType::UpperAddress: + { + VMA_ASSERT(m_2ndVectorMode != SECOND_VECTOR_RING_BUFFER && + "CRITICAL ERROR: Trying to use linear allocator as double stack while it was already used as ring buffer."); + SuballocationVectorType& suballocations2nd = AccessSuballocations2nd(); + suballocations2nd.push_back(newSuballoc); + m_2ndVectorMode = SECOND_VECTOR_DOUBLE_STACK; + } + break; + case VmaAllocationRequestType::EndOf1st: + { + SuballocationVectorType& suballocations1st = AccessSuballocations1st(); + + VMA_ASSERT(suballocations1st.empty() || + offset >= suballocations1st.back().offset + suballocations1st.back().size); + // Check if it fits before the end of the block. + VMA_ASSERT(offset + request.size <= GetSize()); + + suballocations1st.push_back(newSuballoc); + } + break; + case VmaAllocationRequestType::EndOf2nd: + { + SuballocationVectorType& suballocations1st = AccessSuballocations1st(); + // New allocation at the end of 2-part ring buffer, so before first allocation from 1st vector. 
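+        // Worked example with illustrative numbers: if the first still-used suballocation
+        // of the 1st vector sits at offset 512 and the 2nd (ring) vector currently covers
+        // [0, 128), a new 64-byte EndOf1st... rather EndOf2nd request is placed at offset
+        // 128, which is valid because 128 + 64 <= 512 - exactly the condition asserted below.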
+ VMA_ASSERT(!suballocations1st.empty() && + offset + request.size <= suballocations1st[m_1stNullItemsBeginCount].offset); + SuballocationVectorType& suballocations2nd = AccessSuballocations2nd(); + + switch (m_2ndVectorMode) + { + case SECOND_VECTOR_EMPTY: + // First allocation from second part ring buffer. + VMA_ASSERT(suballocations2nd.empty()); + m_2ndVectorMode = SECOND_VECTOR_RING_BUFFER; + break; + case SECOND_VECTOR_RING_BUFFER: + // 2-part ring buffer is already started. + VMA_ASSERT(!suballocations2nd.empty()); + break; + case SECOND_VECTOR_DOUBLE_STACK: + VMA_ASSERT(0 && "CRITICAL ERROR: Trying to use linear allocator as ring buffer while it was already used as double stack."); + break; + default: + VMA_ASSERT(0); + } + + suballocations2nd.push_back(newSuballoc); + } + break; + default: + VMA_ASSERT(0 && "CRITICAL INTERNAL ERROR."); + } + + m_SumFreeSize -= newSuballoc.size; +} + +void VmaBlockMetadata_Linear::Free(VmaAllocHandle allocHandle) +{ + SuballocationVectorType& suballocations1st = AccessSuballocations1st(); + SuballocationVectorType& suballocations2nd = AccessSuballocations2nd(); + VkDeviceSize offset = (VkDeviceSize)allocHandle - 1; + + if (!suballocations1st.empty()) + { + // First allocation: Mark it as next empty at the beginning. + VmaSuballocation& firstSuballoc = suballocations1st[m_1stNullItemsBeginCount]; + if (firstSuballoc.offset == offset) + { + firstSuballoc.type = VMA_SUBALLOCATION_TYPE_FREE; + firstSuballoc.userData = VMA_NULL; + m_SumFreeSize += firstSuballoc.size; + ++m_1stNullItemsBeginCount; + CleanupAfterFree(); + return; + } + } + + // Last allocation in 2-part ring buffer or top of upper stack (same logic). + if (m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER || + m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK) + { + VmaSuballocation& lastSuballoc = suballocations2nd.back(); + if (lastSuballoc.offset == offset) + { + m_SumFreeSize += lastSuballoc.size; + suballocations2nd.pop_back(); + CleanupAfterFree(); + return; + } + } + // Last allocation in 1st vector. + else if (m_2ndVectorMode == SECOND_VECTOR_EMPTY) + { + VmaSuballocation& lastSuballoc = suballocations1st.back(); + if (lastSuballoc.offset == offset) + { + m_SumFreeSize += lastSuballoc.size; + suballocations1st.pop_back(); + CleanupAfterFree(); + return; + } + } + + VmaSuballocation refSuballoc; + refSuballoc.offset = offset; + // Rest of members stays uninitialized intentionally for better performance. + + // Item from the middle of 1st vector. + { + const SuballocationVectorType::iterator it = VmaBinaryFindSorted( + suballocations1st.begin() + m_1stNullItemsBeginCount, + suballocations1st.end(), + refSuballoc, + VmaSuballocationOffsetLess()); + if (it != suballocations1st.end()) + { + it->type = VMA_SUBALLOCATION_TYPE_FREE; + it->userData = VMA_NULL; + ++m_1stNullItemsMiddleCount; + m_SumFreeSize += it->size; + CleanupAfterFree(); + return; + } + } + + if (m_2ndVectorMode != SECOND_VECTOR_EMPTY) + { + // Item from the middle of 2nd vector. + const SuballocationVectorType::iterator it = m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER ? 
+ VmaBinaryFindSorted(suballocations2nd.begin(), suballocations2nd.end(), refSuballoc, VmaSuballocationOffsetLess()) : + VmaBinaryFindSorted(suballocations2nd.begin(), suballocations2nd.end(), refSuballoc, VmaSuballocationOffsetGreater()); + if (it != suballocations2nd.end()) + { + it->type = VMA_SUBALLOCATION_TYPE_FREE; + it->userData = VMA_NULL; + ++m_2ndNullItemsCount; + m_SumFreeSize += it->size; + CleanupAfterFree(); + return; + } + } + + VMA_ASSERT(0 && "Allocation to free not found in linear allocator!"); +} + +void VmaBlockMetadata_Linear::GetAllocationInfo(VmaAllocHandle allocHandle, VmaVirtualAllocationInfo& outInfo) +{ + outInfo.offset = (VkDeviceSize)allocHandle - 1; + VmaSuballocation& suballoc = FindSuballocation(outInfo.offset); + outInfo.size = suballoc.size; + outInfo.pUserData = suballoc.userData; +} + +void* VmaBlockMetadata_Linear::GetAllocationUserData(VmaAllocHandle allocHandle) const +{ + return FindSuballocation((VkDeviceSize)allocHandle - 1).userData; +} + +VmaAllocHandle VmaBlockMetadata_Linear::GetAllocationListBegin() const +{ + // Function only used for defragmentation, which is disabled for this algorithm + VMA_ASSERT(0); + return VK_NULL_HANDLE; +} + +VmaAllocHandle VmaBlockMetadata_Linear::GetNextAllocation(VmaAllocHandle prevAlloc) const +{ + // Function only used for defragmentation, which is disabled for this algorithm + VMA_ASSERT(0); + return VK_NULL_HANDLE; +} + +VkDeviceSize VmaBlockMetadata_Linear::GetNextFreeRegionSize(VmaAllocHandle alloc) const +{ + // Function only used for defragmentation, which is disabled for this algorithm + VMA_ASSERT(0); + return 0; +} + +void VmaBlockMetadata_Linear::Clear() +{ + m_SumFreeSize = GetSize(); + m_Suballocations0.clear(); + m_Suballocations1.clear(); + // Leaving m_1stVectorIndex unchanged - it doesn't matter. + m_2ndVectorMode = SECOND_VECTOR_EMPTY; + m_1stNullItemsBeginCount = 0; + m_1stNullItemsMiddleCount = 0; + m_2ndNullItemsCount = 0; +} + +void VmaBlockMetadata_Linear::SetAllocationUserData(VmaAllocHandle allocHandle, void* userData) +{ + VmaSuballocation& suballoc = FindSuballocation((VkDeviceSize)allocHandle - 1); + suballoc.userData = userData; +} + +void VmaBlockMetadata_Linear::DebugLogAllAllocations() const +{ + const SuballocationVectorType& suballocations1st = AccessSuballocations1st(); + for (auto it = suballocations1st.begin() + m_1stNullItemsBeginCount; it != suballocations1st.end(); ++it) + if (it->type != VMA_SUBALLOCATION_TYPE_FREE) + DebugLogAllocation(it->offset, it->size, it->userData); + + const SuballocationVectorType& suballocations2nd = AccessSuballocations2nd(); + for (auto it = suballocations2nd.begin(); it != suballocations2nd.end(); ++it) + if (it->type != VMA_SUBALLOCATION_TYPE_FREE) + DebugLogAllocation(it->offset, it->size, it->userData); +} + +VmaSuballocation& VmaBlockMetadata_Linear::FindSuballocation(VkDeviceSize offset) const +{ + const SuballocationVectorType& suballocations1st = AccessSuballocations1st(); + const SuballocationVectorType& suballocations2nd = AccessSuballocations2nd(); + + VmaSuballocation refSuballoc; + refSuballoc.offset = offset; + // Rest of members stays uninitialized intentionally for better performance. + + // Item from the 1st vector. 
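+    // Live items in the 1st vector (and in a ring-buffer 2nd vector) are kept sorted by
+    // ascending offset, so the binary search below uses VmaSuballocationOffsetLess.
+    // In SECOND_VECTOR_DOUBLE_STACK mode the 2nd vector stores the upper stack with
+    // offsets decreasing as indices grow, which is why the search over suballocations2nd
+    // switches to VmaSuballocationOffsetGreater.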
+ {
+ SuballocationVectorType::const_iterator it = VmaBinaryFindSorted(
+ suballocations1st.begin() + m_1stNullItemsBeginCount,
+ suballocations1st.end(),
+ refSuballoc,
+ VmaSuballocationOffsetLess());
+ if (it != suballocations1st.end())
+ {
+ return const_cast<VmaSuballocation&>(*it);
+ }
+ }
+
+ if (m_2ndVectorMode != SECOND_VECTOR_EMPTY)
+ {
+ // Rest of members stays uninitialized intentionally for better performance.
+ SuballocationVectorType::const_iterator it = m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER ?
+ VmaBinaryFindSorted(suballocations2nd.begin(), suballocations2nd.end(), refSuballoc, VmaSuballocationOffsetLess()) :
+ VmaBinaryFindSorted(suballocations2nd.begin(), suballocations2nd.end(), refSuballoc, VmaSuballocationOffsetGreater());
+ if (it != suballocations2nd.end())
+ {
+ return const_cast<VmaSuballocation&>(*it);
+ }
+ }
+
+ VMA_ASSERT(0 && "Allocation not found in linear allocator!");
+ return const_cast<VmaSuballocation&>(suballocations1st.back()); // Should never occur.
+}
+
+bool VmaBlockMetadata_Linear::ShouldCompact1st() const
+{
+ const size_t nullItemCount = m_1stNullItemsBeginCount + m_1stNullItemsMiddleCount;
+ const size_t suballocCount = AccessSuballocations1st().size();
+ return suballocCount > 32 && nullItemCount * 2 >= (suballocCount - nullItemCount) * 3;
+}
+
+void VmaBlockMetadata_Linear::CleanupAfterFree()
+{
+ SuballocationVectorType& suballocations1st = AccessSuballocations1st();
+ SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
+
+ if (IsEmpty())
+ {
+ suballocations1st.clear();
+ suballocations2nd.clear();
+ m_1stNullItemsBeginCount = 0;
+ m_1stNullItemsMiddleCount = 0;
+ m_2ndNullItemsCount = 0;
+ m_2ndVectorMode = SECOND_VECTOR_EMPTY;
+ }
+ else
+ {
+ const size_t suballoc1stCount = suballocations1st.size();
+ const size_t nullItem1stCount = m_1stNullItemsBeginCount + m_1stNullItemsMiddleCount;
+ VMA_ASSERT(nullItem1stCount <= suballoc1stCount);
+
+ // Find more null items at the beginning of 1st vector.
+ while (m_1stNullItemsBeginCount < suballoc1stCount &&
+ suballocations1st[m_1stNullItemsBeginCount].type == VMA_SUBALLOCATION_TYPE_FREE)
+ {
+ ++m_1stNullItemsBeginCount;
+ --m_1stNullItemsMiddleCount;
+ }
+
+ // Find more null items at the end of 1st vector.
+ while (m_1stNullItemsMiddleCount > 0 &&
+ suballocations1st.back().type == VMA_SUBALLOCATION_TYPE_FREE)
+ {
+ --m_1stNullItemsMiddleCount;
+ suballocations1st.pop_back();
+ }
+
+ // Find more null items at the end of 2nd vector.
+ while (m_2ndNullItemsCount > 0 &&
+ suballocations2nd.back().type == VMA_SUBALLOCATION_TYPE_FREE)
+ {
+ --m_2ndNullItemsCount;
+ suballocations2nd.pop_back();
+ }
+
+ // Find more null items at the beginning of 2nd vector.
+ while (m_2ndNullItemsCount > 0 &&
+ suballocations2nd[0].type == VMA_SUBALLOCATION_TYPE_FREE)
+ {
+ --m_2ndNullItemsCount;
+ VmaVectorRemove(suballocations2nd, 0);
+ }
+
+ if (ShouldCompact1st())
+ {
+ const size_t nonNullItemCount = suballoc1stCount - nullItem1stCount;
+ size_t srcIndex = m_1stNullItemsBeginCount;
+ for (size_t dstIndex = 0; dstIndex < nonNullItemCount; ++dstIndex)
+ {
+ while (suballocations1st[srcIndex].type == VMA_SUBALLOCATION_TYPE_FREE)
+ {
+ ++srcIndex;
+ }
+ if (dstIndex != srcIndex)
+ {
+ suballocations1st[dstIndex] = suballocations1st[srcIndex];
+ }
+ ++srcIndex;
+ }
+ suballocations1st.resize(nonNullItemCount);
+ m_1stNullItemsBeginCount = 0;
+ m_1stNullItemsMiddleCount = 0;
+ }
+
+ // 2nd vector became empty.
+ if (suballocations2nd.empty())
+ {
+ m_2ndVectorMode = SECOND_VECTOR_EMPTY;
+ }
+
+ // 1st vector became empty. 
+ if (suballocations1st.size() - m_1stNullItemsBeginCount == 0) + { + suballocations1st.clear(); + m_1stNullItemsBeginCount = 0; + + if (!suballocations2nd.empty() && m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER) + { + // Swap 1st with 2nd. Now 2nd is empty. + m_2ndVectorMode = SECOND_VECTOR_EMPTY; + m_1stNullItemsMiddleCount = m_2ndNullItemsCount; + while (m_1stNullItemsBeginCount < suballocations2nd.size() && + suballocations2nd[m_1stNullItemsBeginCount].type == VMA_SUBALLOCATION_TYPE_FREE) + { + ++m_1stNullItemsBeginCount; + --m_1stNullItemsMiddleCount; + } + m_2ndNullItemsCount = 0; + m_1stVectorIndex ^= 1; + } + } + } + + VMA_HEAVY_ASSERT(Validate()); +} + +bool VmaBlockMetadata_Linear::CreateAllocationRequest_LowerAddress( + VkDeviceSize allocSize, + VkDeviceSize allocAlignment, + VmaSuballocationType allocType, + uint32_t strategy, + VmaAllocationRequest* pAllocationRequest) +{ + const VkDeviceSize blockSize = GetSize(); + const VkDeviceSize debugMargin = GetDebugMargin(); + const VkDeviceSize bufferImageGranularity = GetBufferImageGranularity(); + SuballocationVectorType& suballocations1st = AccessSuballocations1st(); + SuballocationVectorType& suballocations2nd = AccessSuballocations2nd(); + + if (m_2ndVectorMode == SECOND_VECTOR_EMPTY || m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK) + { + // Try to allocate at the end of 1st vector. + + VkDeviceSize resultBaseOffset = 0; + if (!suballocations1st.empty()) + { + const VmaSuballocation& lastSuballoc = suballocations1st.back(); + resultBaseOffset = lastSuballoc.offset + lastSuballoc.size + debugMargin; + } + + // Start from offset equal to beginning of free space. + VkDeviceSize resultOffset = resultBaseOffset; + + // Apply alignment. + resultOffset = VmaAlignUp(resultOffset, allocAlignment); + + // Check previous suballocations for BufferImageGranularity conflicts. + // Make bigger alignment if necessary. + if (bufferImageGranularity > 1 && bufferImageGranularity != allocAlignment && !suballocations1st.empty()) + { + bool bufferImageGranularityConflict = false; + for (size_t prevSuballocIndex = suballocations1st.size(); prevSuballocIndex--; ) + { + const VmaSuballocation& prevSuballoc = suballocations1st[prevSuballocIndex]; + if (VmaBlocksOnSamePage(prevSuballoc.offset, prevSuballoc.size, resultOffset, bufferImageGranularity)) + { + if (VmaIsBufferImageGranularityConflict(prevSuballoc.type, allocType)) + { + bufferImageGranularityConflict = true; + break; + } + } + else + // Already on previous page. + break; + } + if (bufferImageGranularityConflict) + { + resultOffset = VmaAlignUp(resultOffset, bufferImageGranularity); + } + } + + const VkDeviceSize freeSpaceEnd = m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK ? + suballocations2nd.back().offset : blockSize; + + // There is enough free space at the end after alignment. + if (resultOffset + allocSize + debugMargin <= freeSpaceEnd) + { + // Check next suballocations for BufferImageGranularity conflicts. + // If conflict exists, allocation cannot be made here. 
+ if ((allocSize % bufferImageGranularity || resultOffset % bufferImageGranularity) && m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK) + { + for (size_t nextSuballocIndex = suballocations2nd.size(); nextSuballocIndex--; ) + { + const VmaSuballocation& nextSuballoc = suballocations2nd[nextSuballocIndex]; + if (VmaBlocksOnSamePage(resultOffset, allocSize, nextSuballoc.offset, bufferImageGranularity)) + { + if (VmaIsBufferImageGranularityConflict(allocType, nextSuballoc.type)) + { + return false; + } + } + else + { + // Already on previous page. + break; + } + } + } + + // All tests passed: Success. + pAllocationRequest->allocHandle = (VmaAllocHandle)(resultOffset + 1); + // pAllocationRequest->item, customData unused. + pAllocationRequest->type = VmaAllocationRequestType::EndOf1st; + return true; + } + } + + // Wrap-around to end of 2nd vector. Try to allocate there, watching for the + // beginning of 1st vector as the end of free space. + if (m_2ndVectorMode == SECOND_VECTOR_EMPTY || m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER) + { + VMA_ASSERT(!suballocations1st.empty()); + + VkDeviceSize resultBaseOffset = 0; + if (!suballocations2nd.empty()) + { + const VmaSuballocation& lastSuballoc = suballocations2nd.back(); + resultBaseOffset = lastSuballoc.offset + lastSuballoc.size + debugMargin; + } + + // Start from offset equal to beginning of free space. + VkDeviceSize resultOffset = resultBaseOffset; + + // Apply alignment. + resultOffset = VmaAlignUp(resultOffset, allocAlignment); + + // Check previous suballocations for BufferImageGranularity conflicts. + // Make bigger alignment if necessary. + if (bufferImageGranularity > 1 && bufferImageGranularity != allocAlignment && !suballocations2nd.empty()) + { + bool bufferImageGranularityConflict = false; + for (size_t prevSuballocIndex = suballocations2nd.size(); prevSuballocIndex--; ) + { + const VmaSuballocation& prevSuballoc = suballocations2nd[prevSuballocIndex]; + if (VmaBlocksOnSamePage(prevSuballoc.offset, prevSuballoc.size, resultOffset, bufferImageGranularity)) + { + if (VmaIsBufferImageGranularityConflict(prevSuballoc.type, allocType)) + { + bufferImageGranularityConflict = true; + break; + } + } + else + // Already on previous page. + break; + } + if (bufferImageGranularityConflict) + { + resultOffset = VmaAlignUp(resultOffset, bufferImageGranularity); + } + } + + size_t index1st = m_1stNullItemsBeginCount; + + // There is enough free space at the end after alignment. + if ((index1st == suballocations1st.size() && resultOffset + allocSize + debugMargin <= blockSize) || + (index1st < suballocations1st.size() && resultOffset + allocSize + debugMargin <= suballocations1st[index1st].offset)) + { + // Check next suballocations for BufferImageGranularity conflicts. + // If conflict exists, allocation cannot be made here. + if (allocSize % bufferImageGranularity || resultOffset % bufferImageGranularity) + { + for (size_t nextSuballocIndex = index1st; + nextSuballocIndex < suballocations1st.size(); + nextSuballocIndex++) + { + const VmaSuballocation& nextSuballoc = suballocations1st[nextSuballocIndex]; + if (VmaBlocksOnSamePage(resultOffset, allocSize, nextSuballoc.offset, bufferImageGranularity)) + { + if (VmaIsBufferImageGranularityConflict(allocType, nextSuballoc.type)) + { + return false; + } + } + else + { + // Already on next page. + break; + } + } + } + + // All tests passed: Success. 
+ pAllocationRequest->allocHandle = (VmaAllocHandle)(resultOffset + 1); + pAllocationRequest->type = VmaAllocationRequestType::EndOf2nd; + // pAllocationRequest->item, customData unused. + return true; + } + } + + return false; +} + +bool VmaBlockMetadata_Linear::CreateAllocationRequest_UpperAddress( + VkDeviceSize allocSize, + VkDeviceSize allocAlignment, + VmaSuballocationType allocType, + uint32_t strategy, + VmaAllocationRequest* pAllocationRequest) +{ + const VkDeviceSize blockSize = GetSize(); + const VkDeviceSize bufferImageGranularity = GetBufferImageGranularity(); + SuballocationVectorType& suballocations1st = AccessSuballocations1st(); + SuballocationVectorType& suballocations2nd = AccessSuballocations2nd(); + + if (m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER) + { + VMA_ASSERT(0 && "Trying to use pool with linear algorithm as double stack, while it is already being used as ring buffer."); + return false; + } + + // Try to allocate before 2nd.back(), or end of block if 2nd.empty(). + if (allocSize > blockSize) + { + return false; + } + VkDeviceSize resultBaseOffset = blockSize - allocSize; + if (!suballocations2nd.empty()) + { + const VmaSuballocation& lastSuballoc = suballocations2nd.back(); + resultBaseOffset = lastSuballoc.offset - allocSize; + if (allocSize > lastSuballoc.offset) + { + return false; + } + } + + // Start from offset equal to end of free space. + VkDeviceSize resultOffset = resultBaseOffset; + + const VkDeviceSize debugMargin = GetDebugMargin(); + + // Apply debugMargin at the end. + if (debugMargin > 0) + { + if (resultOffset < debugMargin) + { + return false; + } + resultOffset -= debugMargin; + } + + // Apply alignment. + resultOffset = VmaAlignDown(resultOffset, allocAlignment); + + // Check next suballocations from 2nd for BufferImageGranularity conflicts. + // Make bigger alignment if necessary. + if (bufferImageGranularity > 1 && bufferImageGranularity != allocAlignment && !suballocations2nd.empty()) + { + bool bufferImageGranularityConflict = false; + for (size_t nextSuballocIndex = suballocations2nd.size(); nextSuballocIndex--; ) + { + const VmaSuballocation& nextSuballoc = suballocations2nd[nextSuballocIndex]; + if (VmaBlocksOnSamePage(resultOffset, allocSize, nextSuballoc.offset, bufferImageGranularity)) + { + if (VmaIsBufferImageGranularityConflict(nextSuballoc.type, allocType)) + { + bufferImageGranularityConflict = true; + break; + } + } + else + // Already on previous page. + break; + } + if (bufferImageGranularityConflict) + { + resultOffset = VmaAlignDown(resultOffset, bufferImageGranularity); + } + } + + // There is enough free space. + const VkDeviceSize endOf1st = !suballocations1st.empty() ? + suballocations1st.back().offset + suballocations1st.back().size : + 0; + if (endOf1st + debugMargin <= resultOffset) + { + // Check previous suballocations for BufferImageGranularity conflicts. + // If conflict exists, allocation cannot be made here. + if (bufferImageGranularity > 1) + { + for (size_t prevSuballocIndex = suballocations1st.size(); prevSuballocIndex--; ) + { + const VmaSuballocation& prevSuballoc = suballocations1st[prevSuballocIndex]; + if (VmaBlocksOnSamePage(prevSuballoc.offset, prevSuballoc.size, resultOffset, bufferImageGranularity)) + { + if (VmaIsBufferImageGranularityConflict(allocType, prevSuballoc.type)) + { + return false; + } + } + else + { + // Already on next page. + break; + } + } + } + + // All tests passed: Success. 
+ pAllocationRequest->allocHandle = (VmaAllocHandle)(resultOffset + 1); + // pAllocationRequest->item unused. + pAllocationRequest->type = VmaAllocationRequestType::UpperAddress; + return true; + } + + return false; +} +#endif // _VMA_BLOCK_METADATA_LINEAR_FUNCTIONS +#endif // _VMA_BLOCK_METADATA_LINEAR + +#ifndef _VMA_BLOCK_METADATA_TLSF +// To not search current larger region if first allocation won't succeed and skip to smaller range +// use with VMA_ALLOCATION_CREATE_STRATEGY_MIN_MEMORY_BIT as strategy in CreateAllocationRequest(). +// When fragmentation and reusal of previous blocks doesn't matter then use with +// VMA_ALLOCATION_CREATE_STRATEGY_MIN_TIME_BIT for fastest alloc time possible. +class VmaBlockMetadata_TLSF : public VmaBlockMetadata +{ + VMA_CLASS_NO_COPY_NO_MOVE(VmaBlockMetadata_TLSF) +public: + VmaBlockMetadata_TLSF(const VkAllocationCallbacks* pAllocationCallbacks, + VkDeviceSize bufferImageGranularity, bool isVirtual); + virtual ~VmaBlockMetadata_TLSF(); + + size_t GetAllocationCount() const override { return m_AllocCount; } + size_t GetFreeRegionsCount() const override { return m_BlocksFreeCount + 1; } + VkDeviceSize GetSumFreeSize() const override { return m_BlocksFreeSize + m_NullBlock->size; } + bool IsEmpty() const override { return m_NullBlock->offset == 0; } + VkDeviceSize GetAllocationOffset(VmaAllocHandle allocHandle) const override { return ((Block*)allocHandle)->offset; } + + void Init(VkDeviceSize size) override; + bool Validate() const override; + + void AddDetailedStatistics(VmaDetailedStatistics& inoutStats) const override; + void AddStatistics(VmaStatistics& inoutStats) const override; + +#if VMA_STATS_STRING_ENABLED + void PrintDetailedMap(class VmaJsonWriter& json) const override; +#endif + + bool CreateAllocationRequest( + VkDeviceSize allocSize, + VkDeviceSize allocAlignment, + bool upperAddress, + VmaSuballocationType allocType, + uint32_t strategy, + VmaAllocationRequest* pAllocationRequest) override; + + VkResult CheckCorruption(const void* pBlockData) override; + void Alloc( + const VmaAllocationRequest& request, + VmaSuballocationType type, + void* userData) override; + + void Free(VmaAllocHandle allocHandle) override; + void GetAllocationInfo(VmaAllocHandle allocHandle, VmaVirtualAllocationInfo& outInfo) override; + void* GetAllocationUserData(VmaAllocHandle allocHandle) const override; + VmaAllocHandle GetAllocationListBegin() const override; + VmaAllocHandle GetNextAllocation(VmaAllocHandle prevAlloc) const override; + VkDeviceSize GetNextFreeRegionSize(VmaAllocHandle alloc) const override; + void Clear() override; + void SetAllocationUserData(VmaAllocHandle allocHandle, void* userData) override; + void DebugLogAllAllocations() const override; + +private: + // According to original paper it should be preferable 4 or 5: + // M. Masmano, I. Ripoll, A. Crespo, and J. 
Real "TLSF: a New Dynamic Memory Allocator for Real-Time Systems" + // http://www.gii.upv.es/tlsf/files/ecrts04_tlsf.pdf + static const uint8_t SECOND_LEVEL_INDEX = 5; + static const uint16_t SMALL_BUFFER_SIZE = 256; + static const uint32_t INITIAL_BLOCK_ALLOC_COUNT = 16; + static const uint8_t MEMORY_CLASS_SHIFT = 7; + static const uint8_t MAX_MEMORY_CLASSES = 65 - MEMORY_CLASS_SHIFT; + + class Block + { + public: + VkDeviceSize offset; + VkDeviceSize size; + Block* prevPhysical; + Block* nextPhysical; + + void MarkFree() { prevFree = VMA_NULL; } + void MarkTaken() { prevFree = this; } + bool IsFree() const { return prevFree != this; } + void*& UserData() { VMA_HEAVY_ASSERT(!IsFree()); return userData; } + Block*& PrevFree() { return prevFree; } + Block*& NextFree() { VMA_HEAVY_ASSERT(IsFree()); return nextFree; } + + private: + Block* prevFree; // Address of the same block here indicates that block is taken + union + { + Block* nextFree; + void* userData; + }; + }; + + size_t m_AllocCount; + // Total number of free blocks besides null block + size_t m_BlocksFreeCount; + // Total size of free blocks excluding null block + VkDeviceSize m_BlocksFreeSize; + uint32_t m_IsFreeBitmap; + uint8_t m_MemoryClasses; + uint32_t m_InnerIsFreeBitmap[MAX_MEMORY_CLASSES]; + uint32_t m_ListsCount; + /* + * 0: 0-3 lists for small buffers + * 1+: 0-(2^SLI-1) lists for normal buffers + */ + Block** m_FreeList; + VmaPoolAllocator m_BlockAllocator; + Block* m_NullBlock; + VmaBlockBufferImageGranularity m_GranularityHandler; + + uint8_t SizeToMemoryClass(VkDeviceSize size) const; + uint16_t SizeToSecondIndex(VkDeviceSize size, uint8_t memoryClass) const; + uint32_t GetListIndex(uint8_t memoryClass, uint16_t secondIndex) const; + uint32_t GetListIndex(VkDeviceSize size) const; + + void RemoveFreeBlock(Block* block); + void InsertFreeBlock(Block* block); + void MergeBlock(Block* block, Block* prev); + + Block* FindFreeBlock(VkDeviceSize size, uint32_t& listIndex) const; + bool CheckBlock( + Block& block, + uint32_t listIndex, + VkDeviceSize allocSize, + VkDeviceSize allocAlignment, + VmaSuballocationType allocType, + VmaAllocationRequest* pAllocationRequest); +}; + +#ifndef _VMA_BLOCK_METADATA_TLSF_FUNCTIONS +VmaBlockMetadata_TLSF::VmaBlockMetadata_TLSF(const VkAllocationCallbacks* pAllocationCallbacks, + VkDeviceSize bufferImageGranularity, bool isVirtual) + : VmaBlockMetadata(pAllocationCallbacks, bufferImageGranularity, isVirtual), + m_AllocCount(0), + m_BlocksFreeCount(0), + m_BlocksFreeSize(0), + m_IsFreeBitmap(0), + m_MemoryClasses(0), + m_ListsCount(0), + m_FreeList(VMA_NULL), + m_BlockAllocator(pAllocationCallbacks, INITIAL_BLOCK_ALLOC_COUNT), + m_NullBlock(VMA_NULL), + m_GranularityHandler(bufferImageGranularity) {} + +VmaBlockMetadata_TLSF::~VmaBlockMetadata_TLSF() +{ + if (m_FreeList) + vma_delete_array(GetAllocationCallbacks(), m_FreeList, m_ListsCount); + m_GranularityHandler.Destroy(GetAllocationCallbacks()); +} + +void VmaBlockMetadata_TLSF::Init(VkDeviceSize size) +{ + VmaBlockMetadata::Init(size); + + if (!IsVirtual()) + m_GranularityHandler.Init(GetAllocationCallbacks(), size); + + m_NullBlock = m_BlockAllocator.Alloc(); + m_NullBlock->size = size; + m_NullBlock->offset = 0; + m_NullBlock->prevPhysical = VMA_NULL; + m_NullBlock->nextPhysical = VMA_NULL; + m_NullBlock->MarkFree(); + m_NullBlock->NextFree() = VMA_NULL; + m_NullBlock->PrevFree() = VMA_NULL; + uint8_t memoryClass = SizeToMemoryClass(size); + uint16_t sli = SizeToSecondIndex(size, memoryClass); + m_ListsCount = (memoryClass == 0 ? 
0 : (memoryClass - 1) * (1UL << SECOND_LEVEL_INDEX) + sli) + 1; + if (IsVirtual()) + m_ListsCount += 1UL << SECOND_LEVEL_INDEX; + else + m_ListsCount += 4; + + m_MemoryClasses = memoryClass + uint8_t(2); + memset(m_InnerIsFreeBitmap, 0, MAX_MEMORY_CLASSES * sizeof(uint32_t)); + + m_FreeList = vma_new_array(GetAllocationCallbacks(), Block*, m_ListsCount); + memset(m_FreeList, 0, m_ListsCount * sizeof(Block*)); +} + +bool VmaBlockMetadata_TLSF::Validate() const +{ + VMA_VALIDATE(GetSumFreeSize() <= GetSize()); + + VkDeviceSize calculatedSize = m_NullBlock->size; + VkDeviceSize calculatedFreeSize = m_NullBlock->size; + size_t allocCount = 0; + size_t freeCount = 0; + + // Check integrity of free lists + for (uint32_t list = 0; list < m_ListsCount; ++list) + { + Block* block = m_FreeList[list]; + if (block != VMA_NULL) + { + VMA_VALIDATE(block->IsFree()); + VMA_VALIDATE(block->PrevFree() == VMA_NULL); + while (block->NextFree()) + { + VMA_VALIDATE(block->NextFree()->IsFree()); + VMA_VALIDATE(block->NextFree()->PrevFree() == block); + block = block->NextFree(); + } + } + } + + VkDeviceSize nextOffset = m_NullBlock->offset; + auto validateCtx = m_GranularityHandler.StartValidation(GetAllocationCallbacks(), IsVirtual()); + + VMA_VALIDATE(m_NullBlock->nextPhysical == VMA_NULL); + if (m_NullBlock->prevPhysical) + { + VMA_VALIDATE(m_NullBlock->prevPhysical->nextPhysical == m_NullBlock); + } + // Check all blocks + for (Block* prev = m_NullBlock->prevPhysical; prev != VMA_NULL; prev = prev->prevPhysical) + { + VMA_VALIDATE(prev->offset + prev->size == nextOffset); + nextOffset = prev->offset; + calculatedSize += prev->size; + + uint32_t listIndex = GetListIndex(prev->size); + if (prev->IsFree()) + { + ++freeCount; + // Check if free block belongs to free list + Block* freeBlock = m_FreeList[listIndex]; + VMA_VALIDATE(freeBlock != VMA_NULL); + + bool found = false; + do + { + if (freeBlock == prev) + found = true; + + freeBlock = freeBlock->NextFree(); + } while (!found && freeBlock != VMA_NULL); + + VMA_VALIDATE(found); + calculatedFreeSize += prev->size; + } + else + { + ++allocCount; + // Check if taken block is not on a free list + Block* freeBlock = m_FreeList[listIndex]; + while (freeBlock) + { + VMA_VALIDATE(freeBlock != prev); + freeBlock = freeBlock->NextFree(); + } + + if (!IsVirtual()) + { + VMA_VALIDATE(m_GranularityHandler.Validate(validateCtx, prev->offset, prev->size)); + } + } + + if (prev->prevPhysical) + { + VMA_VALIDATE(prev->prevPhysical->nextPhysical == prev); + } + } + + if (!IsVirtual()) + { + VMA_VALIDATE(m_GranularityHandler.FinishValidation(validateCtx)); + } + + VMA_VALIDATE(nextOffset == 0); + VMA_VALIDATE(calculatedSize == GetSize()); + VMA_VALIDATE(calculatedFreeSize == GetSumFreeSize()); + VMA_VALIDATE(allocCount == m_AllocCount); + VMA_VALIDATE(freeCount == m_BlocksFreeCount); + + return true; +} + +void VmaBlockMetadata_TLSF::AddDetailedStatistics(VmaDetailedStatistics& inoutStats) const +{ + inoutStats.statistics.blockCount++; + inoutStats.statistics.blockBytes += GetSize(); + if (m_NullBlock->size > 0) + VmaAddDetailedStatisticsUnusedRange(inoutStats, m_NullBlock->size); + + for (Block* block = m_NullBlock->prevPhysical; block != VMA_NULL; block = block->prevPhysical) + { + if (block->IsFree()) + VmaAddDetailedStatisticsUnusedRange(inoutStats, block->size); + else + VmaAddDetailedStatisticsAllocation(inoutStats, block->size); + } +} + +void VmaBlockMetadata_TLSF::AddStatistics(VmaStatistics& inoutStats) const +{ + inoutStats.blockCount++; + inoutStats.allocationCount 
+= (uint32_t)m_AllocCount;
+ inoutStats.blockBytes += GetSize();
+ inoutStats.allocationBytes += GetSize() - GetSumFreeSize();
+}
+
+#if VMA_STATS_STRING_ENABLED
+void VmaBlockMetadata_TLSF::PrintDetailedMap(class VmaJsonWriter& json) const
+{
+ size_t blockCount = m_AllocCount + m_BlocksFreeCount;
+ VmaStlAllocator<Block*> allocator(GetAllocationCallbacks());
+ VmaVector<Block*, VmaStlAllocator<Block*>> blockList(blockCount, allocator);
+
+ size_t i = blockCount;
+ for (Block* block = m_NullBlock->prevPhysical; block != VMA_NULL; block = block->prevPhysical)
+ {
+ blockList[--i] = block;
+ }
+ VMA_ASSERT(i == 0);
+
+ VmaDetailedStatistics stats;
+ VmaClearDetailedStatistics(stats);
+ AddDetailedStatistics(stats);
+
+ PrintDetailedMap_Begin(json,
+ stats.statistics.blockBytes - stats.statistics.allocationBytes,
+ stats.statistics.allocationCount,
+ stats.unusedRangeCount);
+
+ for (; i < blockCount; ++i)
+ {
+ Block* block = blockList[i];
+ if (block->IsFree())
+ PrintDetailedMap_UnusedRange(json, block->offset, block->size);
+ else
+ PrintDetailedMap_Allocation(json, block->offset, block->size, block->UserData());
+ }
+ if (m_NullBlock->size > 0)
+ PrintDetailedMap_UnusedRange(json, m_NullBlock->offset, m_NullBlock->size);
+
+ PrintDetailedMap_End(json);
+}
+#endif
+
+bool VmaBlockMetadata_TLSF::CreateAllocationRequest(
+ VkDeviceSize allocSize,
+ VkDeviceSize allocAlignment,
+ bool upperAddress,
+ VmaSuballocationType allocType,
+ uint32_t strategy,
+ VmaAllocationRequest* pAllocationRequest)
+{
+ VMA_ASSERT(allocSize > 0 && "Cannot allocate empty block!");
+ VMA_ASSERT(!upperAddress && "VMA_ALLOCATION_CREATE_UPPER_ADDRESS_BIT can be used only with linear algorithm.");
+
+ // For small granularity round up
+ if (!IsVirtual())
+ m_GranularityHandler.RoundupAllocRequest(allocType, allocSize, allocAlignment);
+
+ allocSize += GetDebugMargin();
+ // Quick check for too small pool
+ if (allocSize > GetSumFreeSize())
+ return false;
+
+ // If no free blocks in pool then check only null block
+ if (m_BlocksFreeCount == 0)
+ return CheckBlock(*m_NullBlock, m_ListsCount, allocSize, allocAlignment, allocType, pAllocationRequest);
+
+ // Round up to the next block
+ VkDeviceSize sizeForNextList = allocSize;
+ VkDeviceSize smallSizeStep = VkDeviceSize(SMALL_BUFFER_SIZE / (IsVirtual() ? 
1 << SECOND_LEVEL_INDEX : 4));
+ if (allocSize > SMALL_BUFFER_SIZE)
+ {
+ sizeForNextList += (1ULL << (VMA_BITSCAN_MSB(allocSize) - SECOND_LEVEL_INDEX));
+ }
+ else if (allocSize > SMALL_BUFFER_SIZE - smallSizeStep)
+ sizeForNextList = SMALL_BUFFER_SIZE + 1;
+ else
+ sizeForNextList += smallSizeStep;
+
+ uint32_t nextListIndex = m_ListsCount;
+ uint32_t prevListIndex = m_ListsCount;
+ Block* nextListBlock = VMA_NULL;
+ Block* prevListBlock = VMA_NULL;
+
+ // Check blocks according to strategies
+ if (strategy & VMA_ALLOCATION_CREATE_STRATEGY_MIN_TIME_BIT)
+ {
+ // Quick check for larger block first
+ nextListBlock = FindFreeBlock(sizeForNextList, nextListIndex);
+ if (nextListBlock != VMA_NULL && CheckBlock(*nextListBlock, nextListIndex, allocSize, allocAlignment, allocType, pAllocationRequest))
+ return true;
+
+ // If not fitted then null block
+ if (CheckBlock(*m_NullBlock, m_ListsCount, allocSize, allocAlignment, allocType, pAllocationRequest))
+ return true;
+
+ // Null block failed, search larger bucket
+ while (nextListBlock)
+ {
+ if (CheckBlock(*nextListBlock, nextListIndex, allocSize, allocAlignment, allocType, pAllocationRequest))
+ return true;
+ nextListBlock = nextListBlock->NextFree();
+ }
+
+ // Failed again, check best fit bucket
+ prevListBlock = FindFreeBlock(allocSize, prevListIndex);
+ while (prevListBlock)
+ {
+ if (CheckBlock(*prevListBlock, prevListIndex, allocSize, allocAlignment, allocType, pAllocationRequest))
+ return true;
+ prevListBlock = prevListBlock->NextFree();
+ }
+ }
+ else if (strategy & VMA_ALLOCATION_CREATE_STRATEGY_MIN_MEMORY_BIT)
+ {
+ // Check best fit bucket
+ prevListBlock = FindFreeBlock(allocSize, prevListIndex);
+ while (prevListBlock)
+ {
+ if (CheckBlock(*prevListBlock, prevListIndex, allocSize, allocAlignment, allocType, pAllocationRequest))
+ return true;
+ prevListBlock = prevListBlock->NextFree();
+ }
+
+ // If failed check null block
+ if (CheckBlock(*m_NullBlock, m_ListsCount, allocSize, allocAlignment, allocType, pAllocationRequest))
+ return true;
+
+ // Check larger bucket
+ nextListBlock = FindFreeBlock(sizeForNextList, nextListIndex);
+ while (nextListBlock)
+ {
+ if (CheckBlock(*nextListBlock, nextListIndex, allocSize, allocAlignment, allocType, pAllocationRequest))
+ return true;
+ nextListBlock = nextListBlock->NextFree();
+ }
+ }
+ else if (strategy & VMA_ALLOCATION_CREATE_STRATEGY_MIN_OFFSET_BIT )
+ {
+ // Perform search from the start
+ VmaStlAllocator<Block*> allocator(GetAllocationCallbacks());
+ VmaVector<Block*, VmaStlAllocator<Block*>> blockList(m_BlocksFreeCount, allocator);
+
+ size_t i = m_BlocksFreeCount;
+ for (Block* block = m_NullBlock->prevPhysical; block != VMA_NULL; block = block->prevPhysical)
+ {
+ if (block->IsFree() && block->size >= allocSize)
+ blockList[--i] = block;
+ }
+
+ for (; i < m_BlocksFreeCount; ++i)
+ {
+ Block& block = *blockList[i];
+ if (CheckBlock(block, GetListIndex(block.size), allocSize, allocAlignment, allocType, pAllocationRequest))
+ return true;
+ }
+
+ // If failed check null block
+ if (CheckBlock(*m_NullBlock, m_ListsCount, allocSize, allocAlignment, allocType, pAllocationRequest))
+ return true;
+
+ // Whole range searched, no more memory
+ return false;
+ }
+ else
+ {
+ // Check larger bucket
+ nextListBlock = FindFreeBlock(sizeForNextList, nextListIndex);
+ while (nextListBlock)
+ {
+ if (CheckBlock(*nextListBlock, nextListIndex, allocSize, allocAlignment, allocType, pAllocationRequest))
+ return true;
+ nextListBlock = nextListBlock->NextFree();
+ }
+
+ // If failed check null block
+ if (CheckBlock(*m_NullBlock, 
m_ListsCount, allocSize, allocAlignment, allocType, pAllocationRequest)) + return true; + + // Check best fit bucket + prevListBlock = FindFreeBlock(allocSize, prevListIndex); + while (prevListBlock) + { + if (CheckBlock(*prevListBlock, prevListIndex, allocSize, allocAlignment, allocType, pAllocationRequest)) + return true; + prevListBlock = prevListBlock->NextFree(); + } + } + + // Worst case, full search has to be done + while (++nextListIndex < m_ListsCount) + { + nextListBlock = m_FreeList[nextListIndex]; + while (nextListBlock) + { + if (CheckBlock(*nextListBlock, nextListIndex, allocSize, allocAlignment, allocType, pAllocationRequest)) + return true; + nextListBlock = nextListBlock->NextFree(); + } + } + + // No more memory sadly + return false; +} + +VkResult VmaBlockMetadata_TLSF::CheckCorruption(const void* pBlockData) +{ + for (Block* block = m_NullBlock->prevPhysical; block != VMA_NULL; block = block->prevPhysical) + { + if (!block->IsFree()) + { + if (!VmaValidateMagicValue(pBlockData, block->offset + block->size)) + { + VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED AFTER VALIDATED ALLOCATION!"); + return VK_ERROR_UNKNOWN_COPY; + } + } + } + + return VK_SUCCESS; +} + +void VmaBlockMetadata_TLSF::Alloc( + const VmaAllocationRequest& request, + VmaSuballocationType type, + void* userData) +{ + VMA_ASSERT(request.type == VmaAllocationRequestType::TLSF); + + // Get block and pop it from the free list + Block* currentBlock = (Block*)request.allocHandle; + VkDeviceSize offset = request.algorithmData; + VMA_ASSERT(currentBlock != VMA_NULL); + VMA_ASSERT(currentBlock->offset <= offset); + + if (currentBlock != m_NullBlock) + RemoveFreeBlock(currentBlock); + + VkDeviceSize debugMargin = GetDebugMargin(); + VkDeviceSize misssingAlignment = offset - currentBlock->offset; + + // Append missing alignment to prev block or create new one + if (misssingAlignment) + { + Block* prevBlock = currentBlock->prevPhysical; + VMA_ASSERT(prevBlock != VMA_NULL && "There should be no missing alignment at offset 0!"); + + if (prevBlock->IsFree() && prevBlock->size != debugMargin) + { + uint32_t oldList = GetListIndex(prevBlock->size); + prevBlock->size += misssingAlignment; + // Check if new size crosses list bucket + if (oldList != GetListIndex(prevBlock->size)) + { + prevBlock->size -= misssingAlignment; + RemoveFreeBlock(prevBlock); + prevBlock->size += misssingAlignment; + InsertFreeBlock(prevBlock); + } + else + m_BlocksFreeSize += misssingAlignment; + } + else + { + Block* newBlock = m_BlockAllocator.Alloc(); + currentBlock->prevPhysical = newBlock; + prevBlock->nextPhysical = newBlock; + newBlock->prevPhysical = prevBlock; + newBlock->nextPhysical = currentBlock; + newBlock->size = misssingAlignment; + newBlock->offset = currentBlock->offset; + newBlock->MarkTaken(); + + InsertFreeBlock(newBlock); + } + + currentBlock->size -= misssingAlignment; + currentBlock->offset += misssingAlignment; + } + + VkDeviceSize size = request.size + debugMargin; + if (currentBlock->size == size) + { + if (currentBlock == m_NullBlock) + { + // Setup new null block + m_NullBlock = m_BlockAllocator.Alloc(); + m_NullBlock->size = 0; + m_NullBlock->offset = currentBlock->offset + size; + m_NullBlock->prevPhysical = currentBlock; + m_NullBlock->nextPhysical = VMA_NULL; + m_NullBlock->MarkFree(); + m_NullBlock->PrevFree() = VMA_NULL; + m_NullBlock->NextFree() = VMA_NULL; + currentBlock->nextPhysical = m_NullBlock; + currentBlock->MarkTaken(); + } + } + else + { + VMA_ASSERT(currentBlock->size > size && "Proper block already 
found, shouldn't find smaller one!"); + + // Create new free block + Block* newBlock = m_BlockAllocator.Alloc(); + newBlock->size = currentBlock->size - size; + newBlock->offset = currentBlock->offset + size; + newBlock->prevPhysical = currentBlock; + newBlock->nextPhysical = currentBlock->nextPhysical; + currentBlock->nextPhysical = newBlock; + currentBlock->size = size; + + if (currentBlock == m_NullBlock) + { + m_NullBlock = newBlock; + m_NullBlock->MarkFree(); + m_NullBlock->NextFree() = VMA_NULL; + m_NullBlock->PrevFree() = VMA_NULL; + currentBlock->MarkTaken(); + } + else + { + newBlock->nextPhysical->prevPhysical = newBlock; + newBlock->MarkTaken(); + InsertFreeBlock(newBlock); + } + } + currentBlock->UserData() = userData; + + if (debugMargin > 0) + { + currentBlock->size -= debugMargin; + Block* newBlock = m_BlockAllocator.Alloc(); + newBlock->size = debugMargin; + newBlock->offset = currentBlock->offset + currentBlock->size; + newBlock->prevPhysical = currentBlock; + newBlock->nextPhysical = currentBlock->nextPhysical; + newBlock->MarkTaken(); + currentBlock->nextPhysical->prevPhysical = newBlock; + currentBlock->nextPhysical = newBlock; + InsertFreeBlock(newBlock); + } + + if (!IsVirtual()) + m_GranularityHandler.AllocPages((uint8_t)(uintptr_t)request.customData, + currentBlock->offset, currentBlock->size); + ++m_AllocCount; +} + +void VmaBlockMetadata_TLSF::Free(VmaAllocHandle allocHandle) +{ + Block* block = (Block*)allocHandle; + Block* next = block->nextPhysical; + VMA_ASSERT(!block->IsFree() && "Block is already free!"); + + if (!IsVirtual()) + m_GranularityHandler.FreePages(block->offset, block->size); + --m_AllocCount; + + VkDeviceSize debugMargin = GetDebugMargin(); + if (debugMargin > 0) + { + RemoveFreeBlock(next); + MergeBlock(next, block); + block = next; + next = next->nextPhysical; + } + + // Try merging + Block* prev = block->prevPhysical; + if (prev != VMA_NULL && prev->IsFree() && prev->size != debugMargin) + { + RemoveFreeBlock(prev); + MergeBlock(block, prev); + } + + if (!next->IsFree()) + InsertFreeBlock(block); + else if (next == m_NullBlock) + MergeBlock(m_NullBlock, block); + else + { + RemoveFreeBlock(next); + MergeBlock(next, block); + InsertFreeBlock(next); + } +} + +void VmaBlockMetadata_TLSF::GetAllocationInfo(VmaAllocHandle allocHandle, VmaVirtualAllocationInfo& outInfo) +{ + Block* block = (Block*)allocHandle; + VMA_ASSERT(!block->IsFree() && "Cannot get allocation info for free block!"); + outInfo.offset = block->offset; + outInfo.size = block->size; + outInfo.pUserData = block->UserData(); +} + +void* VmaBlockMetadata_TLSF::GetAllocationUserData(VmaAllocHandle allocHandle) const +{ + Block* block = (Block*)allocHandle; + VMA_ASSERT(!block->IsFree() && "Cannot get user data for free block!"); + return block->UserData(); +} + +VmaAllocHandle VmaBlockMetadata_TLSF::GetAllocationListBegin() const +{ + if (m_AllocCount == 0) + return VK_NULL_HANDLE; + + for (Block* block = m_NullBlock->prevPhysical; block; block = block->prevPhysical) + { + if (!block->IsFree()) + return (VmaAllocHandle)block; + } + VMA_ASSERT(false && "If m_AllocCount > 0 then should find any allocation!"); + return VK_NULL_HANDLE; +} + +VmaAllocHandle VmaBlockMetadata_TLSF::GetNextAllocation(VmaAllocHandle prevAlloc) const +{ + Block* startBlock = (Block*)prevAlloc; + VMA_ASSERT(!startBlock->IsFree() && "Incorrect block!"); + + for (Block* block = startBlock->prevPhysical; block; block = block->prevPhysical) + { + if (!block->IsFree()) + return (VmaAllocHandle)block; + } + return 
VK_NULL_HANDLE;
+}
+
+VkDeviceSize VmaBlockMetadata_TLSF::GetNextFreeRegionSize(VmaAllocHandle alloc) const
+{
+ Block* block = (Block*)alloc;
+ VMA_ASSERT(!block->IsFree() && "Incorrect block!");
+
+ if (block->prevPhysical)
+ return block->prevPhysical->IsFree() ? block->prevPhysical->size : 0;
+ return 0;
+}
+
+void VmaBlockMetadata_TLSF::Clear()
+{
+ m_AllocCount = 0;
+ m_BlocksFreeCount = 0;
+ m_BlocksFreeSize = 0;
+ m_IsFreeBitmap = 0;
+ m_NullBlock->offset = 0;
+ m_NullBlock->size = GetSize();
+ Block* block = m_NullBlock->prevPhysical;
+ m_NullBlock->prevPhysical = VMA_NULL;
+ while (block)
+ {
+ Block* prev = block->prevPhysical;
+ m_BlockAllocator.Free(block);
+ block = prev;
+ }
+ memset(m_FreeList, 0, m_ListsCount * sizeof(Block*));
+ memset(m_InnerIsFreeBitmap, 0, m_MemoryClasses * sizeof(uint32_t));
+ m_GranularityHandler.Clear();
+}
+
+void VmaBlockMetadata_TLSF::SetAllocationUserData(VmaAllocHandle allocHandle, void* userData)
+{
+ Block* block = (Block*)allocHandle;
+ VMA_ASSERT(!block->IsFree() && "Trying to set user data for not allocated block!");
+ block->UserData() = userData;
+}
+
+void VmaBlockMetadata_TLSF::DebugLogAllAllocations() const
+{
+ for (Block* block = m_NullBlock->prevPhysical; block != VMA_NULL; block = block->prevPhysical)
+ if (!block->IsFree())
+ DebugLogAllocation(block->offset, block->size, block->UserData());
+}
+
+uint8_t VmaBlockMetadata_TLSF::SizeToMemoryClass(VkDeviceSize size) const
+{
+ if (size > SMALL_BUFFER_SIZE)
+ return uint8_t(VMA_BITSCAN_MSB(size) - MEMORY_CLASS_SHIFT);
+ return 0;
+}
+
+uint16_t VmaBlockMetadata_TLSF::SizeToSecondIndex(VkDeviceSize size, uint8_t memoryClass) const
+{
+ if (memoryClass == 0)
+ {
+ if (IsVirtual())
+ return static_cast<uint16_t>((size - 1) / 8);
+ else
+ return static_cast<uint16_t>((size - 1) / 64);
+ }
+ return static_cast<uint16_t>((size >> (memoryClass + MEMORY_CLASS_SHIFT - SECOND_LEVEL_INDEX)) ^ (1U << SECOND_LEVEL_INDEX));
+}
+
+uint32_t VmaBlockMetadata_TLSF::GetListIndex(uint8_t memoryClass, uint16_t secondIndex) const
+{
+ if (memoryClass == 0)
+ return secondIndex;
+
+ const uint32_t index = static_cast<uint32_t>(memoryClass - 1) * (1 << SECOND_LEVEL_INDEX) + secondIndex;
+ if (IsVirtual())
+ return index + (1 << SECOND_LEVEL_INDEX);
+ else
+ return index + 4;
+}
+
+uint32_t VmaBlockMetadata_TLSF::GetListIndex(VkDeviceSize size) const
+{
+ uint8_t memoryClass = SizeToMemoryClass(size);
+ return GetListIndex(memoryClass, SizeToSecondIndex(size, memoryClass));
+}
+
+void VmaBlockMetadata_TLSF::RemoveFreeBlock(Block* block)
+{
+ VMA_ASSERT(block != m_NullBlock);
+ VMA_ASSERT(block->IsFree());
+
+ if (block->NextFree() != VMA_NULL)
+ block->NextFree()->PrevFree() = block->PrevFree();
+ if (block->PrevFree() != VMA_NULL)
+ block->PrevFree()->NextFree() = block->NextFree();
+ else
+ {
+ uint8_t memClass = SizeToMemoryClass(block->size);
+ uint16_t secondIndex = SizeToSecondIndex(block->size, memClass);
+ uint32_t index = GetListIndex(memClass, secondIndex);
+ VMA_ASSERT(m_FreeList[index] == block);
+ m_FreeList[index] = block->NextFree();
+ if (block->NextFree() == VMA_NULL)
+ {
+ m_InnerIsFreeBitmap[memClass] &= ~(1U << secondIndex);
+ if (m_InnerIsFreeBitmap[memClass] == 0)
+ m_IsFreeBitmap &= ~(1UL << memClass);
+ }
+ }
+ block->MarkTaken();
+ block->UserData() = VMA_NULL;
+ --m_BlocksFreeCount;
+ m_BlocksFreeSize -= block->size;
+}
+
+void VmaBlockMetadata_TLSF::InsertFreeBlock(Block* block)
+{
+ VMA_ASSERT(block != m_NullBlock);
+ VMA_ASSERT(!block->IsFree() && "Cannot insert block twice!");
+
+ uint8_t memClass = 
SizeToMemoryClass(block->size);
+ uint16_t secondIndex = SizeToSecondIndex(block->size, memClass);
+ uint32_t index = GetListIndex(memClass, secondIndex);
+ VMA_ASSERT(index < m_ListsCount);
+ block->PrevFree() = VMA_NULL;
+ block->NextFree() = m_FreeList[index];
+ m_FreeList[index] = block;
+ if (block->NextFree() != VMA_NULL)
+ block->NextFree()->PrevFree() = block;
+ else
+ {
+ m_InnerIsFreeBitmap[memClass] |= 1U << secondIndex;
+ m_IsFreeBitmap |= 1UL << memClass;
+ }
+ ++m_BlocksFreeCount;
+ m_BlocksFreeSize += block->size;
+}
+
+void VmaBlockMetadata_TLSF::MergeBlock(Block* block, Block* prev)
+{
+ VMA_ASSERT(block->prevPhysical == prev && "Cannot merge separate physical regions!");
+ VMA_ASSERT(!prev->IsFree() && "Cannot merge block that belongs to free list!");
+
+ block->offset = prev->offset;
+ block->size += prev->size;
+ block->prevPhysical = prev->prevPhysical;
+ if (block->prevPhysical)
+ block->prevPhysical->nextPhysical = block;
+ m_BlockAllocator.Free(prev);
+}
+
+VmaBlockMetadata_TLSF::Block* VmaBlockMetadata_TLSF::FindFreeBlock(VkDeviceSize size, uint32_t& listIndex) const
+{
+ uint8_t memoryClass = SizeToMemoryClass(size);
+ uint32_t innerFreeMap = m_InnerIsFreeBitmap[memoryClass] & (~0U << SizeToSecondIndex(size, memoryClass));
+ if (!innerFreeMap)
+ {
+ // Check higher levels for available blocks
+ uint32_t freeMap = m_IsFreeBitmap & (~0UL << (memoryClass + 1));
+ if (!freeMap)
+ return VMA_NULL; // No more memory available
+
+ // Find lowest free region
+ memoryClass = VMA_BITSCAN_LSB(freeMap);
+ innerFreeMap = m_InnerIsFreeBitmap[memoryClass];
+ VMA_ASSERT(innerFreeMap != 0);
+ }
+ // Find lowest free subregion
+ listIndex = GetListIndex(memoryClass, VMA_BITSCAN_LSB(innerFreeMap));
+ VMA_ASSERT(m_FreeList[listIndex]);
+ return m_FreeList[listIndex];
+}
+
+bool VmaBlockMetadata_TLSF::CheckBlock(
+ Block& block,
+ uint32_t listIndex,
+ VkDeviceSize allocSize,
+ VkDeviceSize allocAlignment,
+ VmaSuballocationType allocType,
+ VmaAllocationRequest* pAllocationRequest)
+{
+ VMA_ASSERT(block.IsFree() && "Block is already taken!");
+
+ VkDeviceSize alignedOffset = VmaAlignUp(block.offset, allocAlignment);
+ if (block.size < allocSize + alignedOffset - block.offset)
+ return false;
+
+ // Check for granularity conflicts
+ if (!IsVirtual() &&
+ m_GranularityHandler.CheckConflictAndAlignUp(alignedOffset, allocSize, block.offset, block.size, allocType))
+ return false;
+
+ // Alloc successful
+ pAllocationRequest->type = VmaAllocationRequestType::TLSF;
+ pAllocationRequest->allocHandle = (VmaAllocHandle)&block;
+ pAllocationRequest->size = allocSize - GetDebugMargin();
+ pAllocationRequest->customData = (void*)allocType;
+ pAllocationRequest->algorithmData = alignedOffset;
+
+ // Place block at the start of list if it's normal block
+ if (listIndex != m_ListsCount && block.PrevFree())
+ {
+ block.PrevFree()->NextFree() = block.NextFree();
+ if (block.NextFree())
+ block.NextFree()->PrevFree() = block.PrevFree();
+ block.PrevFree() = VMA_NULL;
+ block.NextFree() = m_FreeList[listIndex];
+ m_FreeList[listIndex] = &block;
+ if (block.NextFree())
+ block.NextFree()->PrevFree() = &block;
+ }
+
+ return true;
+}
+#endif // _VMA_BLOCK_METADATA_TLSF_FUNCTIONS
+#endif // _VMA_BLOCK_METADATA_TLSF
+
+#ifndef _VMA_BLOCK_VECTOR
+/*
+Sequence of VmaDeviceMemoryBlock. Represents memory blocks allocated for a specific
+Vulkan memory type.
+
+Synchronized internally with a mutex. 
+*/
+class VmaBlockVector
+{
+ friend struct VmaDefragmentationContext_T;
+ VMA_CLASS_NO_COPY_NO_MOVE(VmaBlockVector)
+public:
+ VmaBlockVector(
+ VmaAllocator hAllocator,
+ VmaPool hParentPool,
+ uint32_t memoryTypeIndex,
+ VkDeviceSize preferredBlockSize,
+ size_t minBlockCount,
+ size_t maxBlockCount,
+ VkDeviceSize bufferImageGranularity,
+ bool explicitBlockSize,
+ uint32_t algorithm,
+ float priority,
+ VkDeviceSize minAllocationAlignment,
+ void* pMemoryAllocateNext);
+ ~VmaBlockVector();
+
+ VmaAllocator GetAllocator() const { return m_hAllocator; }
+ VmaPool GetParentPool() const { return m_hParentPool; }
+ bool IsCustomPool() const { return m_hParentPool != VMA_NULL; }
+ uint32_t GetMemoryTypeIndex() const { return m_MemoryTypeIndex; }
+ VkDeviceSize GetPreferredBlockSize() const { return m_PreferredBlockSize; }
+ VkDeviceSize GetBufferImageGranularity() const { return m_BufferImageGranularity; }
+ uint32_t GetAlgorithm() const { return m_Algorithm; }
+ bool HasExplicitBlockSize() const { return m_ExplicitBlockSize; }
+ float GetPriority() const { return m_Priority; }
+ const void* GetAllocationNextPtr() const { return m_pMemoryAllocateNext; }
+ // To be used only while the m_Mutex is locked. Used during defragmentation.
+ size_t GetBlockCount() const { return m_Blocks.size(); }
+ // To be used only while the m_Mutex is locked. Used during defragmentation.
+ VmaDeviceMemoryBlock* GetBlock(size_t index) const { return m_Blocks[index]; }
+ VMA_RW_MUTEX &GetMutex() { return m_Mutex; }
+
+ VkResult CreateMinBlocks();
+ void AddStatistics(VmaStatistics& inoutStats);
+ void AddDetailedStatistics(VmaDetailedStatistics& inoutStats);
+ bool IsEmpty();
+ bool IsCorruptionDetectionEnabled() const;
+
+ VkResult Allocate(
+ VkDeviceSize size,
+ VkDeviceSize alignment,
+ const VmaAllocationCreateInfo& createInfo,
+ VmaSuballocationType suballocType,
+ size_t allocationCount,
+ VmaAllocation* pAllocations);
+
+ void Free(const VmaAllocation hAllocation);
+
+#if VMA_STATS_STRING_ENABLED
+ void PrintDetailedMap(class VmaJsonWriter& json);
+#endif
+
+ VkResult CheckCorruption();
+
+private:
+ const VmaAllocator m_hAllocator;
+ const VmaPool m_hParentPool;
+ const uint32_t m_MemoryTypeIndex;
+ const VkDeviceSize m_PreferredBlockSize;
+ const size_t m_MinBlockCount;
+ const size_t m_MaxBlockCount;
+ const VkDeviceSize m_BufferImageGranularity;
+ const bool m_ExplicitBlockSize;
+ const uint32_t m_Algorithm;
+ const float m_Priority;
+ const VkDeviceSize m_MinAllocationAlignment;
+
+ void* const m_pMemoryAllocateNext;
+ VMA_RW_MUTEX m_Mutex;
+ // Incrementally sorted by sumFreeSize, ascending.
+ VmaVector<VmaDeviceMemoryBlock*, VmaStlAllocator<VmaDeviceMemoryBlock*>> m_Blocks;
+ uint32_t m_NextBlockId;
+ bool m_IncrementalSort = true;
+
+ void SetIncrementalSort(bool val) { m_IncrementalSort = val; }
+
+ VkDeviceSize CalcMaxBlockSize() const;
+ // Finds and removes given block from vector.
+ void Remove(VmaDeviceMemoryBlock* pBlock);
+ // Performs single step in sorting m_Blocks. They may not be fully sorted
+ // after this call. 
+ void IncrementallySortBlocks();
+ void SortByFreeSize();
+
+ VkResult AllocatePage(
+ VkDeviceSize size,
+ VkDeviceSize alignment,
+ const VmaAllocationCreateInfo& createInfo,
+ VmaSuballocationType suballocType,
+ VmaAllocation* pAllocation);
+
+ VkResult AllocateFromBlock(
+ VmaDeviceMemoryBlock* pBlock,
+ VkDeviceSize size,
+ VkDeviceSize alignment,
+ VmaAllocationCreateFlags allocFlags,
+ void* pUserData,
+ VmaSuballocationType suballocType,
+ uint32_t strategy,
+ VmaAllocation* pAllocation);
+
+ VkResult CommitAllocationRequest(
+ VmaAllocationRequest& allocRequest,
+ VmaDeviceMemoryBlock* pBlock,
+ VkDeviceSize alignment,
+ VmaAllocationCreateFlags allocFlags,
+ void* pUserData,
+ VmaSuballocationType suballocType,
+ VmaAllocation* pAllocation);
+
+ VkResult CreateBlock(VkDeviceSize blockSize, size_t* pNewBlockIndex);
+ bool HasEmptyBlock();
+};
+#endif // _VMA_BLOCK_VECTOR
+
+#ifndef _VMA_DEFRAGMENTATION_CONTEXT
+struct VmaDefragmentationContext_T
+{
+ VMA_CLASS_NO_COPY_NO_MOVE(VmaDefragmentationContext_T)
+public:
+ VmaDefragmentationContext_T(
+ VmaAllocator hAllocator,
+ const VmaDefragmentationInfo& info);
+ ~VmaDefragmentationContext_T();
+
+ void GetStats(VmaDefragmentationStats& outStats) { outStats = m_GlobalStats; }
+
+ VkResult DefragmentPassBegin(VmaDefragmentationPassMoveInfo& moveInfo);
+ VkResult DefragmentPassEnd(VmaDefragmentationPassMoveInfo& moveInfo);
+
+private:
+ // Max number of allocations to ignore due to size constraints before ending single pass
+ static const uint8_t MAX_ALLOCS_TO_IGNORE = 16;
+ enum class CounterStatus { Pass, Ignore, End };
+
+ struct FragmentedBlock
+ {
+ uint32_t data;
+ VmaDeviceMemoryBlock* block;
+ };
+ struct StateBalanced
+ {
+ VkDeviceSize avgFreeSize = 0;
+ VkDeviceSize avgAllocSize = UINT64_MAX;
+ };
+ struct StateExtensive
+ {
+ enum class Operation : uint8_t
+ {
+ FindFreeBlockBuffer, FindFreeBlockTexture, FindFreeBlockAll,
+ MoveBuffers, MoveTextures, MoveAll,
+ Cleanup, Done
+ };
+
+ Operation operation = Operation::FindFreeBlockTexture;
+ size_t firstFreeBlock = SIZE_MAX;
+ };
+ struct MoveAllocationData
+ {
+ VkDeviceSize size;
+ VkDeviceSize alignment;
+ VmaSuballocationType type;
+ VmaAllocationCreateFlags flags;
+ VmaDefragmentationMove move = {};
+ };
+
+ const VkDeviceSize m_MaxPassBytes;
+ const uint32_t m_MaxPassAllocations;
+ const PFN_vmaCheckDefragmentationBreakFunction m_BreakCallback;
+ void* m_BreakCallbackUserData;
+
+ VmaStlAllocator<VmaDefragmentationMove> m_MoveAllocator;
+ VmaVector<VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove>> m_Moves;
+
+ uint8_t m_IgnoredAllocs = 0;
+ uint32_t m_Algorithm;
+ uint32_t m_BlockVectorCount;
+ VmaBlockVector* m_PoolBlockVector;
+ VmaBlockVector** m_pBlockVectors;
+ size_t m_ImmovableBlockCount = 0;
+ VmaDefragmentationStats m_GlobalStats = { 0 };
+ VmaDefragmentationStats m_PassStats = { 0 };
+ void* m_AlgorithmState = VMA_NULL;
+
+ static MoveAllocationData GetMoveData(VmaAllocHandle handle, VmaBlockMetadata* metadata);
+ CounterStatus CheckCounters(VkDeviceSize bytes);
+ bool IncrementCounters(VkDeviceSize bytes);
+ bool ReallocWithinBlock(VmaBlockVector& vector, VmaDeviceMemoryBlock* block);
+ bool AllocInOtherBlock(size_t start, size_t end, MoveAllocationData& data, VmaBlockVector& vector);
+
+ bool ComputeDefragmentation(VmaBlockVector& vector, size_t index);
+ bool ComputeDefragmentation_Fast(VmaBlockVector& vector);
+ bool ComputeDefragmentation_Balanced(VmaBlockVector& vector, size_t index, bool update);
+ bool ComputeDefragmentation_Full(VmaBlockVector& vector);
+ bool ComputeDefragmentation_Extensive(VmaBlockVector& vector, 
size_t index); + + void UpdateVectorStatistics(VmaBlockVector& vector, StateBalanced& state); + bool MoveDataToFreeBlocks(VmaSuballocationType currentType, + VmaBlockVector& vector, size_t firstFreeBlock, + bool& texturePresent, bool& bufferPresent, bool& otherPresent); +}; +#endif // _VMA_DEFRAGMENTATION_CONTEXT + +#ifndef _VMA_POOL_T +struct VmaPool_T +{ + friend struct VmaPoolListItemTraits; + VMA_CLASS_NO_COPY_NO_MOVE(VmaPool_T) +public: + VmaBlockVector m_BlockVector; + VmaDedicatedAllocationList m_DedicatedAllocations; + + VmaPool_T( + VmaAllocator hAllocator, + const VmaPoolCreateInfo& createInfo, + VkDeviceSize preferredBlockSize); + ~VmaPool_T(); + + uint32_t GetId() const { return m_Id; } + void SetId(uint32_t id) { VMA_ASSERT(m_Id == 0); m_Id = id; } + + const char* GetName() const { return m_Name; } + void SetName(const char* pName); + +#if VMA_STATS_STRING_ENABLED + //void PrintDetailedMap(class VmaStringBuilder& sb); +#endif + +private: + uint32_t m_Id; + char* m_Name; + VmaPool_T* m_PrevPool = VMA_NULL; + VmaPool_T* m_NextPool = VMA_NULL; +}; + +struct VmaPoolListItemTraits +{ + typedef VmaPool_T ItemType; + + static ItemType* GetPrev(const ItemType* item) { return item->m_PrevPool; } + static ItemType* GetNext(const ItemType* item) { return item->m_NextPool; } + static ItemType*& AccessPrev(ItemType* item) { return item->m_PrevPool; } + static ItemType*& AccessNext(ItemType* item) { return item->m_NextPool; } +}; +#endif // _VMA_POOL_T + +#ifndef _VMA_CURRENT_BUDGET_DATA +struct VmaCurrentBudgetData +{ + VMA_CLASS_NO_COPY_NO_MOVE(VmaCurrentBudgetData) +public: + + VMA_ATOMIC_UINT32 m_BlockCount[VK_MAX_MEMORY_HEAPS]; + VMA_ATOMIC_UINT32 m_AllocationCount[VK_MAX_MEMORY_HEAPS]; + VMA_ATOMIC_UINT64 m_BlockBytes[VK_MAX_MEMORY_HEAPS]; + VMA_ATOMIC_UINT64 m_AllocationBytes[VK_MAX_MEMORY_HEAPS]; + +#if VMA_MEMORY_BUDGET + VMA_ATOMIC_UINT32 m_OperationsSinceBudgetFetch; + VMA_RW_MUTEX m_BudgetMutex; + uint64_t m_VulkanUsage[VK_MAX_MEMORY_HEAPS]; + uint64_t m_VulkanBudget[VK_MAX_MEMORY_HEAPS]; + uint64_t m_BlockBytesAtBudgetFetch[VK_MAX_MEMORY_HEAPS]; +#endif // VMA_MEMORY_BUDGET + + VmaCurrentBudgetData(); + + void AddAllocation(uint32_t heapIndex, VkDeviceSize allocationSize); + void RemoveAllocation(uint32_t heapIndex, VkDeviceSize allocationSize); +}; + +#ifndef _VMA_CURRENT_BUDGET_DATA_FUNCTIONS +VmaCurrentBudgetData::VmaCurrentBudgetData() +{ + for (uint32_t heapIndex = 0; heapIndex < VK_MAX_MEMORY_HEAPS; ++heapIndex) + { + m_BlockCount[heapIndex] = 0; + m_AllocationCount[heapIndex] = 0; + m_BlockBytes[heapIndex] = 0; + m_AllocationBytes[heapIndex] = 0; +#if VMA_MEMORY_BUDGET + m_VulkanUsage[heapIndex] = 0; + m_VulkanBudget[heapIndex] = 0; + m_BlockBytesAtBudgetFetch[heapIndex] = 0; +#endif + } + +#if VMA_MEMORY_BUDGET + m_OperationsSinceBudgetFetch = 0; +#endif +} + +void VmaCurrentBudgetData::AddAllocation(uint32_t heapIndex, VkDeviceSize allocationSize) +{ + m_AllocationBytes[heapIndex] += allocationSize; + ++m_AllocationCount[heapIndex]; +#if VMA_MEMORY_BUDGET + ++m_OperationsSinceBudgetFetch; +#endif +} + +void VmaCurrentBudgetData::RemoveAllocation(uint32_t heapIndex, VkDeviceSize allocationSize) +{ + VMA_ASSERT(m_AllocationBytes[heapIndex] >= allocationSize); + m_AllocationBytes[heapIndex] -= allocationSize; + VMA_ASSERT(m_AllocationCount[heapIndex] > 0); + --m_AllocationCount[heapIndex]; +#if VMA_MEMORY_BUDGET + ++m_OperationsSinceBudgetFetch; +#endif +} +#endif // _VMA_CURRENT_BUDGET_DATA_FUNCTIONS +#endif // _VMA_CURRENT_BUDGET_DATA + +#ifndef 
_VMA_ALLOCATION_OBJECT_ALLOCATOR
+/*
+Thread-safe wrapper over VmaPoolAllocator free list, for allocation of VmaAllocation_T objects.
+*/
+class VmaAllocationObjectAllocator
+{
+ VMA_CLASS_NO_COPY_NO_MOVE(VmaAllocationObjectAllocator)
+public:
+ VmaAllocationObjectAllocator(const VkAllocationCallbacks* pAllocationCallbacks)
+ : m_Allocator(pAllocationCallbacks, 1024) {}
+
+ template<typename... Types> VmaAllocation Allocate(Types&&... args);
+ void Free(VmaAllocation hAlloc);
+
+private:
+ VMA_MUTEX m_Mutex;
+ VmaPoolAllocator<VmaAllocation_T> m_Allocator;
+};
+
+template<typename... Types>
+VmaAllocation VmaAllocationObjectAllocator::Allocate(Types&&... args)
+{
+ VmaMutexLock mutexLock(m_Mutex);
+ return m_Allocator.Alloc<Types...>(std::forward<Types>(args)...);
+}
+
+void VmaAllocationObjectAllocator::Free(VmaAllocation hAlloc)
+{
+ VmaMutexLock mutexLock(m_Mutex);
+ m_Allocator.Free(hAlloc);
+}
+#endif // _VMA_ALLOCATION_OBJECT_ALLOCATOR
+
+#ifndef _VMA_VIRTUAL_BLOCK_T
+struct VmaVirtualBlock_T
+{
+ VMA_CLASS_NO_COPY_NO_MOVE(VmaVirtualBlock_T)
+public:
+ const bool m_AllocationCallbacksSpecified;
+ const VkAllocationCallbacks m_AllocationCallbacks;
+
+ VmaVirtualBlock_T(const VmaVirtualBlockCreateInfo& createInfo);
+ ~VmaVirtualBlock_T();
+
+ VkResult Init() { return VK_SUCCESS; }
+ bool IsEmpty() const { return m_Metadata->IsEmpty(); }
+ void Free(VmaVirtualAllocation allocation) { m_Metadata->Free((VmaAllocHandle)allocation); }
+ void SetAllocationUserData(VmaVirtualAllocation allocation, void* userData) { m_Metadata->SetAllocationUserData((VmaAllocHandle)allocation, userData); }
+ void Clear() { m_Metadata->Clear(); }
+
+ const VkAllocationCallbacks* GetAllocationCallbacks() const;
+ void GetAllocationInfo(VmaVirtualAllocation allocation, VmaVirtualAllocationInfo& outInfo);
+ VkResult Allocate(const VmaVirtualAllocationCreateInfo& createInfo, VmaVirtualAllocation& outAllocation,
+ VkDeviceSize* outOffset);
+ void GetStatistics(VmaStatistics& outStats) const;
+ void CalculateDetailedStatistics(VmaDetailedStatistics& outStats) const;
+#if VMA_STATS_STRING_ENABLED
+ void BuildStatsString(bool detailedMap, VmaStringBuilder& sb) const;
+#endif
+
+private:
+ VmaBlockMetadata* m_Metadata;
+};
+
+#ifndef _VMA_VIRTUAL_BLOCK_T_FUNCTIONS
+VmaVirtualBlock_T::VmaVirtualBlock_T(const VmaVirtualBlockCreateInfo& createInfo)
+ : m_AllocationCallbacksSpecified(createInfo.pAllocationCallbacks != VMA_NULL),
+ m_AllocationCallbacks(createInfo.pAllocationCallbacks != VMA_NULL ? *createInfo.pAllocationCallbacks : VmaEmptyAllocationCallbacks)
+{
+ const uint32_t algorithm = createInfo.flags & VMA_VIRTUAL_BLOCK_CREATE_ALGORITHM_MASK;
+ switch (algorithm)
+ {
+ case 0:
+ m_Metadata = vma_new(GetAllocationCallbacks(), VmaBlockMetadata_TLSF)(VK_NULL_HANDLE, 1, true);
+ break;
+ case VMA_VIRTUAL_BLOCK_CREATE_LINEAR_ALGORITHM_BIT:
+ m_Metadata = vma_new(GetAllocationCallbacks(), VmaBlockMetadata_Linear)(VK_NULL_HANDLE, 1, true);
+ break;
+ default:
+ VMA_ASSERT(0);
+ m_Metadata = vma_new(GetAllocationCallbacks(), VmaBlockMetadata_TLSF)(VK_NULL_HANDLE, 1, true);
+ }
+
+ m_Metadata->Init(createInfo.size);
+}
+
+VmaVirtualBlock_T::~VmaVirtualBlock_T()
+{
+ // Define macro VMA_DEBUG_LOG_FORMAT or more specialized VMA_LEAK_LOG_FORMAT
+ // to receive the list of the unfreed allocations.
+ if (!m_Metadata->IsEmpty())
+ m_Metadata->DebugLogAllAllocations();
+ // This is the most important assert in the entire library.
+ // Hitting it means you have some memory leak - unreleased virtual allocations. 
+ VMA_ASSERT_LEAK(m_Metadata->IsEmpty() && "Some virtual allocations were not freed before destruction of this virtual block!"); + + vma_delete(GetAllocationCallbacks(), m_Metadata); +} + +const VkAllocationCallbacks* VmaVirtualBlock_T::GetAllocationCallbacks() const +{ + return m_AllocationCallbacksSpecified ? &m_AllocationCallbacks : VMA_NULL; +} + +void VmaVirtualBlock_T::GetAllocationInfo(VmaVirtualAllocation allocation, VmaVirtualAllocationInfo& outInfo) +{ + m_Metadata->GetAllocationInfo((VmaAllocHandle)allocation, outInfo); +} + +VkResult VmaVirtualBlock_T::Allocate(const VmaVirtualAllocationCreateInfo& createInfo, VmaVirtualAllocation& outAllocation, + VkDeviceSize* outOffset) +{ + VmaAllocationRequest request = {}; + if (m_Metadata->CreateAllocationRequest( + createInfo.size, // allocSize + VMA_MAX(createInfo.alignment, (VkDeviceSize)1), // allocAlignment + (createInfo.flags & VMA_VIRTUAL_ALLOCATION_CREATE_UPPER_ADDRESS_BIT) != 0, // upperAddress + VMA_SUBALLOCATION_TYPE_UNKNOWN, // allocType - unimportant + createInfo.flags & VMA_VIRTUAL_ALLOCATION_CREATE_STRATEGY_MASK, // strategy + &request)) + { + m_Metadata->Alloc(request, + VMA_SUBALLOCATION_TYPE_UNKNOWN, // type - unimportant + createInfo.pUserData); + outAllocation = (VmaVirtualAllocation)request.allocHandle; + if(outOffset) + *outOffset = m_Metadata->GetAllocationOffset(request.allocHandle); + return VK_SUCCESS; + } + outAllocation = (VmaVirtualAllocation)VK_NULL_HANDLE; + if (outOffset) + *outOffset = UINT64_MAX; + return VK_ERROR_OUT_OF_DEVICE_MEMORY; +} + +void VmaVirtualBlock_T::GetStatistics(VmaStatistics& outStats) const +{ + VmaClearStatistics(outStats); + m_Metadata->AddStatistics(outStats); +} + +void VmaVirtualBlock_T::CalculateDetailedStatistics(VmaDetailedStatistics& outStats) const +{ + VmaClearDetailedStatistics(outStats); + m_Metadata->AddDetailedStatistics(outStats); +} + +#if VMA_STATS_STRING_ENABLED +void VmaVirtualBlock_T::BuildStatsString(bool detailedMap, VmaStringBuilder& sb) const +{ + VmaJsonWriter json(GetAllocationCallbacks(), sb); + json.BeginObject(); + + VmaDetailedStatistics stats; + CalculateDetailedStatistics(stats); + + json.WriteString("Stats"); + VmaPrintDetailedStatistics(json, stats); + + if (detailedMap) + { + json.WriteString("Details"); + json.BeginObject(); + m_Metadata->PrintDetailedMap(json); + json.EndObject(); + } + + json.EndObject(); +} +#endif // VMA_STATS_STRING_ENABLED +#endif // _VMA_VIRTUAL_BLOCK_T_FUNCTIONS +#endif // _VMA_VIRTUAL_BLOCK_T + + +// Main allocator object. +struct VmaAllocator_T +{ + VMA_CLASS_NO_COPY_NO_MOVE(VmaAllocator_T) +public: + const bool m_UseMutex; + const uint32_t m_VulkanApiVersion; + bool m_UseKhrDedicatedAllocation; // Can be set only if m_VulkanApiVersion < VK_MAKE_VERSION(1, 1, 0). + bool m_UseKhrBindMemory2; // Can be set only if m_VulkanApiVersion < VK_MAKE_VERSION(1, 1, 0). + bool m_UseExtMemoryBudget; + bool m_UseAmdDeviceCoherentMemory; + bool m_UseKhrBufferDeviceAddress; + bool m_UseExtMemoryPriority; + bool m_UseKhrMaintenance4; + bool m_UseKhrMaintenance5; + bool m_UseKhrExternalMemoryWin32; + const VkDevice m_hDevice; + const VkInstance m_hInstance; + const bool m_AllocationCallbacksSpecified; + const VkAllocationCallbacks m_AllocationCallbacks; + VmaDeviceMemoryCallbacks m_DeviceMemoryCallbacks; + VmaAllocationObjectAllocator m_AllocationObjectAllocator; + + // Each bit (1 << i) is set if HeapSizeLimit is enabled for that heap, so cannot allocate more than the heap size. 
+ uint32_t m_HeapSizeLimitMask; + + VkPhysicalDeviceProperties m_PhysicalDeviceProperties; + VkPhysicalDeviceMemoryProperties m_MemProps; + + // Default pools. + VmaBlockVector* m_pBlockVectors[VK_MAX_MEMORY_TYPES]; + VmaDedicatedAllocationList m_DedicatedAllocations[VK_MAX_MEMORY_TYPES]; + + VmaCurrentBudgetData m_Budget; + VMA_ATOMIC_UINT32 m_DeviceMemoryCount; // Total number of VkDeviceMemory objects. + + VmaAllocator_T(const VmaAllocatorCreateInfo* pCreateInfo); + VkResult Init(const VmaAllocatorCreateInfo* pCreateInfo); + ~VmaAllocator_T(); + + const VkAllocationCallbacks* GetAllocationCallbacks() const + { + return m_AllocationCallbacksSpecified ? &m_AllocationCallbacks : VMA_NULL; + } + const VmaVulkanFunctions& GetVulkanFunctions() const + { + return m_VulkanFunctions; + } + + VkPhysicalDevice GetPhysicalDevice() const { return m_PhysicalDevice; } + + VkDeviceSize GetBufferImageGranularity() const + { + return VMA_MAX( + static_cast(VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY), + m_PhysicalDeviceProperties.limits.bufferImageGranularity); + } + + uint32_t GetMemoryHeapCount() const { return m_MemProps.memoryHeapCount; } + uint32_t GetMemoryTypeCount() const { return m_MemProps.memoryTypeCount; } + + uint32_t MemoryTypeIndexToHeapIndex(uint32_t memTypeIndex) const + { + VMA_ASSERT(memTypeIndex < m_MemProps.memoryTypeCount); + return m_MemProps.memoryTypes[memTypeIndex].heapIndex; + } + // True when specific memory type is HOST_VISIBLE but not HOST_COHERENT. + bool IsMemoryTypeNonCoherent(uint32_t memTypeIndex) const + { + return (m_MemProps.memoryTypes[memTypeIndex].propertyFlags & (VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_COHERENT_BIT)) == + VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT; + } + // Minimum alignment for all allocations in specific memory type. + VkDeviceSize GetMemoryTypeMinAlignment(uint32_t memTypeIndex) const + { + return IsMemoryTypeNonCoherent(memTypeIndex) ? + VMA_MAX((VkDeviceSize)VMA_MIN_ALIGNMENT, m_PhysicalDeviceProperties.limits.nonCoherentAtomSize) : + (VkDeviceSize)VMA_MIN_ALIGNMENT; + } + + bool IsIntegratedGpu() const + { + return m_PhysicalDeviceProperties.deviceType == VK_PHYSICAL_DEVICE_TYPE_INTEGRATED_GPU; + } + + uint32_t GetGlobalMemoryTypeBits() const { return m_GlobalMemoryTypeBits; } + + void GetBufferMemoryRequirements( + VkBuffer hBuffer, + VkMemoryRequirements& memReq, + bool& requiresDedicatedAllocation, + bool& prefersDedicatedAllocation) const; + void GetImageMemoryRequirements( + VkImage hImage, + VkMemoryRequirements& memReq, + bool& requiresDedicatedAllocation, + bool& prefersDedicatedAllocation) const; + VkResult FindMemoryTypeIndex( + uint32_t memoryTypeBits, + const VmaAllocationCreateInfo* pAllocationCreateInfo, + VmaBufferImageUsage bufImgUsage, + uint32_t* pMemoryTypeIndex) const; + + // Main allocation function. + VkResult AllocateMemory( + const VkMemoryRequirements& vkMemReq, + bool requiresDedicatedAllocation, + bool prefersDedicatedAllocation, + VkBuffer dedicatedBuffer, + VkImage dedicatedImage, + VmaBufferImageUsage dedicatedBufferImageUsage, + const VmaAllocationCreateInfo& createInfo, + VmaSuballocationType suballocType, + size_t allocationCount, + VmaAllocation* pAllocations); + + // Main deallocation function. 
+ void FreeMemory( + size_t allocationCount, + const VmaAllocation* pAllocations); + + void CalculateStatistics(VmaTotalStatistics* pStats); + + void GetHeapBudgets( + VmaBudget* outBudgets, uint32_t firstHeap, uint32_t heapCount); + +#if VMA_STATS_STRING_ENABLED + void PrintDetailedMap(class VmaJsonWriter& json); +#endif + + void GetAllocationInfo(VmaAllocation hAllocation, VmaAllocationInfo* pAllocationInfo); + void GetAllocationInfo2(VmaAllocation hAllocation, VmaAllocationInfo2* pAllocationInfo); + + VkResult CreatePool(const VmaPoolCreateInfo* pCreateInfo, VmaPool* pPool); + void DestroyPool(VmaPool pool); + void GetPoolStatistics(VmaPool pool, VmaStatistics* pPoolStats); + void CalculatePoolStatistics(VmaPool pool, VmaDetailedStatistics* pPoolStats); + + void SetCurrentFrameIndex(uint32_t frameIndex); + uint32_t GetCurrentFrameIndex() const { return m_CurrentFrameIndex.load(); } + + VkResult CheckPoolCorruption(VmaPool hPool); + VkResult CheckCorruption(uint32_t memoryTypeBits); + + // Call to Vulkan function vkAllocateMemory with accompanying bookkeeping. + VkResult AllocateVulkanMemory(const VkMemoryAllocateInfo* pAllocateInfo, VkDeviceMemory* pMemory); + // Call to Vulkan function vkFreeMemory with accompanying bookkeeping. + void FreeVulkanMemory(uint32_t memoryType, VkDeviceSize size, VkDeviceMemory hMemory); + // Call to Vulkan function vkBindBufferMemory or vkBindBufferMemory2KHR. + VkResult BindVulkanBuffer( + VkDeviceMemory memory, + VkDeviceSize memoryOffset, + VkBuffer buffer, + const void* pNext); + // Call to Vulkan function vkBindImageMemory or vkBindImageMemory2KHR. + VkResult BindVulkanImage( + VkDeviceMemory memory, + VkDeviceSize memoryOffset, + VkImage image, + const void* pNext); + + VkResult Map(VmaAllocation hAllocation, void** ppData); + void Unmap(VmaAllocation hAllocation); + + VkResult BindBufferMemory( + VmaAllocation hAllocation, + VkDeviceSize allocationLocalOffset, + VkBuffer hBuffer, + const void* pNext); + VkResult BindImageMemory( + VmaAllocation hAllocation, + VkDeviceSize allocationLocalOffset, + VkImage hImage, + const void* pNext); + + VkResult FlushOrInvalidateAllocation( + VmaAllocation hAllocation, + VkDeviceSize offset, VkDeviceSize size, + VMA_CACHE_OPERATION op); + VkResult FlushOrInvalidateAllocations( + uint32_t allocationCount, + const VmaAllocation* allocations, + const VkDeviceSize* offsets, const VkDeviceSize* sizes, + VMA_CACHE_OPERATION op); + + VkResult CopyMemoryToAllocation( + const void* pSrcHostPointer, + VmaAllocation dstAllocation, + VkDeviceSize dstAllocationLocalOffset, + VkDeviceSize size); + VkResult CopyAllocationToMemory( + VmaAllocation srcAllocation, + VkDeviceSize srcAllocationLocalOffset, + void* pDstHostPointer, + VkDeviceSize size); + + void FillAllocation(const VmaAllocation hAllocation, uint8_t pattern); + + /* + Returns bit mask of memory types that can support defragmentation on GPU as + they support creation of required buffer for copy operations. + */ + uint32_t GetGpuDefragmentationMemoryTypeBits(); + +#if VMA_EXTERNAL_MEMORY + VkExternalMemoryHandleTypeFlagsKHR GetExternalMemoryHandleTypeFlags(uint32_t memTypeIndex) const + { + return m_TypeExternalMemoryHandleTypes[memTypeIndex]; + } +#endif // #if VMA_EXTERNAL_MEMORY + +private: + VkDeviceSize m_PreferredLargeHeapBlockSize; + + VkPhysicalDevice m_PhysicalDevice; + VMA_ATOMIC_UINT32 m_CurrentFrameIndex; + VMA_ATOMIC_UINT32 m_GpuDefragmentationMemoryTypeBits; // UINT32_MAX means uninitialized. 
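+// The interface declared above backs VMA's public C entry points (vmaCreateBuffer ->
+// AllocateMemory, vmaMapMemory -> Map, vmaFlushAllocation -> FlushOrInvalidateAllocation, ...).
+// A minimal caller-side sketch; physicalDevice, device and instance are placeholder handles:
+#if 0
+    VmaAllocatorCreateInfo allocatorInfo = {};
+    allocatorInfo.vulkanApiVersion = VK_API_VERSION_1_1;
+    allocatorInfo.physicalDevice = physicalDevice;
+    allocatorInfo.device = device;
+    allocatorInfo.instance = instance;
+    VmaAllocator allocator;
+    vmaCreateAllocator(&allocatorInfo, &allocator);
+
+    VkBufferCreateInfo bufferInfo = { VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO };
+    bufferInfo.size = 65536;
+    bufferInfo.usage = VK_BUFFER_USAGE_VERTEX_BUFFER_BIT | VK_BUFFER_USAGE_TRANSFER_DST_BIT;
+
+    VmaAllocationCreateInfo allocCreateInfo = {};
+    allocCreateInfo.usage = VMA_MEMORY_USAGE_AUTO;
+
+    VkBuffer buffer;
+    VmaAllocation allocation;
+    vmaCreateBuffer(allocator, &bufferInfo, &allocCreateInfo, &buffer, &allocation, NULL);
+
+    // ... use the buffer ...
+
+    vmaDestroyBuffer(allocator, buffer, allocation);
+    vmaDestroyAllocator(allocator);
+#endif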
+#if VMA_EXTERNAL_MEMORY + VkExternalMemoryHandleTypeFlagsKHR m_TypeExternalMemoryHandleTypes[VK_MAX_MEMORY_TYPES]; +#endif // #if VMA_EXTERNAL_MEMORY + + VMA_RW_MUTEX m_PoolsMutex; + typedef VmaIntrusiveLinkedList PoolList; + // Protected by m_PoolsMutex. + PoolList m_Pools; + uint32_t m_NextPoolId; + + VmaVulkanFunctions m_VulkanFunctions; + + // Global bit mask AND-ed with any memoryTypeBits to disallow certain memory types. + uint32_t m_GlobalMemoryTypeBits; + + void ImportVulkanFunctions(const VmaVulkanFunctions* pVulkanFunctions); + +#if VMA_STATIC_VULKAN_FUNCTIONS == 1 + void ImportVulkanFunctions_Static(); +#endif + + void ImportVulkanFunctions_Custom(const VmaVulkanFunctions* pVulkanFunctions); + +#if VMA_DYNAMIC_VULKAN_FUNCTIONS == 1 + void ImportVulkanFunctions_Dynamic(); +#endif + + void ValidateVulkanFunctions(); + + VkDeviceSize CalcPreferredBlockSize(uint32_t memTypeIndex); + + VkResult AllocateMemoryOfType( + VmaPool pool, + VkDeviceSize size, + VkDeviceSize alignment, + bool dedicatedPreferred, + VkBuffer dedicatedBuffer, + VkImage dedicatedImage, + VmaBufferImageUsage dedicatedBufferImageUsage, + const VmaAllocationCreateInfo& createInfo, + uint32_t memTypeIndex, + VmaSuballocationType suballocType, + VmaDedicatedAllocationList& dedicatedAllocations, + VmaBlockVector& blockVector, + size_t allocationCount, + VmaAllocation* pAllocations); + + // Helper function only to be used inside AllocateDedicatedMemory. + VkResult AllocateDedicatedMemoryPage( + VmaPool pool, + VkDeviceSize size, + VmaSuballocationType suballocType, + uint32_t memTypeIndex, + const VkMemoryAllocateInfo& allocInfo, + bool map, + bool isUserDataString, + bool isMappingAllowed, + void* pUserData, + VmaAllocation* pAllocation); + + // Allocates and registers new VkDeviceMemory specifically for dedicated allocations. + VkResult AllocateDedicatedMemory( + VmaPool pool, + VkDeviceSize size, + VmaSuballocationType suballocType, + VmaDedicatedAllocationList& dedicatedAllocations, + uint32_t memTypeIndex, + bool map, + bool isUserDataString, + bool isMappingAllowed, + bool canAliasMemory, + void* pUserData, + float priority, + VkBuffer dedicatedBuffer, + VkImage dedicatedImage, + VmaBufferImageUsage dedicatedBufferImageUsage, + size_t allocationCount, + VmaAllocation* pAllocations, + const void* pNextChain = VMA_NULL); + + void FreeDedicatedMemory(const VmaAllocation allocation); + + VkResult CalcMemTypeParams( + VmaAllocationCreateInfo& outCreateInfo, + uint32_t memTypeIndex, + VkDeviceSize size, + size_t allocationCount); + VkResult CalcAllocationParams( + VmaAllocationCreateInfo& outCreateInfo, + bool dedicatedRequired, + bool dedicatedPreferred); + + /* + Calculates and returns bit mask of memory types that can support defragmentation + on GPU as they support creation of required buffer for copy operations. 
+ */ + uint32_t CalculateGpuDefragmentationMemoryTypeBits() const; + uint32_t CalculateGlobalMemoryTypeBits() const; + + bool GetFlushOrInvalidateRange( + VmaAllocation allocation, + VkDeviceSize offset, VkDeviceSize size, + VkMappedMemoryRange& outRange) const; + +#if VMA_MEMORY_BUDGET + void UpdateVulkanBudget(); +#endif // #if VMA_MEMORY_BUDGET +}; + + +#ifndef _VMA_MEMORY_FUNCTIONS +static void* VmaMalloc(VmaAllocator hAllocator, size_t size, size_t alignment) +{ + return VmaMalloc(&hAllocator->m_AllocationCallbacks, size, alignment); +} + +static void VmaFree(VmaAllocator hAllocator, void* ptr) +{ + VmaFree(&hAllocator->m_AllocationCallbacks, ptr); +} + +template +static T* VmaAllocate(VmaAllocator hAllocator) +{ + return (T*)VmaMalloc(hAllocator, sizeof(T), VMA_ALIGN_OF(T)); +} + +template +static T* VmaAllocateArray(VmaAllocator hAllocator, size_t count) +{ + return (T*)VmaMalloc(hAllocator, sizeof(T) * count, VMA_ALIGN_OF(T)); +} + +template +static void vma_delete(VmaAllocator hAllocator, T* ptr) +{ + if(ptr != VMA_NULL) + { + ptr->~T(); + VmaFree(hAllocator, ptr); + } +} + +template +static void vma_delete_array(VmaAllocator hAllocator, T* ptr, size_t count) +{ + if(ptr != VMA_NULL) + { + for(size_t i = count; i--; ) + ptr[i].~T(); + VmaFree(hAllocator, ptr); + } +} +#endif // _VMA_MEMORY_FUNCTIONS + +#ifndef _VMA_DEVICE_MEMORY_BLOCK_FUNCTIONS +VmaDeviceMemoryBlock::VmaDeviceMemoryBlock(VmaAllocator hAllocator) + : m_pMetadata(VMA_NULL), + m_MemoryTypeIndex(UINT32_MAX), + m_Id(0), + m_hMemory(VK_NULL_HANDLE), + m_MapCount(0), + m_pMappedData(VMA_NULL){} + +VmaDeviceMemoryBlock::~VmaDeviceMemoryBlock() +{ + VMA_ASSERT_LEAK(m_MapCount == 0 && "VkDeviceMemory block is being destroyed while it is still mapped."); + VMA_ASSERT_LEAK(m_hMemory == VK_NULL_HANDLE); +} + +void VmaDeviceMemoryBlock::Init( + VmaAllocator hAllocator, + VmaPool hParentPool, + uint32_t newMemoryTypeIndex, + VkDeviceMemory newMemory, + VkDeviceSize newSize, + uint32_t id, + uint32_t algorithm, + VkDeviceSize bufferImageGranularity) +{ + VMA_ASSERT(m_hMemory == VK_NULL_HANDLE); + + m_hParentPool = hParentPool; + m_MemoryTypeIndex = newMemoryTypeIndex; + m_Id = id; + m_hMemory = newMemory; + + switch (algorithm) + { + case 0: + m_pMetadata = vma_new(hAllocator, VmaBlockMetadata_TLSF)(hAllocator->GetAllocationCallbacks(), + bufferImageGranularity, false); // isVirtual + break; + case VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT: + m_pMetadata = vma_new(hAllocator, VmaBlockMetadata_Linear)(hAllocator->GetAllocationCallbacks(), + bufferImageGranularity, false); // isVirtual + break; + default: + VMA_ASSERT(0); + m_pMetadata = vma_new(hAllocator, VmaBlockMetadata_TLSF)(hAllocator->GetAllocationCallbacks(), + bufferImageGranularity, false); // isVirtual + } + m_pMetadata->Init(newSize); +} + +void VmaDeviceMemoryBlock::Destroy(VmaAllocator allocator) +{ + // Define macro VMA_DEBUG_LOG_FORMAT or more specialized VMA_LEAK_LOG_FORMAT + // to receive the list of the unfreed allocations. + if (!m_pMetadata->IsEmpty()) + m_pMetadata->DebugLogAllAllocations(); + // This is the most important assert in the entire library. + // Hitting it means you have some memory leak - unreleased VmaAllocation objects. 
+ VMA_ASSERT_LEAK(m_pMetadata->IsEmpty() && "Some allocations were not freed before destruction of this memory block!"); + + VMA_ASSERT_LEAK(m_hMemory != VK_NULL_HANDLE); + allocator->FreeVulkanMemory(m_MemoryTypeIndex, m_pMetadata->GetSize(), m_hMemory); + m_hMemory = VK_NULL_HANDLE; + + vma_delete(allocator, m_pMetadata); + m_pMetadata = VMA_NULL; +} + +void VmaDeviceMemoryBlock::PostAlloc(VmaAllocator hAllocator) +{ + VmaMutexLock lock(m_MapAndBindMutex, hAllocator->m_UseMutex); + m_MappingHysteresis.PostAlloc(); +} + +void VmaDeviceMemoryBlock::PostFree(VmaAllocator hAllocator) +{ + VmaMutexLock lock(m_MapAndBindMutex, hAllocator->m_UseMutex); + if(m_MappingHysteresis.PostFree()) + { + VMA_ASSERT(m_MappingHysteresis.GetExtraMapping() == 0); + if (m_MapCount == 0) + { + m_pMappedData = VMA_NULL; + (*hAllocator->GetVulkanFunctions().vkUnmapMemory)(hAllocator->m_hDevice, m_hMemory); + } + } +} + +bool VmaDeviceMemoryBlock::Validate() const +{ + VMA_VALIDATE((m_hMemory != VK_NULL_HANDLE) && + (m_pMetadata->GetSize() != 0)); + + return m_pMetadata->Validate(); +} + +VkResult VmaDeviceMemoryBlock::CheckCorruption(VmaAllocator hAllocator) +{ + void* pData = VMA_NULL; + VkResult res = Map(hAllocator, 1, &pData); + if (res != VK_SUCCESS) + { + return res; + } + + res = m_pMetadata->CheckCorruption(pData); + + Unmap(hAllocator, 1); + + return res; +} + +VkResult VmaDeviceMemoryBlock::Map(VmaAllocator hAllocator, uint32_t count, void** ppData) +{ + if (count == 0) + { + return VK_SUCCESS; + } + + VmaMutexLock lock(m_MapAndBindMutex, hAllocator->m_UseMutex); + const uint32_t oldTotalMapCount = m_MapCount + m_MappingHysteresis.GetExtraMapping(); + if (oldTotalMapCount != 0) + { + VMA_ASSERT(m_pMappedData != VMA_NULL); + m_MappingHysteresis.PostMap(); + m_MapCount += count; + if (ppData != VMA_NULL) + { + *ppData = m_pMappedData; + } + return VK_SUCCESS; + } + else + { + VkResult result = (*hAllocator->GetVulkanFunctions().vkMapMemory)( + hAllocator->m_hDevice, + m_hMemory, + 0, // offset + VK_WHOLE_SIZE, + 0, // flags + &m_pMappedData); + if (result == VK_SUCCESS) + { + VMA_ASSERT(m_pMappedData != VMA_NULL); + m_MappingHysteresis.PostMap(); + m_MapCount = count; + if (ppData != VMA_NULL) + { + *ppData = m_pMappedData; + } + } + return result; + } +} + +void VmaDeviceMemoryBlock::Unmap(VmaAllocator hAllocator, uint32_t count) +{ + if (count == 0) + { + return; + } + + VmaMutexLock lock(m_MapAndBindMutex, hAllocator->m_UseMutex); + if (m_MapCount >= count) + { + m_MapCount -= count; + const uint32_t totalMapCount = m_MapCount + m_MappingHysteresis.GetExtraMapping(); + if (totalMapCount == 0) + { + m_pMappedData = VMA_NULL; + (*hAllocator->GetVulkanFunctions().vkUnmapMemory)(hAllocator->m_hDevice, m_hMemory); + } + m_MappingHysteresis.PostUnmap(); + } + else + { + VMA_ASSERT(0 && "VkDeviceMemory block is being unmapped while it was not previously mapped."); + } +} + +VkResult VmaDeviceMemoryBlock::WriteMagicValueAfterAllocation(VmaAllocator hAllocator, VkDeviceSize allocOffset, VkDeviceSize allocSize) +{ + VMA_ASSERT(VMA_DEBUG_MARGIN > 0 && VMA_DEBUG_MARGIN % 4 == 0 && VMA_DEBUG_DETECT_CORRUPTION); + + void* pData; + VkResult res = Map(hAllocator, 1, &pData); + if (res != VK_SUCCESS) + { + return res; + } + + VmaWriteMagicValue(pData, allocOffset + allocSize); + + Unmap(hAllocator, 1); + return VK_SUCCESS; +} + +VkResult VmaDeviceMemoryBlock::ValidateMagicValueAfterAllocation(VmaAllocator hAllocator, VkDeviceSize allocOffset, VkDeviceSize allocSize) +{ + VMA_ASSERT(VMA_DEBUG_MARGIN > 0 && 
VMA_DEBUG_MARGIN % 4 == 0 && VMA_DEBUG_DETECT_CORRUPTION); + + void* pData; + VkResult res = Map(hAllocator, 1, &pData); + if (res != VK_SUCCESS) + { + return res; + } + + if (!VmaValidateMagicValue(pData, allocOffset + allocSize)) + { + VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED AFTER FREED ALLOCATION!"); + } + + Unmap(hAllocator, 1); + return VK_SUCCESS; +} + +VkResult VmaDeviceMemoryBlock::BindBufferMemory( + const VmaAllocator hAllocator, + const VmaAllocation hAllocation, + VkDeviceSize allocationLocalOffset, + VkBuffer hBuffer, + const void* pNext) +{ + VMA_ASSERT(hAllocation->GetType() == VmaAllocation_T::ALLOCATION_TYPE_BLOCK && + hAllocation->GetBlock() == this); + VMA_ASSERT(allocationLocalOffset < hAllocation->GetSize() && + "Invalid allocationLocalOffset. Did you forget that this offset is relative to the beginning of the allocation, not the whole memory block?"); + const VkDeviceSize memoryOffset = hAllocation->GetOffset() + allocationLocalOffset; + // This lock is important so that we don't call vkBind... and/or vkMap... simultaneously on the same VkDeviceMemory from multiple threads. + VmaMutexLock lock(m_MapAndBindMutex, hAllocator->m_UseMutex); + return hAllocator->BindVulkanBuffer(m_hMemory, memoryOffset, hBuffer, pNext); +} + +VkResult VmaDeviceMemoryBlock::BindImageMemory( + const VmaAllocator hAllocator, + const VmaAllocation hAllocation, + VkDeviceSize allocationLocalOffset, + VkImage hImage, + const void* pNext) +{ + VMA_ASSERT(hAllocation->GetType() == VmaAllocation_T::ALLOCATION_TYPE_BLOCK && + hAllocation->GetBlock() == this); + VMA_ASSERT(allocationLocalOffset < hAllocation->GetSize() && + "Invalid allocationLocalOffset. Did you forget that this offset is relative to the beginning of the allocation, not the whole memory block?"); + const VkDeviceSize memoryOffset = hAllocation->GetOffset() + allocationLocalOffset; + // This lock is important so that we don't call vkBind... and/or vkMap... simultaneously on the same VkDeviceMemory from multiple threads. + VmaMutexLock lock(m_MapAndBindMutex, hAllocator->m_UseMutex); + return hAllocator->BindVulkanImage(m_hMemory, memoryOffset, hImage, pNext); +} + +#if VMA_EXTERNAL_MEMORY_WIN32 +VkResult VmaDeviceMemoryBlock::CreateWin32Handle(const VmaAllocator hAllocator, PFN_vkGetMemoryWin32HandleKHR pvkGetMemoryWin32HandleKHR, HANDLE hTargetProcess, HANDLE* pHandle) noexcept +{ + VMA_ASSERT(pHandle); + return m_Handle.GetHandle(hAllocator->m_hDevice, m_hMemory, pvkGetMemoryWin32HandleKHR, hTargetProcess, hAllocator->m_UseMutex, pHandle); +} +#endif // VMA_EXTERNAL_MEMORY_WIN32 +#endif // _VMA_DEVICE_MEMORY_BLOCK_FUNCTIONS + +#ifndef _VMA_ALLOCATION_T_FUNCTIONS +VmaAllocation_T::VmaAllocation_T(bool mappingAllowed) + : m_Alignment{ 1 }, + m_Size{ 0 }, + m_pUserData{ VMA_NULL }, + m_pName{ VMA_NULL }, + m_MemoryTypeIndex{ 0 }, + m_Type{ (uint8_t)ALLOCATION_TYPE_NONE }, + m_SuballocationType{ (uint8_t)VMA_SUBALLOCATION_TYPE_UNKNOWN }, + m_MapCount{ 0 }, + m_Flags{ 0 } +{ + if(mappingAllowed) + m_Flags |= (uint8_t)FLAG_MAPPING_ALLOWED; +} + +VmaAllocation_T::~VmaAllocation_T() +{ + VMA_ASSERT_LEAK(m_MapCount == 0 && "Allocation was not unmapped before destruction."); + + // Check if owned string was freed. 
+ VMA_ASSERT(m_pName == VMA_NULL); +} + +void VmaAllocation_T::InitBlockAllocation( + VmaDeviceMemoryBlock* block, + VmaAllocHandle allocHandle, + VkDeviceSize alignment, + VkDeviceSize size, + uint32_t memoryTypeIndex, + VmaSuballocationType suballocationType, + bool mapped) +{ + VMA_ASSERT(m_Type == ALLOCATION_TYPE_NONE); + VMA_ASSERT(block != VMA_NULL); + m_Type = (uint8_t)ALLOCATION_TYPE_BLOCK; + m_Alignment = alignment; + m_Size = size; + m_MemoryTypeIndex = memoryTypeIndex; + if(mapped) + { + VMA_ASSERT(IsMappingAllowed() && "Mapping is not allowed on this allocation! Please use one of the new VMA_ALLOCATION_CREATE_HOST_ACCESS_* flags when creating it."); + m_Flags |= (uint8_t)FLAG_PERSISTENT_MAP; + } + m_SuballocationType = (uint8_t)suballocationType; + m_BlockAllocation.m_Block = block; + m_BlockAllocation.m_AllocHandle = allocHandle; +} + +void VmaAllocation_T::InitDedicatedAllocation( + VmaAllocator allocator, + VmaPool hParentPool, + uint32_t memoryTypeIndex, + VkDeviceMemory hMemory, + VmaSuballocationType suballocationType, + void* pMappedData, + VkDeviceSize size) +{ + VMA_ASSERT(m_Type == ALLOCATION_TYPE_NONE); + VMA_ASSERT(hMemory != VK_NULL_HANDLE); + m_Type = (uint8_t)ALLOCATION_TYPE_DEDICATED; + m_Alignment = 0; + m_Size = size; + m_MemoryTypeIndex = memoryTypeIndex; + m_SuballocationType = (uint8_t)suballocationType; + m_DedicatedAllocation.m_ExtraData = VMA_NULL; + m_DedicatedAllocation.m_hParentPool = hParentPool; + m_DedicatedAllocation.m_hMemory = hMemory; + m_DedicatedAllocation.m_Prev = VMA_NULL; + m_DedicatedAllocation.m_Next = VMA_NULL; + + if (pMappedData != VMA_NULL) + { + VMA_ASSERT(IsMappingAllowed() && "Mapping is not allowed on this allocation! Please use one of the new VMA_ALLOCATION_CREATE_HOST_ACCESS_* flags when creating it."); + m_Flags |= (uint8_t)FLAG_PERSISTENT_MAP; + EnsureExtraData(allocator); + m_DedicatedAllocation.m_ExtraData->m_pMappedData = pMappedData; + } +} + +void VmaAllocation_T::Destroy(VmaAllocator allocator) +{ + FreeName(allocator); + + if (GetType() == ALLOCATION_TYPE_DEDICATED) + { + vma_delete(allocator, m_DedicatedAllocation.m_ExtraData); + } +} + +void VmaAllocation_T::SetName(VmaAllocator hAllocator, const char* pName) +{ + VMA_ASSERT(pName == VMA_NULL || pName != m_pName); + + FreeName(hAllocator); + + if (pName != VMA_NULL) + m_pName = VmaCreateStringCopy(hAllocator->GetAllocationCallbacks(), pName); +} + +uint8_t VmaAllocation_T::SwapBlockAllocation(VmaAllocator hAllocator, VmaAllocation allocation) +{ + VMA_ASSERT(allocation != VMA_NULL); + VMA_ASSERT(m_Type == ALLOCATION_TYPE_BLOCK); + VMA_ASSERT(allocation->m_Type == ALLOCATION_TYPE_BLOCK); + + if (m_MapCount != 0) + m_BlockAllocation.m_Block->Unmap(hAllocator, m_MapCount); + + m_BlockAllocation.m_Block->m_pMetadata->SetAllocationUserData(m_BlockAllocation.m_AllocHandle, allocation); + std::swap(m_BlockAllocation, allocation->m_BlockAllocation); + m_BlockAllocation.m_Block->m_pMetadata->SetAllocationUserData(m_BlockAllocation.m_AllocHandle, this); + +#if VMA_STATS_STRING_ENABLED + std::swap(m_BufferImageUsage, allocation->m_BufferImageUsage); +#endif + return m_MapCount; +} + +VmaAllocHandle VmaAllocation_T::GetAllocHandle() const +{ + switch (m_Type) + { + case ALLOCATION_TYPE_BLOCK: + return m_BlockAllocation.m_AllocHandle; + case ALLOCATION_TYPE_DEDICATED: + return VK_NULL_HANDLE; + default: + VMA_ASSERT(0); + return VK_NULL_HANDLE; + } +} + +VkDeviceSize VmaAllocation_T::GetOffset() const +{ + switch (m_Type) + { + case ALLOCATION_TYPE_BLOCK: + return 
m_BlockAllocation.m_Block->m_pMetadata->GetAllocationOffset(m_BlockAllocation.m_AllocHandle); + case ALLOCATION_TYPE_DEDICATED: + return 0; + default: + VMA_ASSERT(0); + return 0; + } +} + +VmaPool VmaAllocation_T::GetParentPool() const +{ + switch (m_Type) + { + case ALLOCATION_TYPE_BLOCK: + return m_BlockAllocation.m_Block->GetParentPool(); + case ALLOCATION_TYPE_DEDICATED: + return m_DedicatedAllocation.m_hParentPool; + default: + VMA_ASSERT(0); + return VK_NULL_HANDLE; + } +} + +VkDeviceMemory VmaAllocation_T::GetMemory() const +{ + switch (m_Type) + { + case ALLOCATION_TYPE_BLOCK: + return m_BlockAllocation.m_Block->GetDeviceMemory(); + case ALLOCATION_TYPE_DEDICATED: + return m_DedicatedAllocation.m_hMemory; + default: + VMA_ASSERT(0); + return VK_NULL_HANDLE; + } +} + +void* VmaAllocation_T::GetMappedData() const +{ + switch (m_Type) + { + case ALLOCATION_TYPE_BLOCK: + if (m_MapCount != 0 || IsPersistentMap()) + { + void* pBlockData = m_BlockAllocation.m_Block->GetMappedData(); + VMA_ASSERT(pBlockData != VMA_NULL); + return (char*)pBlockData + GetOffset(); + } + else + { + return VMA_NULL; + } + break; + case ALLOCATION_TYPE_DEDICATED: + VMA_ASSERT((m_DedicatedAllocation.m_ExtraData != VMA_NULL && m_DedicatedAllocation.m_ExtraData->m_pMappedData != VMA_NULL) == + (m_MapCount != 0 || IsPersistentMap())); + return m_DedicatedAllocation.m_ExtraData != VMA_NULL ? m_DedicatedAllocation.m_ExtraData->m_pMappedData : VMA_NULL; + default: + VMA_ASSERT(0); + return VMA_NULL; + } +} + +void VmaAllocation_T::BlockAllocMap() +{ + VMA_ASSERT(GetType() == ALLOCATION_TYPE_BLOCK); + VMA_ASSERT(IsMappingAllowed() && "Mapping is not allowed on this allocation! Please use one of the new VMA_ALLOCATION_CREATE_HOST_ACCESS_* flags when creating it."); + + if (m_MapCount < 0xFF) + { + ++m_MapCount; + } + else + { + VMA_ASSERT(0 && "Allocation mapped too many times simultaneously."); + } +} + +void VmaAllocation_T::BlockAllocUnmap() +{ + VMA_ASSERT(GetType() == ALLOCATION_TYPE_BLOCK); + + if (m_MapCount > 0) + { + --m_MapCount; + } + else + { + VMA_ASSERT(0 && "Unmapping allocation not previously mapped."); + } +} + +VkResult VmaAllocation_T::DedicatedAllocMap(VmaAllocator hAllocator, void** ppData) +{ + VMA_ASSERT(GetType() == ALLOCATION_TYPE_DEDICATED); + VMA_ASSERT(IsMappingAllowed() && "Mapping is not allowed on this allocation! 
Please use one of the new VMA_ALLOCATION_CREATE_HOST_ACCESS_* flags when creating it."); + + EnsureExtraData(hAllocator); + + if (m_MapCount != 0 || IsPersistentMap()) + { + if (m_MapCount < 0xFF) + { + VMA_ASSERT(m_DedicatedAllocation.m_ExtraData->m_pMappedData != VMA_NULL); + *ppData = m_DedicatedAllocation.m_ExtraData->m_pMappedData; + ++m_MapCount; + return VK_SUCCESS; + } + else + { + VMA_ASSERT(0 && "Dedicated allocation mapped too many times simultaneously."); + return VK_ERROR_MEMORY_MAP_FAILED; + } + } + else + { + VkResult result = (*hAllocator->GetVulkanFunctions().vkMapMemory)( + hAllocator->m_hDevice, + m_DedicatedAllocation.m_hMemory, + 0, // offset + VK_WHOLE_SIZE, + 0, // flags + ppData); + if (result == VK_SUCCESS) + { + m_DedicatedAllocation.m_ExtraData->m_pMappedData = *ppData; + m_MapCount = 1; + } + return result; + } +} + +void VmaAllocation_T::DedicatedAllocUnmap(VmaAllocator hAllocator) +{ + VMA_ASSERT(GetType() == ALLOCATION_TYPE_DEDICATED); + + if (m_MapCount > 0) + { + --m_MapCount; + if (m_MapCount == 0 && !IsPersistentMap()) + { + VMA_ASSERT(m_DedicatedAllocation.m_ExtraData != VMA_NULL); + m_DedicatedAllocation.m_ExtraData->m_pMappedData = VMA_NULL; + (*hAllocator->GetVulkanFunctions().vkUnmapMemory)( + hAllocator->m_hDevice, + m_DedicatedAllocation.m_hMemory); + } + } + else + { + VMA_ASSERT(0 && "Unmapping dedicated allocation not previously mapped."); + } +} + +#if VMA_STATS_STRING_ENABLED +void VmaAllocation_T::PrintParameters(class VmaJsonWriter& json) const +{ + json.WriteString("Type"); + json.WriteString(VMA_SUBALLOCATION_TYPE_NAMES[m_SuballocationType]); + + json.WriteString("Size"); + json.WriteNumber(m_Size); + json.WriteString("Usage"); + json.WriteNumber(m_BufferImageUsage.Value); // It may be uint32_t or uint64_t. 
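+// These fields end up in the JSON dump produced by the public stats API. A small sketch of
+// retrieving that dump (illustrative; `allocator` is a placeholder handle):
+#if 0
+    char* statsJson = NULL;
+    vmaBuildStatsString(allocator, &statsJson, VK_TRUE); // VK_TRUE = include the detailed map
+    // ... write statsJson to a file or log for inspection ...
+    vmaFreeStatsString(allocator, statsJson);
+#endif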
+ + if (m_pUserData != VMA_NULL) + { + json.WriteString("CustomData"); + json.BeginString(); + json.ContinueString_Pointer(m_pUserData); + json.EndString(); + } + if (m_pName != VMA_NULL) + { + json.WriteString("Name"); + json.WriteString(m_pName); + } +} +#if VMA_EXTERNAL_MEMORY_WIN32 +VkResult VmaAllocation_T::GetWin32Handle(VmaAllocator hAllocator, HANDLE hTargetProcess, HANDLE* pHandle) noexcept +{ + auto pvkGetMemoryWin32HandleKHR = hAllocator->GetVulkanFunctions().vkGetMemoryWin32HandleKHR; + switch (m_Type) + { + case ALLOCATION_TYPE_BLOCK: + return m_BlockAllocation.m_Block->CreateWin32Handle(hAllocator, pvkGetMemoryWin32HandleKHR, hTargetProcess, pHandle); + case ALLOCATION_TYPE_DEDICATED: + EnsureExtraData(hAllocator); + return m_DedicatedAllocation.m_ExtraData->m_Handle.GetHandle(hAllocator->m_hDevice, m_DedicatedAllocation.m_hMemory, pvkGetMemoryWin32HandleKHR, hTargetProcess, hAllocator->m_UseMutex, pHandle); + default: + VMA_ASSERT(0); + return VK_ERROR_FEATURE_NOT_PRESENT; + } +} +#endif // VMA_EXTERNAL_MEMORY_WIN32 +#endif // VMA_STATS_STRING_ENABLED + +void VmaAllocation_T::EnsureExtraData(VmaAllocator hAllocator) +{ + if (m_DedicatedAllocation.m_ExtraData == VMA_NULL) + { + m_DedicatedAllocation.m_ExtraData = vma_new(hAllocator, VmaAllocationExtraData)(); + } +} + +void VmaAllocation_T::FreeName(VmaAllocator hAllocator) +{ + if(m_pName) + { + VmaFreeString(hAllocator->GetAllocationCallbacks(), m_pName); + m_pName = VMA_NULL; + } +} +#endif // _VMA_ALLOCATION_T_FUNCTIONS + +#ifndef _VMA_BLOCK_VECTOR_FUNCTIONS +VmaBlockVector::VmaBlockVector( + VmaAllocator hAllocator, + VmaPool hParentPool, + uint32_t memoryTypeIndex, + VkDeviceSize preferredBlockSize, + size_t minBlockCount, + size_t maxBlockCount, + VkDeviceSize bufferImageGranularity, + bool explicitBlockSize, + uint32_t algorithm, + float priority, + VkDeviceSize minAllocationAlignment, + void* pMemoryAllocateNext) + : m_hAllocator(hAllocator), + m_hParentPool(hParentPool), + m_MemoryTypeIndex(memoryTypeIndex), + m_PreferredBlockSize(preferredBlockSize), + m_MinBlockCount(minBlockCount), + m_MaxBlockCount(maxBlockCount), + m_BufferImageGranularity(bufferImageGranularity), + m_ExplicitBlockSize(explicitBlockSize), + m_Algorithm(algorithm), + m_Priority(priority), + m_MinAllocationAlignment(minAllocationAlignment), + m_pMemoryAllocateNext(pMemoryAllocateNext), + m_Blocks(VmaStlAllocator(hAllocator->GetAllocationCallbacks())), + m_NextBlockId(0) {} + +VmaBlockVector::~VmaBlockVector() +{ + for (size_t i = m_Blocks.size(); i--; ) + { + m_Blocks[i]->Destroy(m_hAllocator); + vma_delete(m_hAllocator, m_Blocks[i]); + } +} + +VkResult VmaBlockVector::CreateMinBlocks() +{ + for (size_t i = 0; i < m_MinBlockCount; ++i) + { + VkResult res = CreateBlock(m_PreferredBlockSize, VMA_NULL); + if (res != VK_SUCCESS) + { + return res; + } + } + return VK_SUCCESS; +} + +void VmaBlockVector::AddStatistics(VmaStatistics& inoutStats) +{ + VmaMutexLockRead lock(m_Mutex, m_hAllocator->m_UseMutex); + + const size_t blockCount = m_Blocks.size(); + for (uint32_t blockIndex = 0; blockIndex < blockCount; ++blockIndex) + { + const VmaDeviceMemoryBlock* const pBlock = m_Blocks[blockIndex]; + VMA_ASSERT(pBlock); + VMA_HEAVY_ASSERT(pBlock->Validate()); + pBlock->m_pMetadata->AddStatistics(inoutStats); + } +} + +void VmaBlockVector::AddDetailedStatistics(VmaDetailedStatistics& inoutStats) +{ + VmaMutexLockRead lock(m_Mutex, m_hAllocator->m_UseMutex); + + const size_t blockCount = m_Blocks.size(); + for (uint32_t blockIndex = 0; blockIndex < blockCount; 
++blockIndex) + { + const VmaDeviceMemoryBlock* const pBlock = m_Blocks[blockIndex]; + VMA_ASSERT(pBlock); + VMA_HEAVY_ASSERT(pBlock->Validate()); + pBlock->m_pMetadata->AddDetailedStatistics(inoutStats); + } +} + +bool VmaBlockVector::IsEmpty() +{ + VmaMutexLockRead lock(m_Mutex, m_hAllocator->m_UseMutex); + return m_Blocks.empty(); +} + +bool VmaBlockVector::IsCorruptionDetectionEnabled() const +{ + const uint32_t requiredMemFlags = VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_COHERENT_BIT; + return (VMA_DEBUG_DETECT_CORRUPTION != 0) && + (VMA_DEBUG_MARGIN > 0) && + (m_Algorithm == 0 || m_Algorithm == VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT) && + (m_hAllocator->m_MemProps.memoryTypes[m_MemoryTypeIndex].propertyFlags & requiredMemFlags) == requiredMemFlags; +} + +VkResult VmaBlockVector::Allocate( + VkDeviceSize size, + VkDeviceSize alignment, + const VmaAllocationCreateInfo& createInfo, + VmaSuballocationType suballocType, + size_t allocationCount, + VmaAllocation* pAllocations) +{ + size_t allocIndex; + VkResult res = VK_SUCCESS; + + alignment = VMA_MAX(alignment, m_MinAllocationAlignment); + + if (IsCorruptionDetectionEnabled()) + { + size = VmaAlignUp(size, sizeof(VMA_CORRUPTION_DETECTION_MAGIC_VALUE)); + alignment = VmaAlignUp(alignment, sizeof(VMA_CORRUPTION_DETECTION_MAGIC_VALUE)); + } + + { + VmaMutexLockWrite lock(m_Mutex, m_hAllocator->m_UseMutex); + for (allocIndex = 0; allocIndex < allocationCount; ++allocIndex) + { + res = AllocatePage( + size, + alignment, + createInfo, + suballocType, + pAllocations + allocIndex); + if (res != VK_SUCCESS) + { + break; + } + } + } + + if (res != VK_SUCCESS) + { + // Free all already created allocations. + while (allocIndex--) + Free(pAllocations[allocIndex]); + memset(pAllocations, 0, sizeof(VmaAllocation) * allocationCount); + } + + return res; +} + +VkResult VmaBlockVector::AllocatePage( + VkDeviceSize size, + VkDeviceSize alignment, + const VmaAllocationCreateInfo& createInfo, + VmaSuballocationType suballocType, + VmaAllocation* pAllocation) +{ + const bool isUpperAddress = (createInfo.flags & VMA_ALLOCATION_CREATE_UPPER_ADDRESS_BIT) != 0; + + VkDeviceSize freeMemory; + { + const uint32_t heapIndex = m_hAllocator->MemoryTypeIndexToHeapIndex(m_MemoryTypeIndex); + VmaBudget heapBudget = {}; + m_hAllocator->GetHeapBudgets(&heapBudget, heapIndex, 1); + freeMemory = (heapBudget.usage < heapBudget.budget) ? (heapBudget.budget - heapBudget.usage) : 0; + } + + const bool canFallbackToDedicated = !HasExplicitBlockSize() && + (createInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) == 0; + const bool canCreateNewBlock = + ((createInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) == 0) && + (m_Blocks.size() < m_MaxBlockCount) && + (freeMemory >= size || !canFallbackToDedicated); + uint32_t strategy = createInfo.flags & VMA_ALLOCATION_CREATE_STRATEGY_MASK; + + // Upper address can only be used with linear allocator and within single memory block. + if (isUpperAddress && + (m_Algorithm != VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT || m_MaxBlockCount > 1)) + { + return VK_ERROR_FEATURE_NOT_PRESENT; + } + + // Early reject: requested allocation size is larger that maximum block size for this block vector. + if (size + VMA_DEBUG_MARGIN > m_PreferredBlockSize) + { + return VK_ERROR_OUT_OF_DEVICE_MEMORY; + } + + // 1. Search existing allocations. Try to allocate. + if (m_Algorithm == VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT) + { + // Use only last block. 
+ if (!m_Blocks.empty()) + { + VmaDeviceMemoryBlock* const pCurrBlock = m_Blocks.back(); + VMA_ASSERT(pCurrBlock); + VkResult res = AllocateFromBlock( + pCurrBlock, size, alignment, createInfo.flags, createInfo.pUserData, suballocType, strategy, pAllocation); + if (res == VK_SUCCESS) + { + VMA_DEBUG_LOG_FORMAT(" Returned from last block #%" PRIu32, pCurrBlock->GetId()); + IncrementallySortBlocks(); + return VK_SUCCESS; + } + } + } + else + { + if (strategy != VMA_ALLOCATION_CREATE_STRATEGY_MIN_TIME_BIT) // MIN_MEMORY or default + { + const bool isHostVisible = + (m_hAllocator->m_MemProps.memoryTypes[m_MemoryTypeIndex].propertyFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) != 0; + if(isHostVisible) + { + const bool isMappingAllowed = (createInfo.flags & + (VMA_ALLOCATION_CREATE_HOST_ACCESS_SEQUENTIAL_WRITE_BIT | VMA_ALLOCATION_CREATE_HOST_ACCESS_RANDOM_BIT)) != 0; + /* + For non-mappable allocations, check blocks that are not mapped first. + For mappable allocations, check blocks that are already mapped first. + This way, having many blocks, we will separate mappable and non-mappable allocations, + hopefully limiting the number of blocks that are mapped, which will help tools like RenderDoc. + */ + for(size_t mappingI = 0; mappingI < 2; ++mappingI) + { + // Forward order in m_Blocks - prefer blocks with smallest amount of free space. + for (size_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex) + { + VmaDeviceMemoryBlock* const pCurrBlock = m_Blocks[blockIndex]; + VMA_ASSERT(pCurrBlock); + const bool isBlockMapped = pCurrBlock->GetMappedData() != VMA_NULL; + if((mappingI == 0) == (isMappingAllowed == isBlockMapped)) + { + VkResult res = AllocateFromBlock( + pCurrBlock, size, alignment, createInfo.flags, createInfo.pUserData, suballocType, strategy, pAllocation); + if (res == VK_SUCCESS) + { + VMA_DEBUG_LOG_FORMAT(" Returned from existing block #%" PRIu32, pCurrBlock->GetId()); + IncrementallySortBlocks(); + return VK_SUCCESS; + } + } + } + } + } + else + { + // Forward order in m_Blocks - prefer blocks with smallest amount of free space. + for (size_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex) + { + VmaDeviceMemoryBlock* const pCurrBlock = m_Blocks[blockIndex]; + VMA_ASSERT(pCurrBlock); + VkResult res = AllocateFromBlock( + pCurrBlock, size, alignment, createInfo.flags, createInfo.pUserData, suballocType, strategy, pAllocation); + if (res == VK_SUCCESS) + { + VMA_DEBUG_LOG_FORMAT(" Returned from existing block #%" PRIu32, pCurrBlock->GetId()); + IncrementallySortBlocks(); + return VK_SUCCESS; + } + } + } + } + else // VMA_ALLOCATION_CREATE_STRATEGY_MIN_TIME_BIT + { + // Backward order in m_Blocks - prefer blocks with largest amount of free space. + for (size_t blockIndex = m_Blocks.size(); blockIndex--; ) + { + VmaDeviceMemoryBlock* const pCurrBlock = m_Blocks[blockIndex]; + VMA_ASSERT(pCurrBlock); + VkResult res = AllocateFromBlock(pCurrBlock, size, alignment, createInfo.flags, createInfo.pUserData, suballocType, strategy, pAllocation); + if (res == VK_SUCCESS) + { + VMA_DEBUG_LOG_FORMAT(" Returned from existing block #%" PRIu32, pCurrBlock->GetId()); + IncrementallySortBlocks(); + return VK_SUCCESS; + } + } + } + } + + // 2. Try to create new block. + if (canCreateNewBlock) + { + // Calculate optimal size for new block. + VkDeviceSize newBlockSize = m_PreferredBlockSize; + uint32_t newBlockSizeShift = 0; + const uint32_t NEW_BLOCK_SIZE_SHIFT_MAX = 3; + + if (!m_ExplicitBlockSize) + { + // Allocate 1/8, 1/4, 1/2 as first blocks. 
+ const VkDeviceSize maxExistingBlockSize = CalcMaxBlockSize(); + for (uint32_t i = 0; i < NEW_BLOCK_SIZE_SHIFT_MAX; ++i) + { + const VkDeviceSize smallerNewBlockSize = newBlockSize / 2; + if (smallerNewBlockSize > maxExistingBlockSize && smallerNewBlockSize >= size * 2) + { + newBlockSize = smallerNewBlockSize; + ++newBlockSizeShift; + } + else + { + break; + } + } + } + + size_t newBlockIndex = 0; + VkResult res = (newBlockSize <= freeMemory || !canFallbackToDedicated) ? + CreateBlock(newBlockSize, &newBlockIndex) : VK_ERROR_OUT_OF_DEVICE_MEMORY; + // Allocation of this size failed? Try 1/2, 1/4, 1/8 of m_PreferredBlockSize. + if (!m_ExplicitBlockSize) + { + while (res < 0 && newBlockSizeShift < NEW_BLOCK_SIZE_SHIFT_MAX) + { + const VkDeviceSize smallerNewBlockSize = newBlockSize / 2; + if (smallerNewBlockSize >= size) + { + newBlockSize = smallerNewBlockSize; + ++newBlockSizeShift; + res = (newBlockSize <= freeMemory || !canFallbackToDedicated) ? + CreateBlock(newBlockSize, &newBlockIndex) : VK_ERROR_OUT_OF_DEVICE_MEMORY; + } + else + { + break; + } + } + } + + if (res == VK_SUCCESS) + { + VmaDeviceMemoryBlock* const pBlock = m_Blocks[newBlockIndex]; + VMA_ASSERT(pBlock->m_pMetadata->GetSize() >= size); + + res = AllocateFromBlock( + pBlock, size, alignment, createInfo.flags, createInfo.pUserData, suballocType, strategy, pAllocation); + if (res == VK_SUCCESS) + { + VMA_DEBUG_LOG_FORMAT(" Created new block #%" PRIu32 " Size=%" PRIu64, pBlock->GetId(), newBlockSize); + IncrementallySortBlocks(); + return VK_SUCCESS; + } + else + { + // Allocation from new block failed, possibly due to VMA_DEBUG_MARGIN or alignment. + return VK_ERROR_OUT_OF_DEVICE_MEMORY; + } + } + } + + return VK_ERROR_OUT_OF_DEVICE_MEMORY; +} + +void VmaBlockVector::Free(const VmaAllocation hAllocation) +{ + VmaDeviceMemoryBlock* pBlockToDelete = VMA_NULL; + + bool budgetExceeded = false; + { + const uint32_t heapIndex = m_hAllocator->MemoryTypeIndexToHeapIndex(m_MemoryTypeIndex); + VmaBudget heapBudget = {}; + m_hAllocator->GetHeapBudgets(&heapBudget, heapIndex, 1); + budgetExceeded = heapBudget.usage >= heapBudget.budget; + } + + // Scope for lock. + { + VmaMutexLockWrite lock(m_Mutex, m_hAllocator->m_UseMutex); + + VmaDeviceMemoryBlock* pBlock = hAllocation->GetBlock(); + + if (IsCorruptionDetectionEnabled()) + { + VkResult res = pBlock->ValidateMagicValueAfterAllocation(m_hAllocator, hAllocation->GetOffset(), hAllocation->GetSize()); + VMA_ASSERT(res == VK_SUCCESS && "Couldn't map block memory to validate magic value."); + } + + if (hAllocation->IsPersistentMap()) + { + pBlock->Unmap(m_hAllocator, 1); + } + + const bool hadEmptyBlockBeforeFree = HasEmptyBlock(); + pBlock->m_pMetadata->Free(hAllocation->GetAllocHandle()); + pBlock->PostFree(m_hAllocator); + VMA_HEAVY_ASSERT(pBlock->Validate()); + + VMA_DEBUG_LOG_FORMAT(" Freed from MemoryTypeIndex=%" PRIu32, m_MemoryTypeIndex); + + const bool canDeleteBlock = m_Blocks.size() > m_MinBlockCount; + // pBlock became empty after this deallocation. + if (pBlock->m_pMetadata->IsEmpty()) + { + // Already had empty block. We don't want to have two, so delete this one. + if ((hadEmptyBlockBeforeFree || budgetExceeded) && canDeleteBlock) + { + pBlockToDelete = pBlock; + Remove(pBlock); + } + // else: We now have one empty block - leave it. A hysteresis to avoid allocating whole block back and forth. + } + // pBlock didn't become empty, but we have another empty block - find and free that one. + // (This is optional, heuristics.) 
+ else if (hadEmptyBlockBeforeFree && canDeleteBlock) + { + VmaDeviceMemoryBlock* pLastBlock = m_Blocks.back(); + if (pLastBlock->m_pMetadata->IsEmpty()) + { + pBlockToDelete = pLastBlock; + m_Blocks.pop_back(); + } + } + + IncrementallySortBlocks(); + } + + // Destruction of a free block. Deferred until this point, outside of mutex + // lock, for performance reason. + if (pBlockToDelete != VMA_NULL) + { + VMA_DEBUG_LOG_FORMAT(" Deleted empty block #%" PRIu32, pBlockToDelete->GetId()); + pBlockToDelete->Destroy(m_hAllocator); + vma_delete(m_hAllocator, pBlockToDelete); + } + + m_hAllocator->m_Budget.RemoveAllocation(m_hAllocator->MemoryTypeIndexToHeapIndex(m_MemoryTypeIndex), hAllocation->GetSize()); + hAllocation->Destroy(m_hAllocator); + m_hAllocator->m_AllocationObjectAllocator.Free(hAllocation); +} + +VkDeviceSize VmaBlockVector::CalcMaxBlockSize() const +{ + VkDeviceSize result = 0; + for (size_t i = m_Blocks.size(); i--; ) + { + result = VMA_MAX(result, m_Blocks[i]->m_pMetadata->GetSize()); + if (result >= m_PreferredBlockSize) + { + break; + } + } + return result; +} + +void VmaBlockVector::Remove(VmaDeviceMemoryBlock* pBlock) +{ + for (uint32_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex) + { + if (m_Blocks[blockIndex] == pBlock) + { + VmaVectorRemove(m_Blocks, blockIndex); + return; + } + } + VMA_ASSERT(0); +} + +void VmaBlockVector::IncrementallySortBlocks() +{ + if (!m_IncrementalSort) + return; + if (m_Algorithm != VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT) + { + // Bubble sort only until first swap. + for (size_t i = 1; i < m_Blocks.size(); ++i) + { + if (m_Blocks[i - 1]->m_pMetadata->GetSumFreeSize() > m_Blocks[i]->m_pMetadata->GetSumFreeSize()) + { + std::swap(m_Blocks[i - 1], m_Blocks[i]); + return; + } + } + } +} + +void VmaBlockVector::SortByFreeSize() +{ + VMA_SORT(m_Blocks.begin(), m_Blocks.end(), + [](VmaDeviceMemoryBlock* b1, VmaDeviceMemoryBlock* b2) -> bool + { + return b1->m_pMetadata->GetSumFreeSize() < b2->m_pMetadata->GetSumFreeSize(); + }); +} + +VkResult VmaBlockVector::AllocateFromBlock( + VmaDeviceMemoryBlock* pBlock, + VkDeviceSize size, + VkDeviceSize alignment, + VmaAllocationCreateFlags allocFlags, + void* pUserData, + VmaSuballocationType suballocType, + uint32_t strategy, + VmaAllocation* pAllocation) +{ + const bool isUpperAddress = (allocFlags & VMA_ALLOCATION_CREATE_UPPER_ADDRESS_BIT) != 0; + + VmaAllocationRequest currRequest = {}; + if (pBlock->m_pMetadata->CreateAllocationRequest( + size, + alignment, + isUpperAddress, + suballocType, + strategy, + &currRequest)) + { + return CommitAllocationRequest(currRequest, pBlock, alignment, allocFlags, pUserData, suballocType, pAllocation); + } + return VK_ERROR_OUT_OF_DEVICE_MEMORY; +} + +VkResult VmaBlockVector::CommitAllocationRequest( + VmaAllocationRequest& allocRequest, + VmaDeviceMemoryBlock* pBlock, + VkDeviceSize alignment, + VmaAllocationCreateFlags allocFlags, + void* pUserData, + VmaSuballocationType suballocType, + VmaAllocation* pAllocation) +{ + const bool mapped = (allocFlags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0; + const bool isUserDataString = (allocFlags & VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT) != 0; + const bool isMappingAllowed = (allocFlags & + (VMA_ALLOCATION_CREATE_HOST_ACCESS_SEQUENTIAL_WRITE_BIT | VMA_ALLOCATION_CREATE_HOST_ACCESS_RANDOM_BIT)) != 0; + + pBlock->PostAlloc(m_hAllocator); + // Allocate from pCurrBlock. 
+ if (mapped) + { + VkResult res = pBlock->Map(m_hAllocator, 1, VMA_NULL); + if (res != VK_SUCCESS) + { + return res; + } + } + + *pAllocation = m_hAllocator->m_AllocationObjectAllocator.Allocate(isMappingAllowed); + pBlock->m_pMetadata->Alloc(allocRequest, suballocType, *pAllocation); + (*pAllocation)->InitBlockAllocation( + pBlock, + allocRequest.allocHandle, + alignment, + allocRequest.size, // Not size, as actual allocation size may be larger than requested! + m_MemoryTypeIndex, + suballocType, + mapped); + VMA_HEAVY_ASSERT(pBlock->Validate()); + if (isUserDataString) + (*pAllocation)->SetName(m_hAllocator, (const char*)pUserData); + else + (*pAllocation)->SetUserData(m_hAllocator, pUserData); + m_hAllocator->m_Budget.AddAllocation(m_hAllocator->MemoryTypeIndexToHeapIndex(m_MemoryTypeIndex), allocRequest.size); + if (VMA_DEBUG_INITIALIZE_ALLOCATIONS) + { + m_hAllocator->FillAllocation(*pAllocation, VMA_ALLOCATION_FILL_PATTERN_CREATED); + } + if (IsCorruptionDetectionEnabled()) + { + VkResult res = pBlock->WriteMagicValueAfterAllocation(m_hAllocator, (*pAllocation)->GetOffset(), allocRequest.size); + VMA_ASSERT(res == VK_SUCCESS && "Couldn't map block memory to write magic value."); + } + return VK_SUCCESS; +} + +VkResult VmaBlockVector::CreateBlock(VkDeviceSize blockSize, size_t* pNewBlockIndex) +{ + VkMemoryAllocateInfo allocInfo = { VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO }; + allocInfo.pNext = m_pMemoryAllocateNext; + allocInfo.memoryTypeIndex = m_MemoryTypeIndex; + allocInfo.allocationSize = blockSize; + +#if VMA_BUFFER_DEVICE_ADDRESS + // Every standalone block can potentially contain a buffer with VK_BUFFER_USAGE_SHADER_DEVICE_ADDRESS_BIT - always enable the feature. + VkMemoryAllocateFlagsInfoKHR allocFlagsInfo = { VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_FLAGS_INFO_KHR }; + if (m_hAllocator->m_UseKhrBufferDeviceAddress) + { + allocFlagsInfo.flags = VK_MEMORY_ALLOCATE_DEVICE_ADDRESS_BIT_KHR; + VmaPnextChainPushFront(&allocInfo, &allocFlagsInfo); + } +#endif // VMA_BUFFER_DEVICE_ADDRESS + +#if VMA_MEMORY_PRIORITY + VkMemoryPriorityAllocateInfoEXT priorityInfo = { VK_STRUCTURE_TYPE_MEMORY_PRIORITY_ALLOCATE_INFO_EXT }; + if (m_hAllocator->m_UseExtMemoryPriority) + { + VMA_ASSERT(m_Priority >= 0.f && m_Priority <= 1.f); + priorityInfo.priority = m_Priority; + VmaPnextChainPushFront(&allocInfo, &priorityInfo); + } +#endif // VMA_MEMORY_PRIORITY + +#if VMA_EXTERNAL_MEMORY + // Attach VkExportMemoryAllocateInfoKHR if necessary. + VkExportMemoryAllocateInfoKHR exportMemoryAllocInfo = { VK_STRUCTURE_TYPE_EXPORT_MEMORY_ALLOCATE_INFO_KHR }; + exportMemoryAllocInfo.handleTypes = m_hAllocator->GetExternalMemoryHandleTypeFlags(m_MemoryTypeIndex); + if (exportMemoryAllocInfo.handleTypes != 0) + { + VmaPnextChainPushFront(&allocInfo, &exportMemoryAllocInfo); + } +#endif // VMA_EXTERNAL_MEMORY + + VkDeviceMemory mem = VK_NULL_HANDLE; + VkResult res = m_hAllocator->AllocateVulkanMemory(&allocInfo, &mem); + if (res < 0) + { + return res; + } + + // New VkDeviceMemory successfully created. + + // Create new Allocation for it. 
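+// Block vectors serve both the default per-memory-type pools and custom VmaPool objects.
+// Caller-side sketch of a custom pool whose VkDeviceMemory blocks are created by this
+// function, using the VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT path seen earlier in AllocatePage
+// (illustrative; `allocator` and the buffer parameters are placeholders):
+#if 0
+    VkBufferCreateInfo sampleBufInfo = { VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO };
+    sampleBufInfo.size = 1024;
+    sampleBufInfo.usage = VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT;
+
+    VmaAllocationCreateInfo sampleAllocInfo = {};
+    sampleAllocInfo.usage = VMA_MEMORY_USAGE_AUTO;
+
+    uint32_t memTypeIndex;
+    vmaFindMemoryTypeIndexForBufferInfo(allocator, &sampleBufInfo, &sampleAllocInfo, &memTypeIndex);
+
+    VmaPoolCreateInfo poolInfo = {};
+    poolInfo.memoryTypeIndex = memTypeIndex;
+    poolInfo.flags = VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT; // linear / ring-buffer style sub-allocation
+    poolInfo.blockSize = 16 * 1024 * 1024;                 // explicit block size, 16 MiB
+    poolInfo.minBlockCount = 1;
+
+    VmaPool pool;
+    vmaCreatePool(allocator, &poolInfo, &pool);
+    // ... allocate with VmaAllocationCreateInfo::pool = pool ...
+    vmaDestroyPool(allocator, pool);
+#endif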
+ VmaDeviceMemoryBlock* const pBlock = vma_new(m_hAllocator, VmaDeviceMemoryBlock)(m_hAllocator); + pBlock->Init( + m_hAllocator, + m_hParentPool, + m_MemoryTypeIndex, + mem, + allocInfo.allocationSize, + m_NextBlockId++, + m_Algorithm, + m_BufferImageGranularity); + + m_Blocks.push_back(pBlock); + if (pNewBlockIndex != VMA_NULL) + { + *pNewBlockIndex = m_Blocks.size() - 1; + } + + return VK_SUCCESS; +} + +bool VmaBlockVector::HasEmptyBlock() +{ + for (size_t index = 0, count = m_Blocks.size(); index < count; ++index) + { + VmaDeviceMemoryBlock* const pBlock = m_Blocks[index]; + if (pBlock->m_pMetadata->IsEmpty()) + { + return true; + } + } + return false; +} + +#if VMA_STATS_STRING_ENABLED +void VmaBlockVector::PrintDetailedMap(class VmaJsonWriter& json) +{ + VmaMutexLockRead lock(m_Mutex, m_hAllocator->m_UseMutex); + + + json.BeginObject(); + for (size_t i = 0; i < m_Blocks.size(); ++i) + { + json.BeginString(); + json.ContinueString(m_Blocks[i]->GetId()); + json.EndString(); + + json.BeginObject(); + json.WriteString("MapRefCount"); + json.WriteNumber(m_Blocks[i]->GetMapRefCount()); + + m_Blocks[i]->m_pMetadata->PrintDetailedMap(json); + json.EndObject(); + } + json.EndObject(); +} +#endif // VMA_STATS_STRING_ENABLED + +VkResult VmaBlockVector::CheckCorruption() +{ + if (!IsCorruptionDetectionEnabled()) + { + return VK_ERROR_FEATURE_NOT_PRESENT; + } + + VmaMutexLockRead lock(m_Mutex, m_hAllocator->m_UseMutex); + for (uint32_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex) + { + VmaDeviceMemoryBlock* const pBlock = m_Blocks[blockIndex]; + VMA_ASSERT(pBlock); + VkResult res = pBlock->CheckCorruption(m_hAllocator); + if (res != VK_SUCCESS) + { + return res; + } + } + return VK_SUCCESS; +} + +#endif // _VMA_BLOCK_VECTOR_FUNCTIONS + +#ifndef _VMA_DEFRAGMENTATION_CONTEXT_FUNCTIONS +VmaDefragmentationContext_T::VmaDefragmentationContext_T( + VmaAllocator hAllocator, + const VmaDefragmentationInfo& info) + : m_MaxPassBytes(info.maxBytesPerPass == 0 ? VK_WHOLE_SIZE : info.maxBytesPerPass), + m_MaxPassAllocations(info.maxAllocationsPerPass == 0 ? 
UINT32_MAX : info.maxAllocationsPerPass), + m_BreakCallback(info.pfnBreakCallback), + m_BreakCallbackUserData(info.pBreakCallbackUserData), + m_MoveAllocator(hAllocator->GetAllocationCallbacks()), + m_Moves(m_MoveAllocator) +{ + m_Algorithm = info.flags & VMA_DEFRAGMENTATION_FLAG_ALGORITHM_MASK; + + if (info.pool != VMA_NULL) + { + m_BlockVectorCount = 1; + m_PoolBlockVector = &info.pool->m_BlockVector; + m_pBlockVectors = &m_PoolBlockVector; + m_PoolBlockVector->SetIncrementalSort(false); + m_PoolBlockVector->SortByFreeSize(); + } + else + { + m_BlockVectorCount = hAllocator->GetMemoryTypeCount(); + m_PoolBlockVector = VMA_NULL; + m_pBlockVectors = hAllocator->m_pBlockVectors; + for (uint32_t i = 0; i < m_BlockVectorCount; ++i) + { + VmaBlockVector* vector = m_pBlockVectors[i]; + if (vector != VMA_NULL) + { + vector->SetIncrementalSort(false); + vector->SortByFreeSize(); + } + } + } + + switch (m_Algorithm) + { + case 0: // Default algorithm + m_Algorithm = VMA_DEFRAGMENTATION_FLAG_ALGORITHM_BALANCED_BIT; + m_AlgorithmState = vma_new_array(hAllocator, StateBalanced, m_BlockVectorCount); + break; + case VMA_DEFRAGMENTATION_FLAG_ALGORITHM_BALANCED_BIT: + m_AlgorithmState = vma_new_array(hAllocator, StateBalanced, m_BlockVectorCount); + break; + case VMA_DEFRAGMENTATION_FLAG_ALGORITHM_EXTENSIVE_BIT: + if (hAllocator->GetBufferImageGranularity() > 1) + { + m_AlgorithmState = vma_new_array(hAllocator, StateExtensive, m_BlockVectorCount); + } + break; + } +} + +VmaDefragmentationContext_T::~VmaDefragmentationContext_T() +{ + if (m_PoolBlockVector != VMA_NULL) + { + m_PoolBlockVector->SetIncrementalSort(true); + } + else + { + for (uint32_t i = 0; i < m_BlockVectorCount; ++i) + { + VmaBlockVector* vector = m_pBlockVectors[i]; + if (vector != VMA_NULL) + vector->SetIncrementalSort(true); + } + } + + if (m_AlgorithmState) + { + switch (m_Algorithm) + { + case VMA_DEFRAGMENTATION_FLAG_ALGORITHM_BALANCED_BIT: + vma_delete_array(m_MoveAllocator.m_pCallbacks, reinterpret_cast(m_AlgorithmState), m_BlockVectorCount); + break; + case VMA_DEFRAGMENTATION_FLAG_ALGORITHM_EXTENSIVE_BIT: + vma_delete_array(m_MoveAllocator.m_pCallbacks, reinterpret_cast(m_AlgorithmState), m_BlockVectorCount); + break; + default: + VMA_ASSERT(0); + } + } +} + +VkResult VmaDefragmentationContext_T::DefragmentPassBegin(VmaDefragmentationPassMoveInfo& moveInfo) +{ + if (m_PoolBlockVector != VMA_NULL) + { + VmaMutexLockWrite lock(m_PoolBlockVector->GetMutex(), m_PoolBlockVector->GetAllocator()->m_UseMutex); + + if (m_PoolBlockVector->GetBlockCount() > 1) + ComputeDefragmentation(*m_PoolBlockVector, 0); + else if (m_PoolBlockVector->GetBlockCount() == 1) + ReallocWithinBlock(*m_PoolBlockVector, m_PoolBlockVector->GetBlock(0)); + } + else + { + for (uint32_t i = 0; i < m_BlockVectorCount; ++i) + { + if (m_pBlockVectors[i] != VMA_NULL) + { + VmaMutexLockWrite lock(m_pBlockVectors[i]->GetMutex(), m_pBlockVectors[i]->GetAllocator()->m_UseMutex); + + if (m_pBlockVectors[i]->GetBlockCount() > 1) + { + if (ComputeDefragmentation(*m_pBlockVectors[i], i)) + break; + } + else if (m_pBlockVectors[i]->GetBlockCount() == 1) + { + if (ReallocWithinBlock(*m_pBlockVectors[i], m_pBlockVectors[i]->GetBlock(0))) + break; + } + } + } + } + + moveInfo.moveCount = static_cast(m_Moves.size()); + if (moveInfo.moveCount > 0) + { + moveInfo.pMoves = m_Moves.data(); + return VK_INCOMPLETE; + } + + moveInfo.pMoves = VMA_NULL; + return VK_SUCCESS; +} + +VkResult VmaDefragmentationContext_T::DefragmentPassEnd(VmaDefragmentationPassMoveInfo& moveInfo) +{ + 
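+    // DefragmentPassBegin/DefragmentPassEnd implement the pass protocol behind the public API.
+    // Caller-side sketch of a full defragmentation loop (illustrative; `allocator` is a
+    // placeholder handle, and the actual data copy between allocations is elided):
+#if 0
+    VmaDefragmentationInfo defragInfo = {};
+    defragInfo.flags = VMA_DEFRAGMENTATION_FLAG_ALGORITHM_FAST_BIT;
+
+    VmaDefragmentationContext defragCtx;
+    vmaBeginDefragmentation(allocator, &defragInfo, &defragCtx);
+
+    for (;;)
+    {
+        VmaDefragmentationPassMoveInfo pass;
+        VkResult res = vmaBeginDefragmentationPass(allocator, defragCtx, &pass);
+        if (res == VK_SUCCESS)
+            break; // nothing left to move
+
+        // For each pass.pMoves[i]: copy data from srcAllocation to dstTmpAllocation
+        // (e.g. vkCmdCopyBuffer), or set operation to IGNORE/DESTROY, then wait for the GPU.
+
+        res = vmaEndDefragmentationPass(allocator, defragCtx, &pass);
+        if (res == VK_SUCCESS)
+            break; // defragmentation finished
+    }
+
+    VmaDefragmentationStats stats;
+    vmaEndDefragmentation(allocator, defragCtx, &stats);
+#endif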
VMA_ASSERT(moveInfo.moveCount > 0 ? moveInfo.pMoves != VMA_NULL : true); + + VkResult result = VK_SUCCESS; + VmaStlAllocator blockAllocator(m_MoveAllocator.m_pCallbacks); + VmaVector> immovableBlocks(blockAllocator); + VmaVector> mappedBlocks(blockAllocator); + + VmaAllocator allocator = VMA_NULL; + for (uint32_t i = 0; i < moveInfo.moveCount; ++i) + { + VmaDefragmentationMove& move = moveInfo.pMoves[i]; + size_t prevCount = 0, currentCount = 0; + VkDeviceSize freedBlockSize = 0; + + uint32_t vectorIndex; + VmaBlockVector* vector; + if (m_PoolBlockVector != VMA_NULL) + { + vectorIndex = 0; + vector = m_PoolBlockVector; + } + else + { + vectorIndex = move.srcAllocation->GetMemoryTypeIndex(); + vector = m_pBlockVectors[vectorIndex]; + VMA_ASSERT(vector != VMA_NULL); + } + + switch (move.operation) + { + case VMA_DEFRAGMENTATION_MOVE_OPERATION_COPY: + { + uint8_t mapCount = move.srcAllocation->SwapBlockAllocation(vector->m_hAllocator, move.dstTmpAllocation); + if (mapCount > 0) + { + allocator = vector->m_hAllocator; + VmaDeviceMemoryBlock* newMapBlock = move.srcAllocation->GetBlock(); + bool notPresent = true; + for (FragmentedBlock& block : mappedBlocks) + { + if (block.block == newMapBlock) + { + notPresent = false; + block.data += mapCount; + break; + } + } + if (notPresent) + mappedBlocks.push_back({ mapCount, newMapBlock }); + } + + // Scope for locks, Free have it's own lock + { + VmaMutexLockRead lock(vector->GetMutex(), vector->GetAllocator()->m_UseMutex); + prevCount = vector->GetBlockCount(); + freedBlockSize = move.dstTmpAllocation->GetBlock()->m_pMetadata->GetSize(); + } + vector->Free(move.dstTmpAllocation); + { + VmaMutexLockRead lock(vector->GetMutex(), vector->GetAllocator()->m_UseMutex); + currentCount = vector->GetBlockCount(); + } + + result = VK_INCOMPLETE; + break; + } + case VMA_DEFRAGMENTATION_MOVE_OPERATION_IGNORE: + { + m_PassStats.bytesMoved -= move.srcAllocation->GetSize(); + --m_PassStats.allocationsMoved; + vector->Free(move.dstTmpAllocation); + + VmaDeviceMemoryBlock* newBlock = move.srcAllocation->GetBlock(); + bool notPresent = true; + for (const FragmentedBlock& block : immovableBlocks) + { + if (block.block == newBlock) + { + notPresent = false; + break; + } + } + if (notPresent) + immovableBlocks.push_back({ vectorIndex, newBlock }); + break; + } + case VMA_DEFRAGMENTATION_MOVE_OPERATION_DESTROY: + { + m_PassStats.bytesMoved -= move.srcAllocation->GetSize(); + --m_PassStats.allocationsMoved; + // Scope for locks, Free have it's own lock + { + VmaMutexLockRead lock(vector->GetMutex(), vector->GetAllocator()->m_UseMutex); + prevCount = vector->GetBlockCount(); + freedBlockSize = move.srcAllocation->GetBlock()->m_pMetadata->GetSize(); + } + vector->Free(move.srcAllocation); + { + VmaMutexLockRead lock(vector->GetMutex(), vector->GetAllocator()->m_UseMutex); + currentCount = vector->GetBlockCount(); + } + freedBlockSize *= prevCount - currentCount; + + VkDeviceSize dstBlockSize; + { + VmaMutexLockRead lock(vector->GetMutex(), vector->GetAllocator()->m_UseMutex); + dstBlockSize = move.dstTmpAllocation->GetBlock()->m_pMetadata->GetSize(); + } + vector->Free(move.dstTmpAllocation); + { + VmaMutexLockRead lock(vector->GetMutex(), vector->GetAllocator()->m_UseMutex); + freedBlockSize += dstBlockSize * (currentCount - vector->GetBlockCount()); + currentCount = vector->GetBlockCount(); + } + + result = VK_INCOMPLETE; + break; + } + default: + VMA_ASSERT(0); + } + + if (prevCount > currentCount) + { + size_t freedBlocks = prevCount - currentCount; + 
m_PassStats.deviceMemoryBlocksFreed += static_cast(freedBlocks); + m_PassStats.bytesFreed += freedBlockSize; + } + + if(m_Algorithm == VMA_DEFRAGMENTATION_FLAG_ALGORITHM_EXTENSIVE_BIT && + m_AlgorithmState != VMA_NULL) + { + // Avoid unnecessary tries to allocate when new free block is available + StateExtensive& state = reinterpret_cast(m_AlgorithmState)[vectorIndex]; + if (state.firstFreeBlock != SIZE_MAX) + { + const size_t diff = prevCount - currentCount; + if (state.firstFreeBlock >= diff) + { + state.firstFreeBlock -= diff; + if (state.firstFreeBlock != 0) + state.firstFreeBlock -= vector->GetBlock(state.firstFreeBlock - 1)->m_pMetadata->IsEmpty(); + } + else + state.firstFreeBlock = 0; + } + } + } + moveInfo.moveCount = 0; + moveInfo.pMoves = VMA_NULL; + m_Moves.clear(); + + // Update stats + m_GlobalStats.allocationsMoved += m_PassStats.allocationsMoved; + m_GlobalStats.bytesFreed += m_PassStats.bytesFreed; + m_GlobalStats.bytesMoved += m_PassStats.bytesMoved; + m_GlobalStats.deviceMemoryBlocksFreed += m_PassStats.deviceMemoryBlocksFreed; + m_PassStats = { 0 }; + + // Move blocks with immovable allocations according to algorithm + if (immovableBlocks.size() > 0) + { + do + { + if(m_Algorithm == VMA_DEFRAGMENTATION_FLAG_ALGORITHM_EXTENSIVE_BIT) + { + if (m_AlgorithmState != VMA_NULL) + { + bool swapped = false; + // Move to the start of free blocks range + for (const FragmentedBlock& block : immovableBlocks) + { + StateExtensive& state = reinterpret_cast(m_AlgorithmState)[block.data]; + if (state.operation != StateExtensive::Operation::Cleanup) + { + VmaBlockVector* vector = m_pBlockVectors[block.data]; + VmaMutexLockWrite lock(vector->GetMutex(), vector->GetAllocator()->m_UseMutex); + + for (size_t i = 0, count = vector->GetBlockCount() - m_ImmovableBlockCount; i < count; ++i) + { + if (vector->GetBlock(i) == block.block) + { + std::swap(vector->m_Blocks[i], vector->m_Blocks[vector->GetBlockCount() - ++m_ImmovableBlockCount]); + if (state.firstFreeBlock != SIZE_MAX) + { + if (i + 1 < state.firstFreeBlock) + { + if (state.firstFreeBlock > 1) + std::swap(vector->m_Blocks[i], vector->m_Blocks[--state.firstFreeBlock]); + else + --state.firstFreeBlock; + } + } + swapped = true; + break; + } + } + } + } + if (swapped) + result = VK_INCOMPLETE; + break; + } + } + + // Move to the beginning + for (const FragmentedBlock& block : immovableBlocks) + { + VmaBlockVector* vector = m_pBlockVectors[block.data]; + VmaMutexLockWrite lock(vector->GetMutex(), vector->GetAllocator()->m_UseMutex); + + for (size_t i = m_ImmovableBlockCount; i < vector->GetBlockCount(); ++i) + { + if (vector->GetBlock(i) == block.block) + { + std::swap(vector->m_Blocks[i], vector->m_Blocks[m_ImmovableBlockCount++]); + break; + } + } + } + } while (false); + } + + // Bulk-map destination blocks + for (const FragmentedBlock& block : mappedBlocks) + { + VkResult res = block.block->Map(allocator, block.data, VMA_NULL); + VMA_ASSERT(res == VK_SUCCESS); + } + return result; +} + +bool VmaDefragmentationContext_T::ComputeDefragmentation(VmaBlockVector& vector, size_t index) +{ + switch (m_Algorithm) + { + case VMA_DEFRAGMENTATION_FLAG_ALGORITHM_FAST_BIT: + return ComputeDefragmentation_Fast(vector); + case VMA_DEFRAGMENTATION_FLAG_ALGORITHM_BALANCED_BIT: + return ComputeDefragmentation_Balanced(vector, index, true); + case VMA_DEFRAGMENTATION_FLAG_ALGORITHM_FULL_BIT: + return ComputeDefragmentation_Full(vector); + case VMA_DEFRAGMENTATION_FLAG_ALGORITHM_EXTENSIVE_BIT: + return ComputeDefragmentation_Extensive(vector, index); + 
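+    // Any other value means the flags did not name one of the four supported algorithms;
+    // the default case asserts in debug builds and falls back to the balanced strategy so a
+    // release build still makes forward progress.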
default: + VMA_ASSERT(0); + return ComputeDefragmentation_Balanced(vector, index, true); + } +} + +VmaDefragmentationContext_T::MoveAllocationData VmaDefragmentationContext_T::GetMoveData( + VmaAllocHandle handle, VmaBlockMetadata* metadata) +{ + MoveAllocationData moveData; + moveData.move.srcAllocation = (VmaAllocation)metadata->GetAllocationUserData(handle); + moveData.size = moveData.move.srcAllocation->GetSize(); + moveData.alignment = moveData.move.srcAllocation->GetAlignment(); + moveData.type = moveData.move.srcAllocation->GetSuballocationType(); + moveData.flags = 0; + + if (moveData.move.srcAllocation->IsPersistentMap()) + moveData.flags |= VMA_ALLOCATION_CREATE_MAPPED_BIT; + if (moveData.move.srcAllocation->IsMappingAllowed()) + moveData.flags |= VMA_ALLOCATION_CREATE_HOST_ACCESS_SEQUENTIAL_WRITE_BIT | VMA_ALLOCATION_CREATE_HOST_ACCESS_RANDOM_BIT; + + return moveData; +} + +VmaDefragmentationContext_T::CounterStatus VmaDefragmentationContext_T::CheckCounters(VkDeviceSize bytes) +{ + // Check custom criteria if exists + if (m_BreakCallback && m_BreakCallback(m_BreakCallbackUserData)) + return CounterStatus::End; + + // Ignore allocation if will exceed max size for copy + if (m_PassStats.bytesMoved + bytes > m_MaxPassBytes) + { + if (++m_IgnoredAllocs < MAX_ALLOCS_TO_IGNORE) + return CounterStatus::Ignore; + else + return CounterStatus::End; + } + else + m_IgnoredAllocs = 0; + return CounterStatus::Pass; +} + +bool VmaDefragmentationContext_T::IncrementCounters(VkDeviceSize bytes) +{ + m_PassStats.bytesMoved += bytes; + // Early return when max found + if (++m_PassStats.allocationsMoved >= m_MaxPassAllocations || m_PassStats.bytesMoved >= m_MaxPassBytes) + { + VMA_ASSERT((m_PassStats.allocationsMoved == m_MaxPassAllocations || + m_PassStats.bytesMoved == m_MaxPassBytes) && "Exceeded maximal pass threshold!"); + return true; + } + return false; +} + +bool VmaDefragmentationContext_T::ReallocWithinBlock(VmaBlockVector& vector, VmaDeviceMemoryBlock* block) +{ + VmaBlockMetadata* metadata = block->m_pMetadata; + + for (VmaAllocHandle handle = metadata->GetAllocationListBegin(); + handle != VK_NULL_HANDLE; + handle = metadata->GetNextAllocation(handle)) + { + MoveAllocationData moveData = GetMoveData(handle, metadata); + // Ignore newly created allocations by defragmentation algorithm + if (moveData.move.srcAllocation->GetUserData() == this) + continue; + switch (CheckCounters(moveData.move.srcAllocation->GetSize())) + { + case CounterStatus::Ignore: + continue; + case CounterStatus::End: + return true; + case CounterStatus::Pass: + break; + default: + VMA_ASSERT(0); + } + + VkDeviceSize offset = moveData.move.srcAllocation->GetOffset(); + if (offset != 0 && metadata->GetSumFreeSize() >= moveData.size) + { + VmaAllocationRequest request = {}; + if (metadata->CreateAllocationRequest( + moveData.size, + moveData.alignment, + false, + moveData.type, + VMA_ALLOCATION_CREATE_STRATEGY_MIN_OFFSET_BIT, + &request)) + { + if (metadata->GetAllocationOffset(request.allocHandle) < offset) + { + if (vector.CommitAllocationRequest( + request, + block, + moveData.alignment, + moveData.flags, + this, + moveData.type, + &moveData.move.dstTmpAllocation) == VK_SUCCESS) + { + m_Moves.push_back(moveData.move); + if (IncrementCounters(moveData.size)) + return true; + } + } + } + } + } + return false; +} + +bool VmaDefragmentationContext_T::AllocInOtherBlock(size_t start, size_t end, MoveAllocationData& data, VmaBlockVector& vector) +{ + for (; start < end; ++start) + { + VmaDeviceMemoryBlock* dstBlock = 
vector.GetBlock(start); + if (dstBlock->m_pMetadata->GetSumFreeSize() >= data.size) + { + if (vector.AllocateFromBlock(dstBlock, + data.size, + data.alignment, + data.flags, + this, + data.type, + 0, + &data.move.dstTmpAllocation) == VK_SUCCESS) + { + m_Moves.push_back(data.move); + if (IncrementCounters(data.size)) + return true; + break; + } + } + } + return false; +} + +bool VmaDefragmentationContext_T::ComputeDefragmentation_Fast(VmaBlockVector& vector) +{ + // Move only between blocks + + // Go through allocations in last blocks and try to fit them inside first ones + for (size_t i = vector.GetBlockCount() - 1; i > m_ImmovableBlockCount; --i) + { + VmaBlockMetadata* metadata = vector.GetBlock(i)->m_pMetadata; + + for (VmaAllocHandle handle = metadata->GetAllocationListBegin(); + handle != VK_NULL_HANDLE; + handle = metadata->GetNextAllocation(handle)) + { + MoveAllocationData moveData = GetMoveData(handle, metadata); + // Ignore newly created allocations by defragmentation algorithm + if (moveData.move.srcAllocation->GetUserData() == this) + continue; + switch (CheckCounters(moveData.move.srcAllocation->GetSize())) + { + case CounterStatus::Ignore: + continue; + case CounterStatus::End: + return true; + case CounterStatus::Pass: + break; + default: + VMA_ASSERT(0); + } + + // Check all previous blocks for free space + if (AllocInOtherBlock(0, i, moveData, vector)) + return true; + } + } + return false; +} + +bool VmaDefragmentationContext_T::ComputeDefragmentation_Balanced(VmaBlockVector& vector, size_t index, bool update) +{ + // Go over every allocation and try to fit it in previous blocks at lowest offsets, + // if not possible: realloc within single block to minimize offset (exclude offset == 0), + // but only if there are noticeable gaps between them (some heuristic, ex. 
average size of allocation in block) + VMA_ASSERT(m_AlgorithmState != VMA_NULL); + + StateBalanced& vectorState = reinterpret_cast(m_AlgorithmState)[index]; + if (update && vectorState.avgAllocSize == UINT64_MAX) + UpdateVectorStatistics(vector, vectorState); + + const size_t startMoveCount = m_Moves.size(); + VkDeviceSize minimalFreeRegion = vectorState.avgFreeSize / 2; + for (size_t i = vector.GetBlockCount() - 1; i > m_ImmovableBlockCount; --i) + { + VmaDeviceMemoryBlock* block = vector.GetBlock(i); + VmaBlockMetadata* metadata = block->m_pMetadata; + VkDeviceSize prevFreeRegionSize = 0; + + for (VmaAllocHandle handle = metadata->GetAllocationListBegin(); + handle != VK_NULL_HANDLE; + handle = metadata->GetNextAllocation(handle)) + { + MoveAllocationData moveData = GetMoveData(handle, metadata); + // Ignore newly created allocations by defragmentation algorithm + if (moveData.move.srcAllocation->GetUserData() == this) + continue; + switch (CheckCounters(moveData.move.srcAllocation->GetSize())) + { + case CounterStatus::Ignore: + continue; + case CounterStatus::End: + return true; + case CounterStatus::Pass: + break; + default: + VMA_ASSERT(0); + } + + // Check all previous blocks for free space + const size_t prevMoveCount = m_Moves.size(); + if (AllocInOtherBlock(0, i, moveData, vector)) + return true; + + VkDeviceSize nextFreeRegionSize = metadata->GetNextFreeRegionSize(handle); + // If no room found then realloc within block for lower offset + VkDeviceSize offset = moveData.move.srcAllocation->GetOffset(); + if (prevMoveCount == m_Moves.size() && offset != 0 && metadata->GetSumFreeSize() >= moveData.size) + { + // Check if realloc will make sense + if (prevFreeRegionSize >= minimalFreeRegion || + nextFreeRegionSize >= minimalFreeRegion || + moveData.size <= vectorState.avgFreeSize || + moveData.size <= vectorState.avgAllocSize) + { + VmaAllocationRequest request = {}; + if (metadata->CreateAllocationRequest( + moveData.size, + moveData.alignment, + false, + moveData.type, + VMA_ALLOCATION_CREATE_STRATEGY_MIN_OFFSET_BIT, + &request)) + { + if (metadata->GetAllocationOffset(request.allocHandle) < offset) + { + if (vector.CommitAllocationRequest( + request, + block, + moveData.alignment, + moveData.flags, + this, + moveData.type, + &moveData.move.dstTmpAllocation) == VK_SUCCESS) + { + m_Moves.push_back(moveData.move); + if (IncrementCounters(moveData.size)) + return true; + } + } + } + } + } + prevFreeRegionSize = nextFreeRegionSize; + } + } + + // No moves performed, update statistics to current vector state + if (startMoveCount == m_Moves.size() && !update) + { + vectorState.avgAllocSize = UINT64_MAX; + return ComputeDefragmentation_Balanced(vector, index, false); + } + return false; +} + +bool VmaDefragmentationContext_T::ComputeDefragmentation_Full(VmaBlockVector& vector) +{ + // Go over every allocation and try to fit it in previous blocks at lowest offsets, + // if not possible: realloc within single block to minimize offset (exclude offset == 0) + + for (size_t i = vector.GetBlockCount() - 1; i > m_ImmovableBlockCount; --i) + { + VmaDeviceMemoryBlock* block = vector.GetBlock(i); + VmaBlockMetadata* metadata = block->m_pMetadata; + + for (VmaAllocHandle handle = metadata->GetAllocationListBegin(); + handle != VK_NULL_HANDLE; + handle = metadata->GetNextAllocation(handle)) + { + MoveAllocationData moveData = GetMoveData(handle, metadata); + // Ignore newly created allocations by defragmentation algorithm + if (moveData.move.srcAllocation->GetUserData() == this) + continue; + switch 
(CheckCounters(moveData.move.srcAllocation->GetSize())) + { + case CounterStatus::Ignore: + continue; + case CounterStatus::End: + return true; + case CounterStatus::Pass: + break; + default: + VMA_ASSERT(0); + } + + // Check all previous blocks for free space + const size_t prevMoveCount = m_Moves.size(); + if (AllocInOtherBlock(0, i, moveData, vector)) + return true; + + // If no room found then realloc within block for lower offset + VkDeviceSize offset = moveData.move.srcAllocation->GetOffset(); + if (prevMoveCount == m_Moves.size() && offset != 0 && metadata->GetSumFreeSize() >= moveData.size) + { + VmaAllocationRequest request = {}; + if (metadata->CreateAllocationRequest( + moveData.size, + moveData.alignment, + false, + moveData.type, + VMA_ALLOCATION_CREATE_STRATEGY_MIN_OFFSET_BIT, + &request)) + { + if (metadata->GetAllocationOffset(request.allocHandle) < offset) + { + if (vector.CommitAllocationRequest( + request, + block, + moveData.alignment, + moveData.flags, + this, + moveData.type, + &moveData.move.dstTmpAllocation) == VK_SUCCESS) + { + m_Moves.push_back(moveData.move); + if (IncrementCounters(moveData.size)) + return true; + } + } + } + } + } + } + return false; +} + +bool VmaDefragmentationContext_T::ComputeDefragmentation_Extensive(VmaBlockVector& vector, size_t index) +{ + // First free single block, then populate it to the brim, then free another block, and so on + + // Fallback to previous algorithm since without granularity conflicts it can achieve max packing + if (vector.m_BufferImageGranularity == 1) + return ComputeDefragmentation_Full(vector); + + VMA_ASSERT(m_AlgorithmState != VMA_NULL); + + StateExtensive& vectorState = reinterpret_cast(m_AlgorithmState)[index]; + + bool texturePresent = false, bufferPresent = false, otherPresent = false; + switch (vectorState.operation) + { + case StateExtensive::Operation::Done: // Vector defragmented + return false; + case StateExtensive::Operation::FindFreeBlockBuffer: + case StateExtensive::Operation::FindFreeBlockTexture: + case StateExtensive::Operation::FindFreeBlockAll: + { + // No more blocks to free, just perform fast realloc and move to cleanup + if (vectorState.firstFreeBlock == 0) + { + vectorState.operation = StateExtensive::Operation::Cleanup; + return ComputeDefragmentation_Fast(vector); + } + + // No free blocks, have to clear last one + size_t last = (vectorState.firstFreeBlock == SIZE_MAX ? 
vector.GetBlockCount() : vectorState.firstFreeBlock) - 1; + VmaBlockMetadata* freeMetadata = vector.GetBlock(last)->m_pMetadata; + + const size_t prevMoveCount = m_Moves.size(); + for (VmaAllocHandle handle = freeMetadata->GetAllocationListBegin(); + handle != VK_NULL_HANDLE; + handle = freeMetadata->GetNextAllocation(handle)) + { + MoveAllocationData moveData = GetMoveData(handle, freeMetadata); + switch (CheckCounters(moveData.move.srcAllocation->GetSize())) + { + case CounterStatus::Ignore: + continue; + case CounterStatus::End: + return true; + case CounterStatus::Pass: + break; + default: + VMA_ASSERT(0); + } + + // Check all previous blocks for free space + if (AllocInOtherBlock(0, last, moveData, vector)) + { + // Full clear performed already + if (prevMoveCount != m_Moves.size() && freeMetadata->GetNextAllocation(handle) == VK_NULL_HANDLE) + vectorState.firstFreeBlock = last; + return true; + } + } + + if (prevMoveCount == m_Moves.size()) + { + // Cannot perform full clear, have to move data in other blocks around + if (last != 0) + { + for (size_t i = last - 1; i; --i) + { + if (ReallocWithinBlock(vector, vector.GetBlock(i))) + return true; + } + } + + if (prevMoveCount == m_Moves.size()) + { + // No possible reallocs within blocks, try to move them around fast + return ComputeDefragmentation_Fast(vector); + } + } + else + { + switch (vectorState.operation) + { + case StateExtensive::Operation::FindFreeBlockBuffer: + vectorState.operation = StateExtensive::Operation::MoveBuffers; + break; + case StateExtensive::Operation::FindFreeBlockTexture: + vectorState.operation = StateExtensive::Operation::MoveTextures; + break; + case StateExtensive::Operation::FindFreeBlockAll: + vectorState.operation = StateExtensive::Operation::MoveAll; + break; + default: + VMA_ASSERT(0); + vectorState.operation = StateExtensive::Operation::MoveTextures; + } + vectorState.firstFreeBlock = last; + // Nothing done, block found without reallocations, can perform another reallocs in same pass + return ComputeDefragmentation_Extensive(vector, index); + } + break; + } + case StateExtensive::Operation::MoveTextures: + { + if (MoveDataToFreeBlocks(VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL, vector, + vectorState.firstFreeBlock, texturePresent, bufferPresent, otherPresent)) + { + if (texturePresent) + { + vectorState.operation = StateExtensive::Operation::FindFreeBlockTexture; + return ComputeDefragmentation_Extensive(vector, index); + } + + if (!bufferPresent && !otherPresent) + { + vectorState.operation = StateExtensive::Operation::Cleanup; + break; + } + + // No more textures to move, check buffers + vectorState.operation = StateExtensive::Operation::MoveBuffers; + bufferPresent = false; + otherPresent = false; + } + else + break; + VMA_FALLTHROUGH; // Fallthrough + } + case StateExtensive::Operation::MoveBuffers: + { + if (MoveDataToFreeBlocks(VMA_SUBALLOCATION_TYPE_BUFFER, vector, + vectorState.firstFreeBlock, texturePresent, bufferPresent, otherPresent)) + { + if (bufferPresent) + { + vectorState.operation = StateExtensive::Operation::FindFreeBlockBuffer; + return ComputeDefragmentation_Extensive(vector, index); + } + + if (!otherPresent) + { + vectorState.operation = StateExtensive::Operation::Cleanup; + break; + } + + // No more buffers to move, check all others + vectorState.operation = StateExtensive::Operation::MoveAll; + otherPresent = false; + } + else + break; + VMA_FALLTHROUGH; // Fallthrough + } + case StateExtensive::Operation::MoveAll: + { + if (MoveDataToFreeBlocks(VMA_SUBALLOCATION_TYPE_FREE, 
vector, + vectorState.firstFreeBlock, texturePresent, bufferPresent, otherPresent)) + { + if (otherPresent) + { + vectorState.operation = StateExtensive::Operation::FindFreeBlockBuffer; + return ComputeDefragmentation_Extensive(vector, index); + } + // Everything moved + vectorState.operation = StateExtensive::Operation::Cleanup; + } + break; + } + case StateExtensive::Operation::Cleanup: + // Cleanup is handled below so that other operations may reuse the cleanup code. This case is here to prevent the unhandled enum value warning (C4062). + break; + } + + if (vectorState.operation == StateExtensive::Operation::Cleanup) + { + // All other work done, pack data in blocks even tighter if possible + const size_t prevMoveCount = m_Moves.size(); + for (size_t i = 0; i < vector.GetBlockCount(); ++i) + { + if (ReallocWithinBlock(vector, vector.GetBlock(i))) + return true; + } + + if (prevMoveCount == m_Moves.size()) + vectorState.operation = StateExtensive::Operation::Done; + } + return false; +} + +void VmaDefragmentationContext_T::UpdateVectorStatistics(VmaBlockVector& vector, StateBalanced& state) +{ + size_t allocCount = 0; + size_t freeCount = 0; + state.avgFreeSize = 0; + state.avgAllocSize = 0; + + for (size_t i = 0; i < vector.GetBlockCount(); ++i) + { + VmaBlockMetadata* metadata = vector.GetBlock(i)->m_pMetadata; + + allocCount += metadata->GetAllocationCount(); + freeCount += metadata->GetFreeRegionsCount(); + state.avgFreeSize += metadata->GetSumFreeSize(); + state.avgAllocSize += metadata->GetSize(); + } + + state.avgAllocSize = (state.avgAllocSize - state.avgFreeSize) / allocCount; + state.avgFreeSize /= freeCount; +} + +bool VmaDefragmentationContext_T::MoveDataToFreeBlocks(VmaSuballocationType currentType, + VmaBlockVector& vector, size_t firstFreeBlock, + bool& texturePresent, bool& bufferPresent, bool& otherPresent) +{ + const size_t prevMoveCount = m_Moves.size(); + for (size_t i = firstFreeBlock ; i;) + { + VmaDeviceMemoryBlock* block = vector.GetBlock(--i); + VmaBlockMetadata* metadata = block->m_pMetadata; + + for (VmaAllocHandle handle = metadata->GetAllocationListBegin(); + handle != VK_NULL_HANDLE; + handle = metadata->GetNextAllocation(handle)) + { + MoveAllocationData moveData = GetMoveData(handle, metadata); + // Ignore newly created allocations by defragmentation algorithm + if (moveData.move.srcAllocation->GetUserData() == this) + continue; + switch (CheckCounters(moveData.move.srcAllocation->GetSize())) + { + case CounterStatus::Ignore: + continue; + case CounterStatus::End: + return true; + case CounterStatus::Pass: + break; + default: + VMA_ASSERT(0); + } + + // Move only single type of resources at once + if (!VmaIsBufferImageGranularityConflict(moveData.type, currentType)) + { + // Try to fit allocation into free blocks + if (AllocInOtherBlock(firstFreeBlock, vector.GetBlockCount(), moveData, vector)) + return false; + } + + if (!VmaIsBufferImageGranularityConflict(moveData.type, VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL)) + texturePresent = true; + else if (!VmaIsBufferImageGranularityConflict(moveData.type, VMA_SUBALLOCATION_TYPE_BUFFER)) + bufferPresent = true; + else + otherPresent = true; + } + } + return prevMoveCount == m_Moves.size(); +} +#endif // _VMA_DEFRAGMENTATION_CONTEXT_FUNCTIONS + +#ifndef _VMA_POOL_T_FUNCTIONS +VmaPool_T::VmaPool_T( + VmaAllocator hAllocator, + const VmaPoolCreateInfo& createInfo, + VkDeviceSize preferredBlockSize) + : m_BlockVector( + hAllocator, + this, // hParentPool + createInfo.memoryTypeIndex, + createInfo.blockSize != 0 ? 
createInfo.blockSize : preferredBlockSize, + createInfo.minBlockCount, + createInfo.maxBlockCount, + (createInfo.flags& VMA_POOL_CREATE_IGNORE_BUFFER_IMAGE_GRANULARITY_BIT) != 0 ? 1 : hAllocator->GetBufferImageGranularity(), + createInfo.blockSize != 0, // explicitBlockSize + createInfo.flags & VMA_POOL_CREATE_ALGORITHM_MASK, // algorithm + createInfo.priority, + VMA_MAX(hAllocator->GetMemoryTypeMinAlignment(createInfo.memoryTypeIndex), createInfo.minAllocationAlignment), + createInfo.pMemoryAllocateNext), + m_Id(0), + m_Name(VMA_NULL) {} + +VmaPool_T::~VmaPool_T() +{ + VMA_ASSERT(m_PrevPool == VMA_NULL && m_NextPool == VMA_NULL); + + const VkAllocationCallbacks* allocs = m_BlockVector.GetAllocator()->GetAllocationCallbacks(); + VmaFreeString(allocs, m_Name); +} + +void VmaPool_T::SetName(const char* pName) +{ + const VkAllocationCallbacks* allocs = m_BlockVector.GetAllocator()->GetAllocationCallbacks(); + VmaFreeString(allocs, m_Name); + + if (pName != VMA_NULL) + { + m_Name = VmaCreateStringCopy(allocs, pName); + } + else + { + m_Name = VMA_NULL; + } +} +#endif // _VMA_POOL_T_FUNCTIONS + +#ifndef _VMA_ALLOCATOR_T_FUNCTIONS +VmaAllocator_T::VmaAllocator_T(const VmaAllocatorCreateInfo* pCreateInfo) : + m_UseMutex((pCreateInfo->flags & VMA_ALLOCATOR_CREATE_EXTERNALLY_SYNCHRONIZED_BIT) == 0), + m_VulkanApiVersion(pCreateInfo->vulkanApiVersion != 0 ? pCreateInfo->vulkanApiVersion : VK_API_VERSION_1_0), + m_UseKhrDedicatedAllocation((pCreateInfo->flags & VMA_ALLOCATOR_CREATE_KHR_DEDICATED_ALLOCATION_BIT) != 0), + m_UseKhrBindMemory2((pCreateInfo->flags & VMA_ALLOCATOR_CREATE_KHR_BIND_MEMORY2_BIT) != 0), + m_UseExtMemoryBudget((pCreateInfo->flags & VMA_ALLOCATOR_CREATE_EXT_MEMORY_BUDGET_BIT) != 0), + m_UseAmdDeviceCoherentMemory((pCreateInfo->flags & VMA_ALLOCATOR_CREATE_AMD_DEVICE_COHERENT_MEMORY_BIT) != 0), + m_UseKhrBufferDeviceAddress((pCreateInfo->flags & VMA_ALLOCATOR_CREATE_BUFFER_DEVICE_ADDRESS_BIT) != 0), + m_UseExtMemoryPriority((pCreateInfo->flags & VMA_ALLOCATOR_CREATE_EXT_MEMORY_PRIORITY_BIT) != 0), + m_UseKhrMaintenance4((pCreateInfo->flags & VMA_ALLOCATOR_CREATE_KHR_MAINTENANCE4_BIT) != 0), + m_UseKhrMaintenance5((pCreateInfo->flags & VMA_ALLOCATOR_CREATE_KHR_MAINTENANCE5_BIT) != 0), + m_UseKhrExternalMemoryWin32((pCreateInfo->flags & VMA_ALLOCATOR_CREATE_KHR_EXTERNAL_MEMORY_WIN32_BIT) != 0), + m_hDevice(pCreateInfo->device), + m_hInstance(pCreateInfo->instance), + m_AllocationCallbacksSpecified(pCreateInfo->pAllocationCallbacks != VMA_NULL), + m_AllocationCallbacks(pCreateInfo->pAllocationCallbacks ? + *pCreateInfo->pAllocationCallbacks : VmaEmptyAllocationCallbacks), + m_AllocationObjectAllocator(&m_AllocationCallbacks), + m_HeapSizeLimitMask(0), + m_DeviceMemoryCount(0), + m_PreferredLargeHeapBlockSize(0), + m_PhysicalDevice(pCreateInfo->physicalDevice), + m_GpuDefragmentationMemoryTypeBits(UINT32_MAX), + m_NextPoolId(0), + m_GlobalMemoryTypeBits(UINT32_MAX) +{ + if(m_VulkanApiVersion >= VK_MAKE_VERSION(1, 1, 0)) + { + m_UseKhrDedicatedAllocation = false; + m_UseKhrBindMemory2 = false; + } + + if(VMA_DEBUG_DETECT_CORRUPTION) + { + // Needs to be multiply of uint32_t size because we are going to write VMA_CORRUPTION_DETECTION_MAGIC_VALUE to it. 
+ VMA_ASSERT(VMA_DEBUG_MARGIN % sizeof(uint32_t) == 0); + } + + VMA_ASSERT(pCreateInfo->physicalDevice && pCreateInfo->device && pCreateInfo->instance); + + if(m_VulkanApiVersion < VK_MAKE_VERSION(1, 1, 0)) + { +#if !(VMA_DEDICATED_ALLOCATION) + if((pCreateInfo->flags & VMA_ALLOCATOR_CREATE_KHR_DEDICATED_ALLOCATION_BIT) != 0) + { + VMA_ASSERT(0 && "VMA_ALLOCATOR_CREATE_KHR_DEDICATED_ALLOCATION_BIT set but required extensions are disabled by preprocessor macros."); + } +#endif +#if !(VMA_BIND_MEMORY2) + if((pCreateInfo->flags & VMA_ALLOCATOR_CREATE_KHR_BIND_MEMORY2_BIT) != 0) + { + VMA_ASSERT(0 && "VMA_ALLOCATOR_CREATE_KHR_BIND_MEMORY2_BIT set but required extension is disabled by preprocessor macros."); + } +#endif + } +#if !(VMA_MEMORY_BUDGET) + if((pCreateInfo->flags & VMA_ALLOCATOR_CREATE_EXT_MEMORY_BUDGET_BIT) != 0) + { + VMA_ASSERT(0 && "VMA_ALLOCATOR_CREATE_EXT_MEMORY_BUDGET_BIT set but required extension is disabled by preprocessor macros."); + } +#endif +#if !(VMA_BUFFER_DEVICE_ADDRESS) + if(m_UseKhrBufferDeviceAddress) + { + VMA_ASSERT(0 && "VMA_ALLOCATOR_CREATE_BUFFER_DEVICE_ADDRESS_BIT is set but required extension or Vulkan 1.2 is not available in your Vulkan header or its support in VMA has been disabled by a preprocessor macro."); + } +#endif +#if VMA_VULKAN_VERSION < 1003000 + if(m_VulkanApiVersion >= VK_MAKE_VERSION(1, 3, 0)) + { + VMA_ASSERT(0 && "vulkanApiVersion >= VK_API_VERSION_1_3 but required Vulkan version is disabled by preprocessor macros."); + } +#endif +#if VMA_VULKAN_VERSION < 1002000 + if(m_VulkanApiVersion >= VK_MAKE_VERSION(1, 2, 0)) + { + VMA_ASSERT(0 && "vulkanApiVersion >= VK_API_VERSION_1_2 but required Vulkan version is disabled by preprocessor macros."); + } +#endif +#if VMA_VULKAN_VERSION < 1001000 + if(m_VulkanApiVersion >= VK_MAKE_VERSION(1, 1, 0)) + { + VMA_ASSERT(0 && "vulkanApiVersion >= VK_API_VERSION_1_1 but required Vulkan version is disabled by preprocessor macros."); + } +#endif +#if !(VMA_MEMORY_PRIORITY) + if(m_UseExtMemoryPriority) + { + VMA_ASSERT(0 && "VMA_ALLOCATOR_CREATE_EXT_MEMORY_PRIORITY_BIT is set but required extension is not available in your Vulkan header or its support in VMA has been disabled by a preprocessor macro."); + } +#endif +#if !(VMA_KHR_MAINTENANCE4) + if(m_UseKhrMaintenance4) + { + VMA_ASSERT(0 && "VMA_ALLOCATOR_CREATE_KHR_MAINTENANCE4_BIT is set but required extension is not available in your Vulkan header or its support in VMA has been disabled by a preprocessor macro."); + } +#endif +#if !(VMA_KHR_MAINTENANCE5) + if(m_UseKhrMaintenance5) + { + VMA_ASSERT(0 && "VMA_ALLOCATOR_CREATE_KHR_MAINTENANCE5_BIT is set but required extension is not available in your Vulkan header or its support in VMA has been disabled by a preprocessor macro."); + } +#endif +#if !(VMA_KHR_MAINTENANCE5) + if(m_UseKhrMaintenance5) + { + VMA_ASSERT(0 && "VMA_ALLOCATOR_CREATE_KHR_MAINTENANCE5_BIT is set but required extension is not available in your Vulkan header or its support in VMA has been disabled by a preprocessor macro."); + } +#endif + +#if !(VMA_EXTERNAL_MEMORY_WIN32) + if(m_UseKhrExternalMemoryWin32) + { + VMA_ASSERT(0 && "VMA_ALLOCATOR_CREATE_KHR_EXTERNAL_MEMORY_WIN32_BIT is set but required extension is not available in your Vulkan header or its support in VMA has been disabled by a preprocessor macro."); + } +#endif + + memset(&m_DeviceMemoryCallbacks, 0 ,sizeof(m_DeviceMemoryCallbacks)); + memset(&m_PhysicalDeviceProperties, 0, sizeof(m_PhysicalDeviceProperties)); + memset(&m_MemProps, 0, sizeof(m_MemProps)); + + 
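+    // The preprocessor-guarded asserts above only diagnose configuration mismatches, i.e. a
+    // VMA_ALLOCATOR_CREATE_*_BIT requested while support for the corresponding extension or
+    // Vulkan version was compiled out; they enable nothing at runtime. The memset calls around
+    // this point simply zero the cached device data and function-pointer table before
+    // ImportVulkanFunctions and the vkGetPhysicalDevice*Properties queries below fill them in.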
memset(&m_pBlockVectors, 0, sizeof(m_pBlockVectors)); + memset(&m_VulkanFunctions, 0, sizeof(m_VulkanFunctions)); + +#if VMA_EXTERNAL_MEMORY + memset(&m_TypeExternalMemoryHandleTypes, 0, sizeof(m_TypeExternalMemoryHandleTypes)); +#endif // #if VMA_EXTERNAL_MEMORY + + if(pCreateInfo->pDeviceMemoryCallbacks != VMA_NULL) + { + m_DeviceMemoryCallbacks.pUserData = pCreateInfo->pDeviceMemoryCallbacks->pUserData; + m_DeviceMemoryCallbacks.pfnAllocate = pCreateInfo->pDeviceMemoryCallbacks->pfnAllocate; + m_DeviceMemoryCallbacks.pfnFree = pCreateInfo->pDeviceMemoryCallbacks->pfnFree; + } + + ImportVulkanFunctions(pCreateInfo->pVulkanFunctions); + + (*m_VulkanFunctions.vkGetPhysicalDeviceProperties)(m_PhysicalDevice, &m_PhysicalDeviceProperties); + (*m_VulkanFunctions.vkGetPhysicalDeviceMemoryProperties)(m_PhysicalDevice, &m_MemProps); + + VMA_ASSERT(VmaIsPow2(VMA_MIN_ALIGNMENT)); + VMA_ASSERT(VmaIsPow2(VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY)); + VMA_ASSERT(VmaIsPow2(m_PhysicalDeviceProperties.limits.bufferImageGranularity)); + VMA_ASSERT(VmaIsPow2(m_PhysicalDeviceProperties.limits.nonCoherentAtomSize)); + + m_PreferredLargeHeapBlockSize = (pCreateInfo->preferredLargeHeapBlockSize != 0) ? + pCreateInfo->preferredLargeHeapBlockSize : static_cast(VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE); + + m_GlobalMemoryTypeBits = CalculateGlobalMemoryTypeBits(); + +#if VMA_EXTERNAL_MEMORY + if(pCreateInfo->pTypeExternalMemoryHandleTypes != VMA_NULL) + { + memcpy(m_TypeExternalMemoryHandleTypes, pCreateInfo->pTypeExternalMemoryHandleTypes, + sizeof(VkExternalMemoryHandleTypeFlagsKHR) * GetMemoryTypeCount()); + } +#endif // #if VMA_EXTERNAL_MEMORY + + if(pCreateInfo->pHeapSizeLimit != VMA_NULL) + { + for(uint32_t heapIndex = 0; heapIndex < GetMemoryHeapCount(); ++heapIndex) + { + const VkDeviceSize limit = pCreateInfo->pHeapSizeLimit[heapIndex]; + if(limit != VK_WHOLE_SIZE) + { + m_HeapSizeLimitMask |= 1u << heapIndex; + if(limit < m_MemProps.memoryHeaps[heapIndex].size) + { + m_MemProps.memoryHeaps[heapIndex].size = limit; + } + } + } + } + + for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex) + { + // Create only supported types + if((m_GlobalMemoryTypeBits & (1u << memTypeIndex)) != 0) + { + const VkDeviceSize preferredBlockSize = CalcPreferredBlockSize(memTypeIndex); + m_pBlockVectors[memTypeIndex] = vma_new(this, VmaBlockVector)( + this, + VK_NULL_HANDLE, // hParentPool + memTypeIndex, + preferredBlockSize, + 0, + SIZE_MAX, + GetBufferImageGranularity(), + false, // explicitBlockSize + 0, // algorithm + 0.5f, // priority (0.5 is the default per Vulkan spec) + GetMemoryTypeMinAlignment(memTypeIndex), // minAllocationAlignment + VMA_NULL); // // pMemoryAllocateNext + // No need to call m_pBlockVectors[memTypeIndex][blockVectorTypeIndex]->CreateMinBlocks here, + // because minBlockCount is 0. 
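+            // The block vectors created in this loop are what a successful vmaCreateAllocator
+            // call sets up for every supported memory type. A minimal creation sketch using the
+            // public API from this header ('instance', 'physicalDevice' and 'device' are assumed
+            // to be valid Vulkan handles owned by the caller; Vulkan 1.1 is just the version the
+            // example picks):
+            //
+            //   VmaVulkanFunctions vkFuncs = {};
+            //   vkFuncs.vkGetInstanceProcAddr = vkGetInstanceProcAddr;
+            //   vkFuncs.vkGetDeviceProcAddr   = vkGetDeviceProcAddr;
+            //
+            //   VmaAllocatorCreateInfo allocatorInfo = {};
+            //   allocatorInfo.vulkanApiVersion = VK_API_VERSION_1_1;
+            //   allocatorInfo.instance         = instance;
+            //   allocatorInfo.physicalDevice   = physicalDevice;
+            //   allocatorInfo.device           = device;
+            //   allocatorInfo.pVulkanFunctions = &vkFuncs;
+            //
+            //   VmaAllocator allocator;
+            //   VkResult res = vmaCreateAllocator(&allocatorInfo, &allocator);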
+ } + } +} + +VkResult VmaAllocator_T::Init(const VmaAllocatorCreateInfo* pCreateInfo) +{ + VkResult res = VK_SUCCESS; + +#if VMA_MEMORY_BUDGET + if(m_UseExtMemoryBudget) + { + UpdateVulkanBudget(); + } +#endif // #if VMA_MEMORY_BUDGET + + return res; +} + +VmaAllocator_T::~VmaAllocator_T() +{ + VMA_ASSERT(m_Pools.IsEmpty()); + + for(size_t memTypeIndex = GetMemoryTypeCount(); memTypeIndex--; ) + { + vma_delete(this, m_pBlockVectors[memTypeIndex]); + } +} + +void VmaAllocator_T::ImportVulkanFunctions(const VmaVulkanFunctions* pVulkanFunctions) +{ +#if VMA_STATIC_VULKAN_FUNCTIONS == 1 + ImportVulkanFunctions_Static(); +#endif + + if(pVulkanFunctions != VMA_NULL) + { + ImportVulkanFunctions_Custom(pVulkanFunctions); + } + +#if VMA_DYNAMIC_VULKAN_FUNCTIONS == 1 + ImportVulkanFunctions_Dynamic(); +#endif + + ValidateVulkanFunctions(); +} + +#if VMA_STATIC_VULKAN_FUNCTIONS == 1 + +void VmaAllocator_T::ImportVulkanFunctions_Static() +{ + // Vulkan 1.0 + m_VulkanFunctions.vkGetInstanceProcAddr = (PFN_vkGetInstanceProcAddr)vkGetInstanceProcAddr; + m_VulkanFunctions.vkGetDeviceProcAddr = (PFN_vkGetDeviceProcAddr)vkGetDeviceProcAddr; + m_VulkanFunctions.vkGetPhysicalDeviceProperties = (PFN_vkGetPhysicalDeviceProperties)vkGetPhysicalDeviceProperties; + m_VulkanFunctions.vkGetPhysicalDeviceMemoryProperties = (PFN_vkGetPhysicalDeviceMemoryProperties)vkGetPhysicalDeviceMemoryProperties; + m_VulkanFunctions.vkAllocateMemory = (PFN_vkAllocateMemory)vkAllocateMemory; + m_VulkanFunctions.vkFreeMemory = (PFN_vkFreeMemory)vkFreeMemory; + m_VulkanFunctions.vkMapMemory = (PFN_vkMapMemory)vkMapMemory; + m_VulkanFunctions.vkUnmapMemory = (PFN_vkUnmapMemory)vkUnmapMemory; + m_VulkanFunctions.vkFlushMappedMemoryRanges = (PFN_vkFlushMappedMemoryRanges)vkFlushMappedMemoryRanges; + m_VulkanFunctions.vkInvalidateMappedMemoryRanges = (PFN_vkInvalidateMappedMemoryRanges)vkInvalidateMappedMemoryRanges; + m_VulkanFunctions.vkBindBufferMemory = (PFN_vkBindBufferMemory)vkBindBufferMemory; + m_VulkanFunctions.vkBindImageMemory = (PFN_vkBindImageMemory)vkBindImageMemory; + m_VulkanFunctions.vkGetBufferMemoryRequirements = (PFN_vkGetBufferMemoryRequirements)vkGetBufferMemoryRequirements; + m_VulkanFunctions.vkGetImageMemoryRequirements = (PFN_vkGetImageMemoryRequirements)vkGetImageMemoryRequirements; + m_VulkanFunctions.vkCreateBuffer = (PFN_vkCreateBuffer)vkCreateBuffer; + m_VulkanFunctions.vkDestroyBuffer = (PFN_vkDestroyBuffer)vkDestroyBuffer; + m_VulkanFunctions.vkCreateImage = (PFN_vkCreateImage)vkCreateImage; + m_VulkanFunctions.vkDestroyImage = (PFN_vkDestroyImage)vkDestroyImage; + m_VulkanFunctions.vkCmdCopyBuffer = (PFN_vkCmdCopyBuffer)vkCmdCopyBuffer; + + // Vulkan 1.1 +#if VMA_VULKAN_VERSION >= 1001000 + if(m_VulkanApiVersion >= VK_MAKE_VERSION(1, 1, 0)) + { + m_VulkanFunctions.vkGetBufferMemoryRequirements2KHR = (PFN_vkGetBufferMemoryRequirements2)vkGetBufferMemoryRequirements2; + m_VulkanFunctions.vkGetImageMemoryRequirements2KHR = (PFN_vkGetImageMemoryRequirements2)vkGetImageMemoryRequirements2; + m_VulkanFunctions.vkBindBufferMemory2KHR = (PFN_vkBindBufferMemory2)vkBindBufferMemory2; + m_VulkanFunctions.vkBindImageMemory2KHR = (PFN_vkBindImageMemory2)vkBindImageMemory2; + } +#endif + +#if VMA_VULKAN_VERSION >= 1001000 + if(m_VulkanApiVersion >= VK_MAKE_VERSION(1, 1, 0)) + { + m_VulkanFunctions.vkGetPhysicalDeviceMemoryProperties2KHR = (PFN_vkGetPhysicalDeviceMemoryProperties2)vkGetPhysicalDeviceMemoryProperties2; + } +#endif + +#if VMA_VULKAN_VERSION >= 1003000 + if(m_VulkanApiVersion >= VK_MAKE_VERSION(1, 3, 0)) 
+ { + m_VulkanFunctions.vkGetDeviceBufferMemoryRequirements = (PFN_vkGetDeviceBufferMemoryRequirements)vkGetDeviceBufferMemoryRequirements; + m_VulkanFunctions.vkGetDeviceImageMemoryRequirements = (PFN_vkGetDeviceImageMemoryRequirements)vkGetDeviceImageMemoryRequirements; + } +#endif +} + +#endif // VMA_STATIC_VULKAN_FUNCTIONS == 1 + +void VmaAllocator_T::ImportVulkanFunctions_Custom(const VmaVulkanFunctions* pVulkanFunctions) +{ + VMA_ASSERT(pVulkanFunctions != VMA_NULL); + +#define VMA_COPY_IF_NOT_NULL(funcName) \ + if(pVulkanFunctions->funcName != VMA_NULL) m_VulkanFunctions.funcName = pVulkanFunctions->funcName; + + VMA_COPY_IF_NOT_NULL(vkGetInstanceProcAddr); + VMA_COPY_IF_NOT_NULL(vkGetDeviceProcAddr); + VMA_COPY_IF_NOT_NULL(vkGetPhysicalDeviceProperties); + VMA_COPY_IF_NOT_NULL(vkGetPhysicalDeviceMemoryProperties); + VMA_COPY_IF_NOT_NULL(vkAllocateMemory); + VMA_COPY_IF_NOT_NULL(vkFreeMemory); + VMA_COPY_IF_NOT_NULL(vkMapMemory); + VMA_COPY_IF_NOT_NULL(vkUnmapMemory); + VMA_COPY_IF_NOT_NULL(vkFlushMappedMemoryRanges); + VMA_COPY_IF_NOT_NULL(vkInvalidateMappedMemoryRanges); + VMA_COPY_IF_NOT_NULL(vkBindBufferMemory); + VMA_COPY_IF_NOT_NULL(vkBindImageMemory); + VMA_COPY_IF_NOT_NULL(vkGetBufferMemoryRequirements); + VMA_COPY_IF_NOT_NULL(vkGetImageMemoryRequirements); + VMA_COPY_IF_NOT_NULL(vkCreateBuffer); + VMA_COPY_IF_NOT_NULL(vkDestroyBuffer); + VMA_COPY_IF_NOT_NULL(vkCreateImage); + VMA_COPY_IF_NOT_NULL(vkDestroyImage); + VMA_COPY_IF_NOT_NULL(vkCmdCopyBuffer); + +#if VMA_DEDICATED_ALLOCATION || VMA_VULKAN_VERSION >= 1001000 + VMA_COPY_IF_NOT_NULL(vkGetBufferMemoryRequirements2KHR); + VMA_COPY_IF_NOT_NULL(vkGetImageMemoryRequirements2KHR); +#endif + +#if VMA_BIND_MEMORY2 || VMA_VULKAN_VERSION >= 1001000 + VMA_COPY_IF_NOT_NULL(vkBindBufferMemory2KHR); + VMA_COPY_IF_NOT_NULL(vkBindImageMemory2KHR); +#endif + +#if VMA_MEMORY_BUDGET || VMA_VULKAN_VERSION >= 1001000 + VMA_COPY_IF_NOT_NULL(vkGetPhysicalDeviceMemoryProperties2KHR); +#endif + +#if VMA_KHR_MAINTENANCE4 || VMA_VULKAN_VERSION >= 1003000 + VMA_COPY_IF_NOT_NULL(vkGetDeviceBufferMemoryRequirements); + VMA_COPY_IF_NOT_NULL(vkGetDeviceImageMemoryRequirements); +#endif +#if VMA_EXTERNAL_MEMORY_WIN32 + VMA_COPY_IF_NOT_NULL(vkGetMemoryWin32HandleKHR); +#endif +#undef VMA_COPY_IF_NOT_NULL +} + +#if VMA_DYNAMIC_VULKAN_FUNCTIONS == 1 + +void VmaAllocator_T::ImportVulkanFunctions_Dynamic() +{ + VMA_ASSERT(m_VulkanFunctions.vkGetInstanceProcAddr && m_VulkanFunctions.vkGetDeviceProcAddr && + "To use VMA_DYNAMIC_VULKAN_FUNCTIONS in new versions of VMA you now have to pass " + "VmaVulkanFunctions::vkGetInstanceProcAddr and vkGetDeviceProcAddr as VmaAllocatorCreateInfo::pVulkanFunctions. 
" + "Other members can be null."); + +#define VMA_FETCH_INSTANCE_FUNC(memberName, functionPointerType, functionNameString) \ + if(m_VulkanFunctions.memberName == VMA_NULL) \ + m_VulkanFunctions.memberName = \ + (functionPointerType)m_VulkanFunctions.vkGetInstanceProcAddr(m_hInstance, functionNameString); +#define VMA_FETCH_DEVICE_FUNC(memberName, functionPointerType, functionNameString) \ + if(m_VulkanFunctions.memberName == VMA_NULL) \ + m_VulkanFunctions.memberName = \ + (functionPointerType)m_VulkanFunctions.vkGetDeviceProcAddr(m_hDevice, functionNameString); + + VMA_FETCH_INSTANCE_FUNC(vkGetPhysicalDeviceProperties, PFN_vkGetPhysicalDeviceProperties, "vkGetPhysicalDeviceProperties"); + VMA_FETCH_INSTANCE_FUNC(vkGetPhysicalDeviceMemoryProperties, PFN_vkGetPhysicalDeviceMemoryProperties, "vkGetPhysicalDeviceMemoryProperties"); + VMA_FETCH_DEVICE_FUNC(vkAllocateMemory, PFN_vkAllocateMemory, "vkAllocateMemory"); + VMA_FETCH_DEVICE_FUNC(vkFreeMemory, PFN_vkFreeMemory, "vkFreeMemory"); + VMA_FETCH_DEVICE_FUNC(vkMapMemory, PFN_vkMapMemory, "vkMapMemory"); + VMA_FETCH_DEVICE_FUNC(vkUnmapMemory, PFN_vkUnmapMemory, "vkUnmapMemory"); + VMA_FETCH_DEVICE_FUNC(vkFlushMappedMemoryRanges, PFN_vkFlushMappedMemoryRanges, "vkFlushMappedMemoryRanges"); + VMA_FETCH_DEVICE_FUNC(vkInvalidateMappedMemoryRanges, PFN_vkInvalidateMappedMemoryRanges, "vkInvalidateMappedMemoryRanges"); + VMA_FETCH_DEVICE_FUNC(vkBindBufferMemory, PFN_vkBindBufferMemory, "vkBindBufferMemory"); + VMA_FETCH_DEVICE_FUNC(vkBindImageMemory, PFN_vkBindImageMemory, "vkBindImageMemory"); + VMA_FETCH_DEVICE_FUNC(vkGetBufferMemoryRequirements, PFN_vkGetBufferMemoryRequirements, "vkGetBufferMemoryRequirements"); + VMA_FETCH_DEVICE_FUNC(vkGetImageMemoryRequirements, PFN_vkGetImageMemoryRequirements, "vkGetImageMemoryRequirements"); + VMA_FETCH_DEVICE_FUNC(vkCreateBuffer, PFN_vkCreateBuffer, "vkCreateBuffer"); + VMA_FETCH_DEVICE_FUNC(vkDestroyBuffer, PFN_vkDestroyBuffer, "vkDestroyBuffer"); + VMA_FETCH_DEVICE_FUNC(vkCreateImage, PFN_vkCreateImage, "vkCreateImage"); + VMA_FETCH_DEVICE_FUNC(vkDestroyImage, PFN_vkDestroyImage, "vkDestroyImage"); + VMA_FETCH_DEVICE_FUNC(vkCmdCopyBuffer, PFN_vkCmdCopyBuffer, "vkCmdCopyBuffer"); + +#if VMA_VULKAN_VERSION >= 1001000 + if(m_VulkanApiVersion >= VK_MAKE_VERSION(1, 1, 0)) + { + VMA_FETCH_DEVICE_FUNC(vkGetBufferMemoryRequirements2KHR, PFN_vkGetBufferMemoryRequirements2, "vkGetBufferMemoryRequirements2"); + VMA_FETCH_DEVICE_FUNC(vkGetImageMemoryRequirements2KHR, PFN_vkGetImageMemoryRequirements2, "vkGetImageMemoryRequirements2"); + VMA_FETCH_DEVICE_FUNC(vkBindBufferMemory2KHR, PFN_vkBindBufferMemory2, "vkBindBufferMemory2"); + VMA_FETCH_DEVICE_FUNC(vkBindImageMemory2KHR, PFN_vkBindImageMemory2, "vkBindImageMemory2"); + } +#endif + +#if VMA_MEMORY_BUDGET || VMA_VULKAN_VERSION >= 1001000 + if(m_VulkanApiVersion >= VK_MAKE_VERSION(1, 1, 0)) + { + VMA_FETCH_INSTANCE_FUNC(vkGetPhysicalDeviceMemoryProperties2KHR, PFN_vkGetPhysicalDeviceMemoryProperties2KHR, "vkGetPhysicalDeviceMemoryProperties2"); + } + else if(m_UseExtMemoryBudget) + { + VMA_FETCH_INSTANCE_FUNC(vkGetPhysicalDeviceMemoryProperties2KHR, PFN_vkGetPhysicalDeviceMemoryProperties2KHR, "vkGetPhysicalDeviceMemoryProperties2KHR"); + } +#endif + +#if VMA_DEDICATED_ALLOCATION + if(m_UseKhrDedicatedAllocation) + { + VMA_FETCH_DEVICE_FUNC(vkGetBufferMemoryRequirements2KHR, PFN_vkGetBufferMemoryRequirements2KHR, "vkGetBufferMemoryRequirements2KHR"); + VMA_FETCH_DEVICE_FUNC(vkGetImageMemoryRequirements2KHR, PFN_vkGetImageMemoryRequirements2KHR, 
"vkGetImageMemoryRequirements2KHR"); + } +#endif + +#if VMA_BIND_MEMORY2 + if(m_UseKhrBindMemory2) + { + VMA_FETCH_DEVICE_FUNC(vkBindBufferMemory2KHR, PFN_vkBindBufferMemory2KHR, "vkBindBufferMemory2KHR"); + VMA_FETCH_DEVICE_FUNC(vkBindImageMemory2KHR, PFN_vkBindImageMemory2KHR, "vkBindImageMemory2KHR"); + } +#endif // #if VMA_BIND_MEMORY2 + +#if VMA_MEMORY_BUDGET || VMA_VULKAN_VERSION >= 1001000 + if(m_VulkanApiVersion >= VK_MAKE_VERSION(1, 1, 0)) + { + VMA_FETCH_INSTANCE_FUNC(vkGetPhysicalDeviceMemoryProperties2KHR, PFN_vkGetPhysicalDeviceMemoryProperties2KHR, "vkGetPhysicalDeviceMemoryProperties2"); + } + else if(m_UseExtMemoryBudget) + { + VMA_FETCH_INSTANCE_FUNC(vkGetPhysicalDeviceMemoryProperties2KHR, PFN_vkGetPhysicalDeviceMemoryProperties2KHR, "vkGetPhysicalDeviceMemoryProperties2KHR"); + } +#endif // #if VMA_MEMORY_BUDGET + +#if VMA_VULKAN_VERSION >= 1003000 + if(m_VulkanApiVersion >= VK_MAKE_VERSION(1, 3, 0)) + { + VMA_FETCH_DEVICE_FUNC(vkGetDeviceBufferMemoryRequirements, PFN_vkGetDeviceBufferMemoryRequirements, "vkGetDeviceBufferMemoryRequirements"); + VMA_FETCH_DEVICE_FUNC(vkGetDeviceImageMemoryRequirements, PFN_vkGetDeviceImageMemoryRequirements, "vkGetDeviceImageMemoryRequirements"); + } +#endif +#if VMA_KHR_MAINTENANCE4 + if(m_UseKhrMaintenance4) + { + VMA_FETCH_DEVICE_FUNC(vkGetDeviceBufferMemoryRequirements, PFN_vkGetDeviceBufferMemoryRequirementsKHR, "vkGetDeviceBufferMemoryRequirementsKHR"); + VMA_FETCH_DEVICE_FUNC(vkGetDeviceImageMemoryRequirements, PFN_vkGetDeviceImageMemoryRequirementsKHR, "vkGetDeviceImageMemoryRequirementsKHR"); + } +#endif +#if VMA_EXTERNAL_MEMORY_WIN32 + if (m_UseKhrExternalMemoryWin32) + { + VMA_FETCH_DEVICE_FUNC(vkGetMemoryWin32HandleKHR, PFN_vkGetMemoryWin32HandleKHR, "vkGetMemoryWin32HandleKHR"); + } +#endif +#undef VMA_FETCH_DEVICE_FUNC +#undef VMA_FETCH_INSTANCE_FUNC +} + +#endif // VMA_DYNAMIC_VULKAN_FUNCTIONS == 1 + +void VmaAllocator_T::ValidateVulkanFunctions() +{ + VMA_ASSERT(m_VulkanFunctions.vkGetPhysicalDeviceProperties != VMA_NULL); + VMA_ASSERT(m_VulkanFunctions.vkGetPhysicalDeviceMemoryProperties != VMA_NULL); + VMA_ASSERT(m_VulkanFunctions.vkAllocateMemory != VMA_NULL); + VMA_ASSERT(m_VulkanFunctions.vkFreeMemory != VMA_NULL); + VMA_ASSERT(m_VulkanFunctions.vkMapMemory != VMA_NULL); + VMA_ASSERT(m_VulkanFunctions.vkUnmapMemory != VMA_NULL); + VMA_ASSERT(m_VulkanFunctions.vkFlushMappedMemoryRanges != VMA_NULL); + VMA_ASSERT(m_VulkanFunctions.vkInvalidateMappedMemoryRanges != VMA_NULL); + VMA_ASSERT(m_VulkanFunctions.vkBindBufferMemory != VMA_NULL); + VMA_ASSERT(m_VulkanFunctions.vkBindImageMemory != VMA_NULL); + VMA_ASSERT(m_VulkanFunctions.vkGetBufferMemoryRequirements != VMA_NULL); + VMA_ASSERT(m_VulkanFunctions.vkGetImageMemoryRequirements != VMA_NULL); + VMA_ASSERT(m_VulkanFunctions.vkCreateBuffer != VMA_NULL); + VMA_ASSERT(m_VulkanFunctions.vkDestroyBuffer != VMA_NULL); + VMA_ASSERT(m_VulkanFunctions.vkCreateImage != VMA_NULL); + VMA_ASSERT(m_VulkanFunctions.vkDestroyImage != VMA_NULL); + VMA_ASSERT(m_VulkanFunctions.vkCmdCopyBuffer != VMA_NULL); + +#if VMA_DEDICATED_ALLOCATION || VMA_VULKAN_VERSION >= 1001000 + if(m_VulkanApiVersion >= VK_MAKE_VERSION(1, 1, 0) || m_UseKhrDedicatedAllocation) + { + VMA_ASSERT(m_VulkanFunctions.vkGetBufferMemoryRequirements2KHR != VMA_NULL); + VMA_ASSERT(m_VulkanFunctions.vkGetImageMemoryRequirements2KHR != VMA_NULL); + } +#endif + +#if VMA_BIND_MEMORY2 || VMA_VULKAN_VERSION >= 1001000 + if(m_VulkanApiVersion >= VK_MAKE_VERSION(1, 1, 0) || m_UseKhrBindMemory2) + { + 
VMA_ASSERT(m_VulkanFunctions.vkBindBufferMemory2KHR != VMA_NULL); + VMA_ASSERT(m_VulkanFunctions.vkBindImageMemory2KHR != VMA_NULL); + } +#endif + +#if VMA_MEMORY_BUDGET || VMA_VULKAN_VERSION >= 1001000 + if(m_UseExtMemoryBudget || m_VulkanApiVersion >= VK_MAKE_VERSION(1, 1, 0)) + { + VMA_ASSERT(m_VulkanFunctions.vkGetPhysicalDeviceMemoryProperties2KHR != VMA_NULL); + } +#endif +#if VMA_EXTERNAL_MEMORY_WIN32 + if (m_UseKhrExternalMemoryWin32) + { + VMA_ASSERT(m_VulkanFunctions.vkGetMemoryWin32HandleKHR != VMA_NULL); + } +#endif + + // Not validating these due to suspected driver bugs with these function + // pointers being null despite correct extension or Vulkan version is enabled. + // See issue #397. Their usage in VMA is optional anyway. + // + // VMA_ASSERT(m_VulkanFunctions.vkGetDeviceBufferMemoryRequirements != VMA_NULL); + // VMA_ASSERT(m_VulkanFunctions.vkGetDeviceImageMemoryRequirements != VMA_NULL); +} + +VkDeviceSize VmaAllocator_T::CalcPreferredBlockSize(uint32_t memTypeIndex) +{ + const uint32_t heapIndex = MemoryTypeIndexToHeapIndex(memTypeIndex); + const VkDeviceSize heapSize = m_MemProps.memoryHeaps[heapIndex].size; + const bool isSmallHeap = heapSize <= VMA_SMALL_HEAP_MAX_SIZE; + return VmaAlignUp(isSmallHeap ? (heapSize / 8) : m_PreferredLargeHeapBlockSize, (VkDeviceSize)32); +} + +VkResult VmaAllocator_T::AllocateMemoryOfType( + VmaPool pool, + VkDeviceSize size, + VkDeviceSize alignment, + bool dedicatedPreferred, + VkBuffer dedicatedBuffer, + VkImage dedicatedImage, + VmaBufferImageUsage dedicatedBufferImageUsage, + const VmaAllocationCreateInfo& createInfo, + uint32_t memTypeIndex, + VmaSuballocationType suballocType, + VmaDedicatedAllocationList& dedicatedAllocations, + VmaBlockVector& blockVector, + size_t allocationCount, + VmaAllocation* pAllocations) +{ + VMA_ASSERT(pAllocations != VMA_NULL); + VMA_DEBUG_LOG_FORMAT(" AllocateMemory: MemoryTypeIndex=%" PRIu32 ", AllocationCount=%zu, Size=%" PRIu64, memTypeIndex, allocationCount, size); + + VmaAllocationCreateInfo finalCreateInfo = createInfo; + VkResult res = CalcMemTypeParams( + finalCreateInfo, + memTypeIndex, + size, + allocationCount); + if(res != VK_SUCCESS) + return res; + + if((finalCreateInfo.flags & VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT) != 0) + { + return AllocateDedicatedMemory( + pool, + size, + suballocType, + dedicatedAllocations, + memTypeIndex, + (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0, + (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT) != 0, + (finalCreateInfo.flags & + (VMA_ALLOCATION_CREATE_HOST_ACCESS_SEQUENTIAL_WRITE_BIT | VMA_ALLOCATION_CREATE_HOST_ACCESS_RANDOM_BIT)) != 0, + (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_CAN_ALIAS_BIT) != 0, + finalCreateInfo.pUserData, + finalCreateInfo.priority, + dedicatedBuffer, + dedicatedImage, + dedicatedBufferImageUsage, + allocationCount, + pAllocations, + blockVector.GetAllocationNextPtr()); + } + else + { + const bool canAllocateDedicated = + (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) == 0 && + (pool == VK_NULL_HANDLE || !blockVector.HasExplicitBlockSize()); + + if(canAllocateDedicated) + { + // Heuristics: Allocate dedicated memory if requested size if greater than half of preferred block size. 
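+            // For a large heap with the default VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE of 256 MiB
+            // (and no custom preferredLargeHeapBlockSize), this makes requests above 128 MiB
+            // prefer their own VkDeviceMemory object, unless the allocation-count guard further
+            // down flips the preference back off.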
+ if(size > blockVector.GetPreferredBlockSize() / 2) + { + dedicatedPreferred = true; + } + // Protection against creating each allocation as dedicated when we reach or exceed heap size/budget, + // which can quickly deplete maxMemoryAllocationCount: Don't prefer dedicated allocations when above + // 3/4 of the maximum allocation count. + if(m_PhysicalDeviceProperties.limits.maxMemoryAllocationCount < UINT32_MAX / 4 && + m_DeviceMemoryCount.load() > m_PhysicalDeviceProperties.limits.maxMemoryAllocationCount * 3 / 4) + { + dedicatedPreferred = false; + } + + if(dedicatedPreferred) + { + res = AllocateDedicatedMemory( + pool, + size, + suballocType, + dedicatedAllocations, + memTypeIndex, + (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0, + (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT) != 0, + (finalCreateInfo.flags & + (VMA_ALLOCATION_CREATE_HOST_ACCESS_SEQUENTIAL_WRITE_BIT | VMA_ALLOCATION_CREATE_HOST_ACCESS_RANDOM_BIT)) != 0, + (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_CAN_ALIAS_BIT) != 0, + finalCreateInfo.pUserData, + finalCreateInfo.priority, + dedicatedBuffer, + dedicatedImage, + dedicatedBufferImageUsage, + allocationCount, + pAllocations, + blockVector.GetAllocationNextPtr()); + if(res == VK_SUCCESS) + { + // Succeeded: AllocateDedicatedMemory function already filled pMemory, nothing more to do here. + VMA_DEBUG_LOG(" Allocated as DedicatedMemory"); + return VK_SUCCESS; + } + } + } + + res = blockVector.Allocate( + size, + alignment, + finalCreateInfo, + suballocType, + allocationCount, + pAllocations); + if(res == VK_SUCCESS) + return VK_SUCCESS; + + // Try dedicated memory. + if(canAllocateDedicated && !dedicatedPreferred) + { + res = AllocateDedicatedMemory( + pool, + size, + suballocType, + dedicatedAllocations, + memTypeIndex, + (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0, + (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT) != 0, + (finalCreateInfo.flags & + (VMA_ALLOCATION_CREATE_HOST_ACCESS_SEQUENTIAL_WRITE_BIT | VMA_ALLOCATION_CREATE_HOST_ACCESS_RANDOM_BIT)) != 0, + (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_CAN_ALIAS_BIT) != 0, + finalCreateInfo.pUserData, + finalCreateInfo.priority, + dedicatedBuffer, + dedicatedImage, + dedicatedBufferImageUsage, + allocationCount, + pAllocations, + blockVector.GetAllocationNextPtr()); + if(res == VK_SUCCESS) + { + // Succeeded: AllocateDedicatedMemory function already filled pMemory, nothing more to do here. + VMA_DEBUG_LOG(" Allocated as DedicatedMemory"); + return VK_SUCCESS; + } + } + // Everything failed: Return error code. 
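+        // Order of attempts in this branch: dedicated allocation first when the heuristics above
+        // prefer it, then sub-allocation from the block vector, then dedicated allocation as a
+        // last resort; only when all of these fail is the block vector's error code propagated.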
+ VMA_DEBUG_LOG(" vkAllocateMemory FAILED"); + return res; + } +} + +VkResult VmaAllocator_T::AllocateDedicatedMemory( + VmaPool pool, + VkDeviceSize size, + VmaSuballocationType suballocType, + VmaDedicatedAllocationList& dedicatedAllocations, + uint32_t memTypeIndex, + bool map, + bool isUserDataString, + bool isMappingAllowed, + bool canAliasMemory, + void* pUserData, + float priority, + VkBuffer dedicatedBuffer, + VkImage dedicatedImage, + VmaBufferImageUsage dedicatedBufferImageUsage, + size_t allocationCount, + VmaAllocation* pAllocations, + const void* pNextChain) +{ + VMA_ASSERT(allocationCount > 0 && pAllocations); + + VkMemoryAllocateInfo allocInfo = { VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO }; + allocInfo.memoryTypeIndex = memTypeIndex; + allocInfo.allocationSize = size; + allocInfo.pNext = pNextChain; + +#if VMA_DEDICATED_ALLOCATION || VMA_VULKAN_VERSION >= 1001000 + VkMemoryDedicatedAllocateInfoKHR dedicatedAllocInfo = { VK_STRUCTURE_TYPE_MEMORY_DEDICATED_ALLOCATE_INFO_KHR }; + if(!canAliasMemory) + { + if(m_UseKhrDedicatedAllocation || m_VulkanApiVersion >= VK_MAKE_VERSION(1, 1, 0)) + { + if(dedicatedBuffer != VK_NULL_HANDLE) + { + VMA_ASSERT(dedicatedImage == VK_NULL_HANDLE); + dedicatedAllocInfo.buffer = dedicatedBuffer; + VmaPnextChainPushFront(&allocInfo, &dedicatedAllocInfo); + } + else if(dedicatedImage != VK_NULL_HANDLE) + { + dedicatedAllocInfo.image = dedicatedImage; + VmaPnextChainPushFront(&allocInfo, &dedicatedAllocInfo); + } + } + } +#endif // #if VMA_DEDICATED_ALLOCATION || VMA_VULKAN_VERSION >= 1001000 + +#if VMA_BUFFER_DEVICE_ADDRESS + VkMemoryAllocateFlagsInfoKHR allocFlagsInfo = { VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_FLAGS_INFO_KHR }; + if(m_UseKhrBufferDeviceAddress) + { + bool canContainBufferWithDeviceAddress = true; + if(dedicatedBuffer != VK_NULL_HANDLE) + { + canContainBufferWithDeviceAddress = dedicatedBufferImageUsage == VmaBufferImageUsage::UNKNOWN || + dedicatedBufferImageUsage.Contains(VK_BUFFER_USAGE_SHADER_DEVICE_ADDRESS_BIT_EXT); + } + else if(dedicatedImage != VK_NULL_HANDLE) + { + canContainBufferWithDeviceAddress = false; + } + if(canContainBufferWithDeviceAddress) + { + allocFlagsInfo.flags = VK_MEMORY_ALLOCATE_DEVICE_ADDRESS_BIT_KHR; + VmaPnextChainPushFront(&allocInfo, &allocFlagsInfo); + } + } +#endif // #if VMA_BUFFER_DEVICE_ADDRESS + +#if VMA_MEMORY_PRIORITY + VkMemoryPriorityAllocateInfoEXT priorityInfo = { VK_STRUCTURE_TYPE_MEMORY_PRIORITY_ALLOCATE_INFO_EXT }; + if(m_UseExtMemoryPriority) + { + VMA_ASSERT(priority >= 0.f && priority <= 1.f); + priorityInfo.priority = priority; + VmaPnextChainPushFront(&allocInfo, &priorityInfo); + } +#endif // #if VMA_MEMORY_PRIORITY + +#if VMA_EXTERNAL_MEMORY + // Attach VkExportMemoryAllocateInfoKHR if necessary. 
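+    // Note on chain order: each VmaPnextChainPushFront call prepends its structure to
+    // allocInfo.pNext, so structures added later (such as the export info declared just below)
+    // appear earliest in the chain handed to vkAllocateMemory, while the caller-supplied
+    // pNextChain that allocInfo.pNext started with stays at the very end.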
+ VkExportMemoryAllocateInfoKHR exportMemoryAllocInfo = { VK_STRUCTURE_TYPE_EXPORT_MEMORY_ALLOCATE_INFO_KHR }; + exportMemoryAllocInfo.handleTypes = GetExternalMemoryHandleTypeFlags(memTypeIndex); + if(exportMemoryAllocInfo.handleTypes != 0) + { + VmaPnextChainPushFront(&allocInfo, &exportMemoryAllocInfo); + } +#endif // #if VMA_EXTERNAL_MEMORY + + size_t allocIndex; + VkResult res = VK_SUCCESS; + for(allocIndex = 0; allocIndex < allocationCount; ++allocIndex) + { + res = AllocateDedicatedMemoryPage( + pool, + size, + suballocType, + memTypeIndex, + allocInfo, + map, + isUserDataString, + isMappingAllowed, + pUserData, + pAllocations + allocIndex); + if(res != VK_SUCCESS) + { + break; + } + } + + if(res == VK_SUCCESS) + { + for (allocIndex = 0; allocIndex < allocationCount; ++allocIndex) + { + dedicatedAllocations.Register(pAllocations[allocIndex]); + } + VMA_DEBUG_LOG_FORMAT(" Allocated DedicatedMemory Count=%zu, MemoryTypeIndex=#%" PRIu32, allocationCount, memTypeIndex); + } + else + { + // Free all already created allocations. + while(allocIndex--) + { + VmaAllocation currAlloc = pAllocations[allocIndex]; + VkDeviceMemory hMemory = currAlloc->GetMemory(); + + /* + There is no need to call this, because Vulkan spec allows to skip vkUnmapMemory + before vkFreeMemory. + + if(currAlloc->GetMappedData() != VMA_NULL) + { + (*m_VulkanFunctions.vkUnmapMemory)(m_hDevice, hMemory); + } + */ + + FreeVulkanMemory(memTypeIndex, currAlloc->GetSize(), hMemory); + m_Budget.RemoveAllocation(MemoryTypeIndexToHeapIndex(memTypeIndex), currAlloc->GetSize()); + m_AllocationObjectAllocator.Free(currAlloc); + } + + memset(pAllocations, 0, sizeof(VmaAllocation) * allocationCount); + } + + return res; +} + +VkResult VmaAllocator_T::AllocateDedicatedMemoryPage( + VmaPool pool, + VkDeviceSize size, + VmaSuballocationType suballocType, + uint32_t memTypeIndex, + const VkMemoryAllocateInfo& allocInfo, + bool map, + bool isUserDataString, + bool isMappingAllowed, + void* pUserData, + VmaAllocation* pAllocation) +{ + VkDeviceMemory hMemory = VK_NULL_HANDLE; + VkResult res = AllocateVulkanMemory(&allocInfo, &hMemory); + if(res < 0) + { + VMA_DEBUG_LOG(" vkAllocateMemory FAILED"); + return res; + } + + void* pMappedData = VMA_NULL; + if(map) + { + res = (*m_VulkanFunctions.vkMapMemory)( + m_hDevice, + hMemory, + 0, + VK_WHOLE_SIZE, + 0, + &pMappedData); + if(res < 0) + { + VMA_DEBUG_LOG(" vkMapMemory FAILED"); + FreeVulkanMemory(memTypeIndex, size, hMemory); + return res; + } + } + + *pAllocation = m_AllocationObjectAllocator.Allocate(isMappingAllowed); + (*pAllocation)->InitDedicatedAllocation(this, pool, memTypeIndex, hMemory, suballocType, pMappedData, size); + if (isUserDataString) + (*pAllocation)->SetName(this, (const char*)pUserData); + else + (*pAllocation)->SetUserData(this, pUserData); + m_Budget.AddAllocation(MemoryTypeIndexToHeapIndex(memTypeIndex), size); + if(VMA_DEBUG_INITIALIZE_ALLOCATIONS) + { + FillAllocation(*pAllocation, VMA_ALLOCATION_FILL_PATTERN_CREATED); + } + + return VK_SUCCESS; +} + +void VmaAllocator_T::GetBufferMemoryRequirements( + VkBuffer hBuffer, + VkMemoryRequirements& memReq, + bool& requiresDedicatedAllocation, + bool& prefersDedicatedAllocation) const +{ +#if VMA_DEDICATED_ALLOCATION || VMA_VULKAN_VERSION >= 1001000 + if(m_UseKhrDedicatedAllocation || m_VulkanApiVersion >= VK_MAKE_VERSION(1, 1, 0)) + { + VkBufferMemoryRequirementsInfo2KHR memReqInfo = { VK_STRUCTURE_TYPE_BUFFER_MEMORY_REQUIREMENTS_INFO_2_KHR }; + memReqInfo.buffer = hBuffer; + + VkMemoryDedicatedRequirementsKHR 
memDedicatedReq = { VK_STRUCTURE_TYPE_MEMORY_DEDICATED_REQUIREMENTS_KHR }; + + VkMemoryRequirements2KHR memReq2 = { VK_STRUCTURE_TYPE_MEMORY_REQUIREMENTS_2_KHR }; + VmaPnextChainPushFront(&memReq2, &memDedicatedReq); + + (*m_VulkanFunctions.vkGetBufferMemoryRequirements2KHR)(m_hDevice, &memReqInfo, &memReq2); + + memReq = memReq2.memoryRequirements; + requiresDedicatedAllocation = (memDedicatedReq.requiresDedicatedAllocation != VK_FALSE); + prefersDedicatedAllocation = (memDedicatedReq.prefersDedicatedAllocation != VK_FALSE); + } + else +#endif // #if VMA_DEDICATED_ALLOCATION || VMA_VULKAN_VERSION >= 1001000 + { + (*m_VulkanFunctions.vkGetBufferMemoryRequirements)(m_hDevice, hBuffer, &memReq); + requiresDedicatedAllocation = false; + prefersDedicatedAllocation = false; + } +} + +void VmaAllocator_T::GetImageMemoryRequirements( + VkImage hImage, + VkMemoryRequirements& memReq, + bool& requiresDedicatedAllocation, + bool& prefersDedicatedAllocation) const +{ +#if VMA_DEDICATED_ALLOCATION || VMA_VULKAN_VERSION >= 1001000 + if(m_UseKhrDedicatedAllocation || m_VulkanApiVersion >= VK_MAKE_VERSION(1, 1, 0)) + { + VkImageMemoryRequirementsInfo2KHR memReqInfo = { VK_STRUCTURE_TYPE_IMAGE_MEMORY_REQUIREMENTS_INFO_2_KHR }; + memReqInfo.image = hImage; + + VkMemoryDedicatedRequirementsKHR memDedicatedReq = { VK_STRUCTURE_TYPE_MEMORY_DEDICATED_REQUIREMENTS_KHR }; + + VkMemoryRequirements2KHR memReq2 = { VK_STRUCTURE_TYPE_MEMORY_REQUIREMENTS_2_KHR }; + VmaPnextChainPushFront(&memReq2, &memDedicatedReq); + + (*m_VulkanFunctions.vkGetImageMemoryRequirements2KHR)(m_hDevice, &memReqInfo, &memReq2); + + memReq = memReq2.memoryRequirements; + requiresDedicatedAllocation = (memDedicatedReq.requiresDedicatedAllocation != VK_FALSE); + prefersDedicatedAllocation = (memDedicatedReq.prefersDedicatedAllocation != VK_FALSE); + } + else +#endif // #if VMA_DEDICATED_ALLOCATION || VMA_VULKAN_VERSION >= 1001000 + { + (*m_VulkanFunctions.vkGetImageMemoryRequirements)(m_hDevice, hImage, &memReq); + requiresDedicatedAllocation = false; + prefersDedicatedAllocation = false; + } +} + +VkResult VmaAllocator_T::FindMemoryTypeIndex( + uint32_t memoryTypeBits, + const VmaAllocationCreateInfo* pAllocationCreateInfo, + VmaBufferImageUsage bufImgUsage, + uint32_t* pMemoryTypeIndex) const +{ + memoryTypeBits &= GetGlobalMemoryTypeBits(); + + if(pAllocationCreateInfo->memoryTypeBits != 0) + { + memoryTypeBits &= pAllocationCreateInfo->memoryTypeBits; + } + + VkMemoryPropertyFlags requiredFlags = 0, preferredFlags = 0, notPreferredFlags = 0; + if(!FindMemoryPreferences( + IsIntegratedGpu(), + *pAllocationCreateInfo, + bufImgUsage, + requiredFlags, preferredFlags, notPreferredFlags)) + { + return VK_ERROR_FEATURE_NOT_PRESENT; + } + + *pMemoryTypeIndex = UINT32_MAX; + uint32_t minCost = UINT32_MAX; + for(uint32_t memTypeIndex = 0, memTypeBit = 1; + memTypeIndex < GetMemoryTypeCount(); + ++memTypeIndex, memTypeBit <<= 1) + { + // This memory type is acceptable according to memoryTypeBits bitmask. + if((memTypeBit & memoryTypeBits) != 0) + { + const VkMemoryPropertyFlags currFlags = + m_MemProps.memoryTypes[memTypeIndex].propertyFlags; + // This memory type contains requiredFlags. + if((requiredFlags & ~currFlags) == 0) + { + // Calculate cost as number of bits from preferredFlags not present in this memory type. + uint32_t currCost = VMA_COUNT_BITS_SET(preferredFlags & ~currFlags) + + VMA_COUNT_BITS_SET(currFlags & notPreferredFlags); + // Remember memory type with lowest cost. 
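+
+        // Usage sketch (not part of VMA): this cost-based search is what the public
+        // vmaFindMemoryTypeIndex() exposes. memReq and allocator are placeholders
+        // obtained elsewhere (e.g. from vkGetBufferMemoryRequirements()).
+        //
+        //     VmaAllocationCreateInfo allocCreateInfo = {};
+        //     allocCreateInfo.requiredFlags  = VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT;
+        //     allocCreateInfo.preferredFlags = VK_MEMORY_PROPERTY_HOST_CACHED_BIT;
+        //     uint32_t memTypeIndex;
+        //     VkResult r = vmaFindMemoryTypeIndex(allocator,
+        //         memReq.memoryTypeBits, &allocCreateInfo, &memTypeIndex);
+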
+ if(currCost < minCost) + { + *pMemoryTypeIndex = memTypeIndex; + if(currCost == 0) + { + return VK_SUCCESS; + } + minCost = currCost; + } + } + } + } + return (*pMemoryTypeIndex != UINT32_MAX) ? VK_SUCCESS : VK_ERROR_FEATURE_NOT_PRESENT; +} + +VkResult VmaAllocator_T::CalcMemTypeParams( + VmaAllocationCreateInfo& inoutCreateInfo, + uint32_t memTypeIndex, + VkDeviceSize size, + size_t allocationCount) +{ + // If memory type is not HOST_VISIBLE, disable MAPPED. + if((inoutCreateInfo.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0 && + (m_MemProps.memoryTypes[memTypeIndex].propertyFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) == 0) + { + inoutCreateInfo.flags &= ~VMA_ALLOCATION_CREATE_MAPPED_BIT; + } + + if((inoutCreateInfo.flags & VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT) != 0 && + (inoutCreateInfo.flags & VMA_ALLOCATION_CREATE_WITHIN_BUDGET_BIT) != 0) + { + const uint32_t heapIndex = MemoryTypeIndexToHeapIndex(memTypeIndex); + VmaBudget heapBudget = {}; + GetHeapBudgets(&heapBudget, heapIndex, 1); + if(heapBudget.usage + size * allocationCount > heapBudget.budget) + { + return VK_ERROR_OUT_OF_DEVICE_MEMORY; + } + } + return VK_SUCCESS; +} + +VkResult VmaAllocator_T::CalcAllocationParams( + VmaAllocationCreateInfo& inoutCreateInfo, + bool dedicatedRequired, + bool dedicatedPreferred) +{ + VMA_ASSERT((inoutCreateInfo.flags & + (VMA_ALLOCATION_CREATE_HOST_ACCESS_SEQUENTIAL_WRITE_BIT | VMA_ALLOCATION_CREATE_HOST_ACCESS_RANDOM_BIT)) != + (VMA_ALLOCATION_CREATE_HOST_ACCESS_SEQUENTIAL_WRITE_BIT | VMA_ALLOCATION_CREATE_HOST_ACCESS_RANDOM_BIT) && + "Specifying both flags VMA_ALLOCATION_CREATE_HOST_ACCESS_SEQUENTIAL_WRITE_BIT and VMA_ALLOCATION_CREATE_HOST_ACCESS_RANDOM_BIT is incorrect."); + VMA_ASSERT((((inoutCreateInfo.flags & VMA_ALLOCATION_CREATE_HOST_ACCESS_ALLOW_TRANSFER_INSTEAD_BIT) == 0 || + (inoutCreateInfo.flags & (VMA_ALLOCATION_CREATE_HOST_ACCESS_SEQUENTIAL_WRITE_BIT | VMA_ALLOCATION_CREATE_HOST_ACCESS_RANDOM_BIT)) != 0)) && + "Specifying VMA_ALLOCATION_CREATE_HOST_ACCESS_ALLOW_TRANSFER_INSTEAD_BIT requires also VMA_ALLOCATION_CREATE_HOST_ACCESS_SEQUENTIAL_WRITE_BIT or VMA_ALLOCATION_CREATE_HOST_ACCESS_RANDOM_BIT."); + if(inoutCreateInfo.usage == VMA_MEMORY_USAGE_AUTO || inoutCreateInfo.usage == VMA_MEMORY_USAGE_AUTO_PREFER_DEVICE || inoutCreateInfo.usage == VMA_MEMORY_USAGE_AUTO_PREFER_HOST) + { + if((inoutCreateInfo.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0) + { + VMA_ASSERT((inoutCreateInfo.flags & (VMA_ALLOCATION_CREATE_HOST_ACCESS_SEQUENTIAL_WRITE_BIT | VMA_ALLOCATION_CREATE_HOST_ACCESS_RANDOM_BIT)) != 0 && + "When using VMA_ALLOCATION_CREATE_MAPPED_BIT and usage = VMA_MEMORY_USAGE_AUTO*, you must also specify VMA_ALLOCATION_CREATE_HOST_ACCESS_SEQUENTIAL_WRITE_BIT or VMA_ALLOCATION_CREATE_HOST_ACCESS_RANDOM_BIT."); + } + } + + // If memory is lazily allocated, it should be always dedicated. 
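+
+    // Usage sketch (not part of VMA): a flag combination that satisfies the asserts above,
+    // e.g. for a persistently mapped upload allocation. Names are placeholders.
+    //
+    //     VmaAllocationCreateInfo allocCreateInfo = {};
+    //     allocCreateInfo.usage = VMA_MEMORY_USAGE_AUTO;
+    //     allocCreateInfo.flags = VMA_ALLOCATION_CREATE_HOST_ACCESS_SEQUENTIAL_WRITE_BIT |
+    //                             VMA_ALLOCATION_CREATE_MAPPED_BIT;
+    //     // With usage = VMA_MEMORY_USAGE_AUTO*, MAPPED without one of the
+    //     // HOST_ACCESS_* bits would trip the assert above.
+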
+ if(dedicatedRequired || + inoutCreateInfo.usage == VMA_MEMORY_USAGE_GPU_LAZILY_ALLOCATED) + { + inoutCreateInfo.flags |= VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT; + } + + if(inoutCreateInfo.pool != VK_NULL_HANDLE) + { + if(inoutCreateInfo.pool->m_BlockVector.HasExplicitBlockSize() && + (inoutCreateInfo.flags & VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT) != 0) + { + VMA_ASSERT(0 && "Specifying VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT while current custom pool doesn't support dedicated allocations."); + return VK_ERROR_FEATURE_NOT_PRESENT; + } + inoutCreateInfo.priority = inoutCreateInfo.pool->m_BlockVector.GetPriority(); + } + + if((inoutCreateInfo.flags & VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT) != 0 && + (inoutCreateInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) != 0) + { + VMA_ASSERT(0 && "Specifying VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT together with VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT makes no sense."); + return VK_ERROR_FEATURE_NOT_PRESENT; + } + + if(VMA_DEBUG_ALWAYS_DEDICATED_MEMORY && + (inoutCreateInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) != 0) + { + inoutCreateInfo.flags |= VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT; + } + + // Non-auto USAGE values imply HOST_ACCESS flags. + // And so does VMA_MEMORY_USAGE_UNKNOWN because it is used with custom pools. + // Which specific flag is used doesn't matter. They change things only when used with VMA_MEMORY_USAGE_AUTO*. + // Otherwise they just protect from assert on mapping. + if(inoutCreateInfo.usage != VMA_MEMORY_USAGE_AUTO && + inoutCreateInfo.usage != VMA_MEMORY_USAGE_AUTO_PREFER_DEVICE && + inoutCreateInfo.usage != VMA_MEMORY_USAGE_AUTO_PREFER_HOST) + { + if((inoutCreateInfo.flags & (VMA_ALLOCATION_CREATE_HOST_ACCESS_SEQUENTIAL_WRITE_BIT | VMA_ALLOCATION_CREATE_HOST_ACCESS_RANDOM_BIT)) == 0) + { + inoutCreateInfo.flags |= VMA_ALLOCATION_CREATE_HOST_ACCESS_RANDOM_BIT; + } + } + + return VK_SUCCESS; +} + +VkResult VmaAllocator_T::AllocateMemory( + const VkMemoryRequirements& vkMemReq, + bool requiresDedicatedAllocation, + bool prefersDedicatedAllocation, + VkBuffer dedicatedBuffer, + VkImage dedicatedImage, + VmaBufferImageUsage dedicatedBufferImageUsage, + const VmaAllocationCreateInfo& createInfo, + VmaSuballocationType suballocType, + size_t allocationCount, + VmaAllocation* pAllocations) +{ + memset(pAllocations, 0, sizeof(VmaAllocation) * allocationCount); + + VMA_ASSERT(VmaIsPow2(vkMemReq.alignment)); + + if(vkMemReq.size == 0) + { + return VK_ERROR_INITIALIZATION_FAILED; + } + + VmaAllocationCreateInfo createInfoFinal = createInfo; + VkResult res = CalcAllocationParams(createInfoFinal, requiresDedicatedAllocation, prefersDedicatedAllocation); + if(res != VK_SUCCESS) + return res; + + if(createInfoFinal.pool != VK_NULL_HANDLE) + { + VmaBlockVector& blockVector = createInfoFinal.pool->m_BlockVector; + return AllocateMemoryOfType( + createInfoFinal.pool, + vkMemReq.size, + vkMemReq.alignment, + prefersDedicatedAllocation, + dedicatedBuffer, + dedicatedImage, + dedicatedBufferImageUsage, + createInfoFinal, + blockVector.GetMemoryTypeIndex(), + suballocType, + createInfoFinal.pool->m_DedicatedAllocations, + blockVector, + allocationCount, + pAllocations); + } + else + { + // Bit mask of memory Vulkan types acceptable for this allocation. + uint32_t memoryTypeBits = vkMemReq.memoryTypeBits; + uint32_t memTypeIndex = UINT32_MAX; + res = FindMemoryTypeIndex(memoryTypeBits, &createInfoFinal, dedicatedBufferImageUsage, &memTypeIndex); + // Can't find any single memory type matching requirements. 
res is VK_ERROR_FEATURE_NOT_PRESENT. + if(res != VK_SUCCESS) + return res; + do + { + VmaBlockVector* blockVector = m_pBlockVectors[memTypeIndex]; + VMA_ASSERT(blockVector && "Trying to use unsupported memory type!"); + res = AllocateMemoryOfType( + VK_NULL_HANDLE, + vkMemReq.size, + vkMemReq.alignment, + requiresDedicatedAllocation || prefersDedicatedAllocation, + dedicatedBuffer, + dedicatedImage, + dedicatedBufferImageUsage, + createInfoFinal, + memTypeIndex, + suballocType, + m_DedicatedAllocations[memTypeIndex], + *blockVector, + allocationCount, + pAllocations); + // Allocation succeeded + if(res == VK_SUCCESS) + return VK_SUCCESS; + + // Remove old memTypeIndex from list of possibilities. + memoryTypeBits &= ~(1u << memTypeIndex); + // Find alternative memTypeIndex. + res = FindMemoryTypeIndex(memoryTypeBits, &createInfoFinal, dedicatedBufferImageUsage, &memTypeIndex); + } while(res == VK_SUCCESS); + + // No other matching memory type index could be found. + // Not returning res, which is VK_ERROR_FEATURE_NOT_PRESENT, because we already failed to allocate once. + return VK_ERROR_OUT_OF_DEVICE_MEMORY; + } +} + +void VmaAllocator_T::FreeMemory( + size_t allocationCount, + const VmaAllocation* pAllocations) +{ + VMA_ASSERT(pAllocations); + + for(size_t allocIndex = allocationCount; allocIndex--; ) + { + VmaAllocation allocation = pAllocations[allocIndex]; + + if(allocation != VK_NULL_HANDLE) + { + if(VMA_DEBUG_INITIALIZE_ALLOCATIONS) + { + FillAllocation(allocation, VMA_ALLOCATION_FILL_PATTERN_DESTROYED); + } + + switch(allocation->GetType()) + { + case VmaAllocation_T::ALLOCATION_TYPE_BLOCK: + { + VmaBlockVector* pBlockVector = VMA_NULL; + VmaPool hPool = allocation->GetParentPool(); + if(hPool != VK_NULL_HANDLE) + { + pBlockVector = &hPool->m_BlockVector; + } + else + { + const uint32_t memTypeIndex = allocation->GetMemoryTypeIndex(); + pBlockVector = m_pBlockVectors[memTypeIndex]; + VMA_ASSERT(pBlockVector && "Trying to free memory of unsupported type!"); + } + pBlockVector->Free(allocation); + } + break; + case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED: + FreeDedicatedMemory(allocation); + break; + default: + VMA_ASSERT(0); + } + } + } +} + +void VmaAllocator_T::CalculateStatistics(VmaTotalStatistics* pStats) +{ + // Initialize. + VmaClearDetailedStatistics(pStats->total); + for(uint32_t i = 0; i < VK_MAX_MEMORY_TYPES; ++i) + VmaClearDetailedStatistics(pStats->memoryType[i]); + for(uint32_t i = 0; i < VK_MAX_MEMORY_HEAPS; ++i) + VmaClearDetailedStatistics(pStats->memoryHeap[i]); + + // Process default pools. + for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex) + { + VmaBlockVector* const pBlockVector = m_pBlockVectors[memTypeIndex]; + if (pBlockVector != VMA_NULL) + pBlockVector->AddDetailedStatistics(pStats->memoryType[memTypeIndex]); + } + + // Process custom pools. + { + VmaMutexLockRead lock(m_PoolsMutex, m_UseMutex); + for(VmaPool pool = m_Pools.Front(); pool != VMA_NULL; pool = m_Pools.GetNext(pool)) + { + VmaBlockVector& blockVector = pool->m_BlockVector; + const uint32_t memTypeIndex = blockVector.GetMemoryTypeIndex(); + blockVector.AddDetailedStatistics(pStats->memoryType[memTypeIndex]); + pool->m_DedicatedAllocations.AddDetailedStatistics(pStats->memoryType[memTypeIndex]); + } + } + + // Process dedicated allocations. 
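+
+    // Usage sketch (not part of VMA): the AllocateMemory()/FreeMemory() paths above back
+    // the public vmaAllocateMemory()/vmaFreeMemory() pair. memReq is a placeholder filled
+    // by vkGetBufferMemoryRequirements() or similar.
+    //
+    //     VmaAllocationCreateInfo allocCreateInfo = {};
+    //     allocCreateInfo.usage = VMA_MEMORY_USAGE_AUTO_PREFER_DEVICE;
+    //     VmaAllocation alloc;
+    //     VmaAllocationInfo allocInfo;
+    //     if(vmaAllocateMemory(allocator, &memReq, &allocCreateInfo, &alloc, &allocInfo) == VK_SUCCESS)
+    //     {
+    //         // ... bind and use ...
+    //         vmaFreeMemory(allocator, alloc);
+    //     }
+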
+ for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex) + { + m_DedicatedAllocations[memTypeIndex].AddDetailedStatistics(pStats->memoryType[memTypeIndex]); + } + + // Sum from memory types to memory heaps. + for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex) + { + const uint32_t memHeapIndex = m_MemProps.memoryTypes[memTypeIndex].heapIndex; + VmaAddDetailedStatistics(pStats->memoryHeap[memHeapIndex], pStats->memoryType[memTypeIndex]); + } + + // Sum from memory heaps to total. + for(uint32_t memHeapIndex = 0; memHeapIndex < GetMemoryHeapCount(); ++memHeapIndex) + VmaAddDetailedStatistics(pStats->total, pStats->memoryHeap[memHeapIndex]); + + VMA_ASSERT(pStats->total.statistics.allocationCount == 0 || + pStats->total.allocationSizeMax >= pStats->total.allocationSizeMin); + VMA_ASSERT(pStats->total.unusedRangeCount == 0 || + pStats->total.unusedRangeSizeMax >= pStats->total.unusedRangeSizeMin); +} + +void VmaAllocator_T::GetHeapBudgets(VmaBudget* outBudgets, uint32_t firstHeap, uint32_t heapCount) +{ +#if VMA_MEMORY_BUDGET + if(m_UseExtMemoryBudget) + { + if(m_Budget.m_OperationsSinceBudgetFetch < 30) + { + VmaMutexLockRead lockRead(m_Budget.m_BudgetMutex, m_UseMutex); + for(uint32_t i = 0; i < heapCount; ++i, ++outBudgets) + { + const uint32_t heapIndex = firstHeap + i; + + outBudgets->statistics.blockCount = m_Budget.m_BlockCount[heapIndex]; + outBudgets->statistics.allocationCount = m_Budget.m_AllocationCount[heapIndex]; + outBudgets->statistics.blockBytes = m_Budget.m_BlockBytes[heapIndex]; + outBudgets->statistics.allocationBytes = m_Budget.m_AllocationBytes[heapIndex]; + + if(m_Budget.m_VulkanUsage[heapIndex] + outBudgets->statistics.blockBytes > m_Budget.m_BlockBytesAtBudgetFetch[heapIndex]) + { + outBudgets->usage = m_Budget.m_VulkanUsage[heapIndex] + + outBudgets->statistics.blockBytes - m_Budget.m_BlockBytesAtBudgetFetch[heapIndex]; + } + else + { + outBudgets->usage = 0; + } + + // Have to take MIN with heap size because explicit HeapSizeLimit is included in it. + outBudgets->budget = VMA_MIN( + m_Budget.m_VulkanBudget[heapIndex], m_MemProps.memoryHeaps[heapIndex].size); + } + } + else + { + UpdateVulkanBudget(); // Outside of mutex lock + GetHeapBudgets(outBudgets, firstHeap, heapCount); // Recursion + } + } + else +#endif + { + for(uint32_t i = 0; i < heapCount; ++i, ++outBudgets) + { + const uint32_t heapIndex = firstHeap + i; + + outBudgets->statistics.blockCount = m_Budget.m_BlockCount[heapIndex]; + outBudgets->statistics.allocationCount = m_Budget.m_AllocationCount[heapIndex]; + outBudgets->statistics.blockBytes = m_Budget.m_BlockBytes[heapIndex]; + outBudgets->statistics.allocationBytes = m_Budget.m_AllocationBytes[heapIndex]; + + outBudgets->usage = outBudgets->statistics.blockBytes; + outBudgets->budget = m_MemProps.memoryHeaps[heapIndex].size * 8 / 10; // 80% heuristics. 
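+
+            // Usage sketch (not part of VMA): budgets are refreshed lazily (after a number
+            // of allocations or on vmaSetCurrentFrameIndex()), so a typical frame loop does
+            // the following. frameIndex/allocator are placeholders. Without
+            // VK_EXT_memory_budget the 80%-of-heap heuristic above is used instead.
+            //
+            //     vmaSetCurrentFrameIndex(allocator, frameIndex);
+            //     VmaBudget budgets[VK_MAX_MEMORY_HEAPS];
+            //     vmaGetHeapBudgets(allocator, budgets);
+            //     // compare budgets[heapIndex].usage against budgets[heapIndex].budget
+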
+ } + } +} + +void VmaAllocator_T::GetAllocationInfo(VmaAllocation hAllocation, VmaAllocationInfo* pAllocationInfo) +{ + pAllocationInfo->memoryType = hAllocation->GetMemoryTypeIndex(); + pAllocationInfo->deviceMemory = hAllocation->GetMemory(); + pAllocationInfo->offset = hAllocation->GetOffset(); + pAllocationInfo->size = hAllocation->GetSize(); + pAllocationInfo->pMappedData = hAllocation->GetMappedData(); + pAllocationInfo->pUserData = hAllocation->GetUserData(); + pAllocationInfo->pName = hAllocation->GetName(); +} + +void VmaAllocator_T::GetAllocationInfo2(VmaAllocation hAllocation, VmaAllocationInfo2* pAllocationInfo) +{ + GetAllocationInfo(hAllocation, &pAllocationInfo->allocationInfo); + + switch (hAllocation->GetType()) + { + case VmaAllocation_T::ALLOCATION_TYPE_BLOCK: + pAllocationInfo->blockSize = hAllocation->GetBlock()->m_pMetadata->GetSize(); + pAllocationInfo->dedicatedMemory = VK_FALSE; + break; + case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED: + pAllocationInfo->blockSize = pAllocationInfo->allocationInfo.size; + pAllocationInfo->dedicatedMemory = VK_TRUE; + break; + default: + VMA_ASSERT(0); + } +} + +VkResult VmaAllocator_T::CreatePool(const VmaPoolCreateInfo* pCreateInfo, VmaPool* pPool) +{ + VMA_DEBUG_LOG_FORMAT(" CreatePool: MemoryTypeIndex=%" PRIu32 ", flags=%" PRIu32, pCreateInfo->memoryTypeIndex, pCreateInfo->flags); + + VmaPoolCreateInfo newCreateInfo = *pCreateInfo; + + // Protection against uninitialized new structure member. If garbage data are left there, this pointer dereference would crash. + if(pCreateInfo->pMemoryAllocateNext) + { + VMA_ASSERT(((const VkBaseInStructure*)pCreateInfo->pMemoryAllocateNext)->sType != 0); + } + + if(newCreateInfo.maxBlockCount == 0) + { + newCreateInfo.maxBlockCount = SIZE_MAX; + } + if(newCreateInfo.minBlockCount > newCreateInfo.maxBlockCount) + { + return VK_ERROR_INITIALIZATION_FAILED; + } + // Memory type index out of range or forbidden. + if(pCreateInfo->memoryTypeIndex >= GetMemoryTypeCount() || + ((1u << pCreateInfo->memoryTypeIndex) & m_GlobalMemoryTypeBits) == 0) + { + return VK_ERROR_FEATURE_NOT_PRESENT; + } + if(newCreateInfo.minAllocationAlignment > 0) + { + VMA_ASSERT(VmaIsPow2(newCreateInfo.minAllocationAlignment)); + } + + const VkDeviceSize preferredBlockSize = CalcPreferredBlockSize(newCreateInfo.memoryTypeIndex); + + *pPool = vma_new(this, VmaPool_T)(this, newCreateInfo, preferredBlockSize); + + VkResult res = (*pPool)->m_BlockVector.CreateMinBlocks(); + if(res != VK_SUCCESS) + { + vma_delete(this, *pPool); + *pPool = VMA_NULL; + return res; + } + + // Add to m_Pools. + { + VmaMutexLockWrite lock(m_PoolsMutex, m_UseMutex); + (*pPool)->SetId(m_NextPoolId++); + m_Pools.PushBack(*pPool); + } + + return VK_SUCCESS; +} + +void VmaAllocator_T::DestroyPool(VmaPool pool) +{ + // Remove from m_Pools. 
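+
+    // Usage sketch (not part of VMA): creating and destroying a custom pool around the
+    // CreatePool()/DestroyPool() paths above. memTypeIndex is a placeholder, e.g. found
+    // with vmaFindMemoryTypeIndexForBufferInfo().
+    //
+    //     VmaPoolCreateInfo poolCreateInfo = {};
+    //     poolCreateInfo.memoryTypeIndex = memTypeIndex;
+    //     poolCreateInfo.blockSize = 64ull * 1024 * 1024; // 0 = let VMA pick a preferred size
+    //     VmaPool pool;
+    //     if(vmaCreatePool(allocator, &poolCreateInfo, &pool) == VK_SUCCESS)
+    //     {
+    //         // ... allocate with VmaAllocationCreateInfo::pool = pool ...
+    //         vmaDestroyPool(allocator, pool);
+    //     }
+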
+ { + VmaMutexLockWrite lock(m_PoolsMutex, m_UseMutex); + m_Pools.Remove(pool); + } + + vma_delete(this, pool); +} + +void VmaAllocator_T::GetPoolStatistics(VmaPool pool, VmaStatistics* pPoolStats) +{ + VmaClearStatistics(*pPoolStats); + pool->m_BlockVector.AddStatistics(*pPoolStats); + pool->m_DedicatedAllocations.AddStatistics(*pPoolStats); +} + +void VmaAllocator_T::CalculatePoolStatistics(VmaPool pool, VmaDetailedStatistics* pPoolStats) +{ + VmaClearDetailedStatistics(*pPoolStats); + pool->m_BlockVector.AddDetailedStatistics(*pPoolStats); + pool->m_DedicatedAllocations.AddDetailedStatistics(*pPoolStats); +} + +void VmaAllocator_T::SetCurrentFrameIndex(uint32_t frameIndex) +{ + m_CurrentFrameIndex.store(frameIndex); + +#if VMA_MEMORY_BUDGET + if(m_UseExtMemoryBudget) + { + UpdateVulkanBudget(); + } +#endif // #if VMA_MEMORY_BUDGET +} + +VkResult VmaAllocator_T::CheckPoolCorruption(VmaPool hPool) +{ + return hPool->m_BlockVector.CheckCorruption(); +} + +VkResult VmaAllocator_T::CheckCorruption(uint32_t memoryTypeBits) +{ + VkResult finalRes = VK_ERROR_FEATURE_NOT_PRESENT; + + // Process default pools. + for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex) + { + VmaBlockVector* const pBlockVector = m_pBlockVectors[memTypeIndex]; + if(pBlockVector != VMA_NULL) + { + VkResult localRes = pBlockVector->CheckCorruption(); + switch(localRes) + { + case VK_ERROR_FEATURE_NOT_PRESENT: + break; + case VK_SUCCESS: + finalRes = VK_SUCCESS; + break; + default: + return localRes; + } + } + } + + // Process custom pools. + { + VmaMutexLockRead lock(m_PoolsMutex, m_UseMutex); + for(VmaPool pool = m_Pools.Front(); pool != VMA_NULL; pool = m_Pools.GetNext(pool)) + { + if(((1u << pool->m_BlockVector.GetMemoryTypeIndex()) & memoryTypeBits) != 0) + { + VkResult localRes = pool->m_BlockVector.CheckCorruption(); + switch(localRes) + { + case VK_ERROR_FEATURE_NOT_PRESENT: + break; + case VK_SUCCESS: + finalRes = VK_SUCCESS; + break; + default: + return localRes; + } + } + } + } + + return finalRes; +} + +VkResult VmaAllocator_T::AllocateVulkanMemory(const VkMemoryAllocateInfo* pAllocateInfo, VkDeviceMemory* pMemory) +{ + AtomicTransactionalIncrement deviceMemoryCountIncrement; + const uint64_t prevDeviceMemoryCount = deviceMemoryCountIncrement.Increment(&m_DeviceMemoryCount); +#if VMA_DEBUG_DONT_EXCEED_MAX_MEMORY_ALLOCATION_COUNT + if(prevDeviceMemoryCount >= m_PhysicalDeviceProperties.limits.maxMemoryAllocationCount) + { + return VK_ERROR_TOO_MANY_OBJECTS; + } +#endif + + const uint32_t heapIndex = MemoryTypeIndexToHeapIndex(pAllocateInfo->memoryTypeIndex); + + // HeapSizeLimit is in effect for this heap. + if((m_HeapSizeLimitMask & (1u << heapIndex)) != 0) + { + const VkDeviceSize heapSize = m_MemProps.memoryHeaps[heapIndex].size; + VkDeviceSize blockBytes = m_Budget.m_BlockBytes[heapIndex]; + for(;;) + { + const VkDeviceSize blockBytesAfterAllocation = blockBytes + pAllocateInfo->allocationSize; + if(blockBytesAfterAllocation > heapSize) + { + return VK_ERROR_OUT_OF_DEVICE_MEMORY; + } + if(m_Budget.m_BlockBytes[heapIndex].compare_exchange_strong(blockBytes, blockBytesAfterAllocation)) + { + break; + } + } + } + else + { + m_Budget.m_BlockBytes[heapIndex] += pAllocateInfo->allocationSize; + } + ++m_Budget.m_BlockCount[heapIndex]; + + // VULKAN CALL vkAllocateMemory. 
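+
+    // Usage sketch (not part of VMA): querying a custom pool and checking it for
+    // corruption. vmaCheckPoolCorruption() reports VK_ERROR_FEATURE_NOT_PRESENT unless
+    // corruption detection (VMA_DEBUG_DETECT_CORRUPTION with a debug margin) was enabled
+    // at compile time. pool/allocator are placeholders.
+    //
+    //     VmaStatistics poolStats;
+    //     vmaGetPoolStatistics(allocator, pool, &poolStats);
+    //     VkResult corruption = vmaCheckPoolCorruption(allocator, pool);
+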
+ VkResult res = (*m_VulkanFunctions.vkAllocateMemory)(m_hDevice, pAllocateInfo, GetAllocationCallbacks(), pMemory); + + if(res == VK_SUCCESS) + { +#if VMA_MEMORY_BUDGET + ++m_Budget.m_OperationsSinceBudgetFetch; +#endif + + // Informative callback. + if(m_DeviceMemoryCallbacks.pfnAllocate != VMA_NULL) + { + (*m_DeviceMemoryCallbacks.pfnAllocate)(this, pAllocateInfo->memoryTypeIndex, *pMemory, pAllocateInfo->allocationSize, m_DeviceMemoryCallbacks.pUserData); + } + + deviceMemoryCountIncrement.Commit(); + } + else + { + --m_Budget.m_BlockCount[heapIndex]; + m_Budget.m_BlockBytes[heapIndex] -= pAllocateInfo->allocationSize; + } + + return res; +} + +void VmaAllocator_T::FreeVulkanMemory(uint32_t memoryType, VkDeviceSize size, VkDeviceMemory hMemory) +{ + // Informative callback. + if(m_DeviceMemoryCallbacks.pfnFree != VMA_NULL) + { + (*m_DeviceMemoryCallbacks.pfnFree)(this, memoryType, hMemory, size, m_DeviceMemoryCallbacks.pUserData); + } + + // VULKAN CALL vkFreeMemory. + (*m_VulkanFunctions.vkFreeMemory)(m_hDevice, hMemory, GetAllocationCallbacks()); + + const uint32_t heapIndex = MemoryTypeIndexToHeapIndex(memoryType); + --m_Budget.m_BlockCount[heapIndex]; + m_Budget.m_BlockBytes[heapIndex] -= size; + + --m_DeviceMemoryCount; +} + +VkResult VmaAllocator_T::BindVulkanBuffer( + VkDeviceMemory memory, + VkDeviceSize memoryOffset, + VkBuffer buffer, + const void* pNext) +{ + if(pNext != VMA_NULL) + { +#if VMA_VULKAN_VERSION >= 1001000 || VMA_BIND_MEMORY2 + if((m_UseKhrBindMemory2 || m_VulkanApiVersion >= VK_MAKE_VERSION(1, 1, 0)) && + m_VulkanFunctions.vkBindBufferMemory2KHR != VMA_NULL) + { + VkBindBufferMemoryInfoKHR bindBufferMemoryInfo = { VK_STRUCTURE_TYPE_BIND_BUFFER_MEMORY_INFO_KHR }; + bindBufferMemoryInfo.pNext = pNext; + bindBufferMemoryInfo.buffer = buffer; + bindBufferMemoryInfo.memory = memory; + bindBufferMemoryInfo.memoryOffset = memoryOffset; + return (*m_VulkanFunctions.vkBindBufferMemory2KHR)(m_hDevice, 1, &bindBufferMemoryInfo); + } + else +#endif // #if VMA_VULKAN_VERSION >= 1001000 || VMA_BIND_MEMORY2 + { + return VK_ERROR_EXTENSION_NOT_PRESENT; + } + } + else + { + return (*m_VulkanFunctions.vkBindBufferMemory)(m_hDevice, buffer, memory, memoryOffset); + } +} + +VkResult VmaAllocator_T::BindVulkanImage( + VkDeviceMemory memory, + VkDeviceSize memoryOffset, + VkImage image, + const void* pNext) +{ + if(pNext != VMA_NULL) + { +#if VMA_VULKAN_VERSION >= 1001000 || VMA_BIND_MEMORY2 + if((m_UseKhrBindMemory2 || m_VulkanApiVersion >= VK_MAKE_VERSION(1, 1, 0)) && + m_VulkanFunctions.vkBindImageMemory2KHR != VMA_NULL) + { + VkBindImageMemoryInfoKHR bindBufferMemoryInfo = { VK_STRUCTURE_TYPE_BIND_IMAGE_MEMORY_INFO_KHR }; + bindBufferMemoryInfo.pNext = pNext; + bindBufferMemoryInfo.image = image; + bindBufferMemoryInfo.memory = memory; + bindBufferMemoryInfo.memoryOffset = memoryOffset; + return (*m_VulkanFunctions.vkBindImageMemory2KHR)(m_hDevice, 1, &bindBufferMemoryInfo); + } + else +#endif // #if VMA_BIND_MEMORY2 + { + return VK_ERROR_EXTENSION_NOT_PRESENT; + } + } + else + { + return (*m_VulkanFunctions.vkBindImageMemory)(m_hDevice, image, memory, memoryOffset); + } +} + +VkResult VmaAllocator_T::Map(VmaAllocation hAllocation, void** ppData) +{ + switch(hAllocation->GetType()) + { + case VmaAllocation_T::ALLOCATION_TYPE_BLOCK: + { + VmaDeviceMemoryBlock* const pBlock = hAllocation->GetBlock(); + char *pBytes = VMA_NULL; + VkResult res = pBlock->Map(this, 1, (void**)&pBytes); + if(res == VK_SUCCESS) + { + *ppData = pBytes + (ptrdiff_t)hAllocation->GetOffset(); + 
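+
+                // Usage sketch (not part of VMA): the sub-allocation offset is applied here,
+                // so vmaMapMemory() hands the caller a pointer to its own region of the block.
+                // alloc/srcData/srcSize are placeholders.
+                //
+                //     void* pData;
+                //     if(vmaMapMemory(allocator, alloc, &pData) == VK_SUCCESS)
+                //     {
+                //         memcpy(pData, srcData, srcSize);
+                //         vmaUnmapMemory(allocator, alloc);
+                //     }
+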
hAllocation->BlockAllocMap(); + } + return res; + } + case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED: + return hAllocation->DedicatedAllocMap(this, ppData); + default: + VMA_ASSERT(0); + return VK_ERROR_MEMORY_MAP_FAILED; + } +} + +void VmaAllocator_T::Unmap(VmaAllocation hAllocation) +{ + switch(hAllocation->GetType()) + { + case VmaAllocation_T::ALLOCATION_TYPE_BLOCK: + { + VmaDeviceMemoryBlock* const pBlock = hAllocation->GetBlock(); + hAllocation->BlockAllocUnmap(); + pBlock->Unmap(this, 1); + } + break; + case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED: + hAllocation->DedicatedAllocUnmap(this); + break; + default: + VMA_ASSERT(0); + } +} + +VkResult VmaAllocator_T::BindBufferMemory( + VmaAllocation hAllocation, + VkDeviceSize allocationLocalOffset, + VkBuffer hBuffer, + const void* pNext) +{ + VkResult res = VK_ERROR_UNKNOWN_COPY; + switch(hAllocation->GetType()) + { + case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED: + res = BindVulkanBuffer(hAllocation->GetMemory(), allocationLocalOffset, hBuffer, pNext); + break; + case VmaAllocation_T::ALLOCATION_TYPE_BLOCK: + { + VmaDeviceMemoryBlock* const pBlock = hAllocation->GetBlock(); + VMA_ASSERT(pBlock && "Binding buffer to allocation that doesn't belong to any block."); + res = pBlock->BindBufferMemory(this, hAllocation, allocationLocalOffset, hBuffer, pNext); + break; + } + default: + VMA_ASSERT(0); + } + return res; +} + +VkResult VmaAllocator_T::BindImageMemory( + VmaAllocation hAllocation, + VkDeviceSize allocationLocalOffset, + VkImage hImage, + const void* pNext) +{ + VkResult res = VK_ERROR_UNKNOWN_COPY; + switch(hAllocation->GetType()) + { + case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED: + res = BindVulkanImage(hAllocation->GetMemory(), allocationLocalOffset, hImage, pNext); + break; + case VmaAllocation_T::ALLOCATION_TYPE_BLOCK: + { + VmaDeviceMemoryBlock* pBlock = hAllocation->GetBlock(); + VMA_ASSERT(pBlock && "Binding image to allocation that doesn't belong to any block."); + res = pBlock->BindImageMemory(this, hAllocation, allocationLocalOffset, hImage, pNext); + break; + } + default: + VMA_ASSERT(0); + } + return res; +} + +VkResult VmaAllocator_T::FlushOrInvalidateAllocation( + VmaAllocation hAllocation, + VkDeviceSize offset, VkDeviceSize size, + VMA_CACHE_OPERATION op) +{ + VkResult res = VK_SUCCESS; + + VkMappedMemoryRange memRange = {}; + if(GetFlushOrInvalidateRange(hAllocation, offset, size, memRange)) + { + switch(op) + { + case VMA_CACHE_FLUSH: + res = (*GetVulkanFunctions().vkFlushMappedMemoryRanges)(m_hDevice, 1, &memRange); + break; + case VMA_CACHE_INVALIDATE: + res = (*GetVulkanFunctions().vkInvalidateMappedMemoryRanges)(m_hDevice, 1, &memRange); + break; + default: + VMA_ASSERT(0); + } + } + // else: Just ignore this call. + return res; +} + +VkResult VmaAllocator_T::FlushOrInvalidateAllocations( + uint32_t allocationCount, + const VmaAllocation* allocations, + const VkDeviceSize* offsets, const VkDeviceSize* sizes, + VMA_CACHE_OPERATION op) +{ + typedef VmaStlAllocator RangeAllocator; + typedef VmaSmallVector RangeVector; + RangeVector ranges = RangeVector(RangeAllocator(GetAllocationCallbacks())); + + for(uint32_t allocIndex = 0; allocIndex < allocationCount; ++allocIndex) + { + const VmaAllocation alloc = allocations[allocIndex]; + const VkDeviceSize offset = offsets != VMA_NULL ? offsets[allocIndex] : 0; + const VkDeviceSize size = sizes != VMA_NULL ? 
sizes[allocIndex] : VK_WHOLE_SIZE; + VkMappedMemoryRange newRange; + if(GetFlushOrInvalidateRange(alloc, offset, size, newRange)) + { + ranges.push_back(newRange); + } + } + + VkResult res = VK_SUCCESS; + if(!ranges.empty()) + { + switch(op) + { + case VMA_CACHE_FLUSH: + res = (*GetVulkanFunctions().vkFlushMappedMemoryRanges)(m_hDevice, (uint32_t)ranges.size(), ranges.data()); + break; + case VMA_CACHE_INVALIDATE: + res = (*GetVulkanFunctions().vkInvalidateMappedMemoryRanges)(m_hDevice, (uint32_t)ranges.size(), ranges.data()); + break; + default: + VMA_ASSERT(0); + } + } + // else: Just ignore this call. + return res; +} + +VkResult VmaAllocator_T::CopyMemoryToAllocation( + const void* pSrcHostPointer, + VmaAllocation dstAllocation, + VkDeviceSize dstAllocationLocalOffset, + VkDeviceSize size) +{ + void* dstMappedData = VMA_NULL; + VkResult res = Map(dstAllocation, &dstMappedData); + if(res == VK_SUCCESS) + { + memcpy((char*)dstMappedData + dstAllocationLocalOffset, pSrcHostPointer, (size_t)size); + Unmap(dstAllocation); + res = FlushOrInvalidateAllocation(dstAllocation, dstAllocationLocalOffset, size, VMA_CACHE_FLUSH); + } + return res; +} + +VkResult VmaAllocator_T::CopyAllocationToMemory( + VmaAllocation srcAllocation, + VkDeviceSize srcAllocationLocalOffset, + void* pDstHostPointer, + VkDeviceSize size) +{ + void* srcMappedData = VMA_NULL; + VkResult res = Map(srcAllocation, &srcMappedData); + if(res == VK_SUCCESS) + { + res = FlushOrInvalidateAllocation(srcAllocation, srcAllocationLocalOffset, size, VMA_CACHE_INVALIDATE); + if(res == VK_SUCCESS) + { + memcpy(pDstHostPointer, (const char*)srcMappedData + srcAllocationLocalOffset, (size_t)size); + Unmap(srcAllocation); + } + } + return res; +} + +void VmaAllocator_T::FreeDedicatedMemory(const VmaAllocation allocation) +{ + VMA_ASSERT(allocation && allocation->GetType() == VmaAllocation_T::ALLOCATION_TYPE_DEDICATED); + + const uint32_t memTypeIndex = allocation->GetMemoryTypeIndex(); + VmaPool parentPool = allocation->GetParentPool(); + if(parentPool == VK_NULL_HANDLE) + { + // Default pool + m_DedicatedAllocations[memTypeIndex].Unregister(allocation); + } + else + { + // Custom pool + parentPool->m_DedicatedAllocations.Unregister(allocation); + } + + VkDeviceMemory hMemory = allocation->GetMemory(); + + /* + There is no need to call this, because Vulkan spec allows to skip vkUnmapMemory + before vkFreeMemory. + + if(allocation->GetMappedData() != VMA_NULL) + { + (*m_VulkanFunctions.vkUnmapMemory)(m_hDevice, hMemory); + } + */ + + FreeVulkanMemory(memTypeIndex, allocation->GetSize(), hMemory); + + m_Budget.RemoveAllocation(MemoryTypeIndexToHeapIndex(allocation->GetMemoryTypeIndex()), allocation->GetSize()); + allocation->Destroy(this); + m_AllocationObjectAllocator.Free(allocation); + + VMA_DEBUG_LOG_FORMAT(" Freed DedicatedMemory MemoryTypeIndex=%" PRIu32, memTypeIndex); +} + +uint32_t VmaAllocator_T::CalculateGpuDefragmentationMemoryTypeBits() const +{ + VkBufferCreateInfo dummyBufCreateInfo; + VmaFillGpuDefragmentationBufferCreateInfo(dummyBufCreateInfo); + + uint32_t memoryTypeBits = 0; + + // Create buffer. + VkBuffer buf = VK_NULL_HANDLE; + VkResult res = (*GetVulkanFunctions().vkCreateBuffer)( + m_hDevice, &dummyBufCreateInfo, GetAllocationCallbacks(), &buf); + if(res == VK_SUCCESS) + { + // Query for supported memory types. + VkMemoryRequirements memReq; + (*GetVulkanFunctions().vkGetBufferMemoryRequirements)(m_hDevice, buf, &memReq); + memoryTypeBits = memReq.memoryTypeBits; + + // Destroy buffer. 
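+
+    // Usage sketch (not part of VMA): CopyMemoryToAllocation() above backs the one-call
+    // host-to-allocation upload, which maps, copies, flushes and unmaps internally.
+    // srcData/srcSize/alloc are placeholders; the allocation must be host-visible and
+    // mappable (created with a HOST_ACCESS_* flag).
+    //
+    //     VkResult r = vmaCopyMemoryToAllocation(allocator, srcData, alloc, 0, srcSize);
+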
+ (*GetVulkanFunctions().vkDestroyBuffer)(m_hDevice, buf, GetAllocationCallbacks()); + } + + return memoryTypeBits; +} + +uint32_t VmaAllocator_T::CalculateGlobalMemoryTypeBits() const +{ + // Make sure memory information is already fetched. + VMA_ASSERT(GetMemoryTypeCount() > 0); + + uint32_t memoryTypeBits = UINT32_MAX; + + if(!m_UseAmdDeviceCoherentMemory) + { + // Exclude memory types that have VK_MEMORY_PROPERTY_DEVICE_COHERENT_BIT_AMD. + for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex) + { + if((m_MemProps.memoryTypes[memTypeIndex].propertyFlags & VK_MEMORY_PROPERTY_DEVICE_COHERENT_BIT_AMD_COPY) != 0) + { + memoryTypeBits &= ~(1u << memTypeIndex); + } + } + } + + return memoryTypeBits; +} + +bool VmaAllocator_T::GetFlushOrInvalidateRange( + VmaAllocation allocation, + VkDeviceSize offset, VkDeviceSize size, + VkMappedMemoryRange& outRange) const +{ + const uint32_t memTypeIndex = allocation->GetMemoryTypeIndex(); + if(size > 0 && IsMemoryTypeNonCoherent(memTypeIndex)) + { + const VkDeviceSize nonCoherentAtomSize = m_PhysicalDeviceProperties.limits.nonCoherentAtomSize; + const VkDeviceSize allocationSize = allocation->GetSize(); + VMA_ASSERT(offset <= allocationSize); + + outRange.sType = VK_STRUCTURE_TYPE_MAPPED_MEMORY_RANGE; + outRange.pNext = VMA_NULL; + outRange.memory = allocation->GetMemory(); + + switch(allocation->GetType()) + { + case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED: + outRange.offset = VmaAlignDown(offset, nonCoherentAtomSize); + if(size == VK_WHOLE_SIZE) + { + outRange.size = allocationSize - outRange.offset; + } + else + { + VMA_ASSERT(offset + size <= allocationSize); + outRange.size = VMA_MIN( + VmaAlignUp(size + (offset - outRange.offset), nonCoherentAtomSize), + allocationSize - outRange.offset); + } + break; + case VmaAllocation_T::ALLOCATION_TYPE_BLOCK: + { + // 1. Still within this allocation. + outRange.offset = VmaAlignDown(offset, nonCoherentAtomSize); + if(size == VK_WHOLE_SIZE) + { + size = allocationSize - offset; + } + else + { + VMA_ASSERT(offset + size <= allocationSize); + } + outRange.size = VmaAlignUp(size + (offset - outRange.offset), nonCoherentAtomSize); + + // 2. Adjust to whole block. 
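+
+                // Usage sketch (not part of VMA): callers do not align to nonCoherentAtomSize
+                // themselves; they pass the raw offset/size and this function rounds the range.
+                //
+                //     // after writing bytes [offset, offset + size) through the mapped pointer:
+                //     vmaFlushAllocation(allocator, alloc, offset, size);
+                //     // VK_WHOLE_SIZE may be passed as size to flush to the end of the allocation
+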
+ const VkDeviceSize allocationOffset = allocation->GetOffset(); + VMA_ASSERT(allocationOffset % nonCoherentAtomSize == 0); + const VkDeviceSize blockSize = allocation->GetBlock()->m_pMetadata->GetSize(); + outRange.offset += allocationOffset; + outRange.size = VMA_MIN(outRange.size, blockSize - outRange.offset); + + break; + } + default: + VMA_ASSERT(0); + } + return true; + } + return false; +} + +#if VMA_MEMORY_BUDGET +void VmaAllocator_T::UpdateVulkanBudget() +{ + VMA_ASSERT(m_UseExtMemoryBudget); + + VkPhysicalDeviceMemoryProperties2KHR memProps = { VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MEMORY_PROPERTIES_2_KHR }; + + VkPhysicalDeviceMemoryBudgetPropertiesEXT budgetProps = { VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MEMORY_BUDGET_PROPERTIES_EXT }; + VmaPnextChainPushFront(&memProps, &budgetProps); + + GetVulkanFunctions().vkGetPhysicalDeviceMemoryProperties2KHR(m_PhysicalDevice, &memProps); + + { + VmaMutexLockWrite lockWrite(m_Budget.m_BudgetMutex, m_UseMutex); + + for(uint32_t heapIndex = 0; heapIndex < GetMemoryHeapCount(); ++heapIndex) + { + m_Budget.m_VulkanUsage[heapIndex] = budgetProps.heapUsage[heapIndex]; + m_Budget.m_VulkanBudget[heapIndex] = budgetProps.heapBudget[heapIndex]; + m_Budget.m_BlockBytesAtBudgetFetch[heapIndex] = m_Budget.m_BlockBytes[heapIndex].load(); + + // Some bugged drivers return the budget incorrectly, e.g. 0 or much bigger than heap size. + if(m_Budget.m_VulkanBudget[heapIndex] == 0) + { + m_Budget.m_VulkanBudget[heapIndex] = m_MemProps.memoryHeaps[heapIndex].size * 8 / 10; // 80% heuristics. + } + else if(m_Budget.m_VulkanBudget[heapIndex] > m_MemProps.memoryHeaps[heapIndex].size) + { + m_Budget.m_VulkanBudget[heapIndex] = m_MemProps.memoryHeaps[heapIndex].size; + } + if(m_Budget.m_VulkanUsage[heapIndex] == 0 && m_Budget.m_BlockBytesAtBudgetFetch[heapIndex] > 0) + { + m_Budget.m_VulkanUsage[heapIndex] = m_Budget.m_BlockBytesAtBudgetFetch[heapIndex]; + } + } + m_Budget.m_OperationsSinceBudgetFetch = 0; + } +} +#endif // VMA_MEMORY_BUDGET + +void VmaAllocator_T::FillAllocation(const VmaAllocation hAllocation, uint8_t pattern) +{ + if(VMA_DEBUG_INITIALIZE_ALLOCATIONS && + hAllocation->IsMappingAllowed() && + (m_MemProps.memoryTypes[hAllocation->GetMemoryTypeIndex()].propertyFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) != 0) + { + void* pData = VMA_NULL; + VkResult res = Map(hAllocation, &pData); + if(res == VK_SUCCESS) + { + memset(pData, (int)pattern, (size_t)hAllocation->GetSize()); + FlushOrInvalidateAllocation(hAllocation, 0, VK_WHOLE_SIZE, VMA_CACHE_FLUSH); + Unmap(hAllocation); + } + else + { + VMA_ASSERT(0 && "VMA_DEBUG_INITIALIZE_ALLOCATIONS is enabled, but couldn't map memory to fill allocation."); + } + } +} + +uint32_t VmaAllocator_T::GetGpuDefragmentationMemoryTypeBits() +{ + uint32_t memoryTypeBits = m_GpuDefragmentationMemoryTypeBits.load(); + if(memoryTypeBits == UINT32_MAX) + { + memoryTypeBits = CalculateGpuDefragmentationMemoryTypeBits(); + m_GpuDefragmentationMemoryTypeBits.store(memoryTypeBits); + } + return memoryTypeBits; +} + +#if VMA_STATS_STRING_ENABLED +void VmaAllocator_T::PrintDetailedMap(VmaJsonWriter& json) +{ + json.WriteString("DefaultPools"); + json.BeginObject(); + { + for (uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex) + { + VmaBlockVector* pBlockVector = m_pBlockVectors[memTypeIndex]; + VmaDedicatedAllocationList& dedicatedAllocList = m_DedicatedAllocations[memTypeIndex]; + if (pBlockVector != VMA_NULL) + { + json.BeginString("Type "); + json.ContinueString(memTypeIndex); + json.EndString(); + 
json.BeginObject(); + { + json.WriteString("PreferredBlockSize"); + json.WriteNumber(pBlockVector->GetPreferredBlockSize()); + + json.WriteString("Blocks"); + pBlockVector->PrintDetailedMap(json); + + json.WriteString("DedicatedAllocations"); + dedicatedAllocList.BuildStatsString(json); + } + json.EndObject(); + } + } + } + json.EndObject(); + + json.WriteString("CustomPools"); + json.BeginObject(); + { + VmaMutexLockRead lock(m_PoolsMutex, m_UseMutex); + if (!m_Pools.IsEmpty()) + { + for (uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex) + { + bool displayType = true; + size_t index = 0; + for (VmaPool pool = m_Pools.Front(); pool != VMA_NULL; pool = m_Pools.GetNext(pool)) + { + VmaBlockVector& blockVector = pool->m_BlockVector; + if (blockVector.GetMemoryTypeIndex() == memTypeIndex) + { + if (displayType) + { + json.BeginString("Type "); + json.ContinueString(memTypeIndex); + json.EndString(); + json.BeginArray(); + displayType = false; + } + + json.BeginObject(); + { + json.WriteString("Name"); + json.BeginString(); + json.ContinueString((uint64_t)index++); + if (pool->GetName()) + { + json.ContinueString(" - "); + json.ContinueString(pool->GetName()); + } + json.EndString(); + + json.WriteString("PreferredBlockSize"); + json.WriteNumber(blockVector.GetPreferredBlockSize()); + + json.WriteString("Blocks"); + blockVector.PrintDetailedMap(json); + + json.WriteString("DedicatedAllocations"); + pool->m_DedicatedAllocations.BuildStatsString(json); + } + json.EndObject(); + } + } + + if (!displayType) + json.EndArray(); + } + } + } + json.EndObject(); +} +#endif // VMA_STATS_STRING_ENABLED +#endif // _VMA_ALLOCATOR_T_FUNCTIONS + + +#ifndef _VMA_PUBLIC_INTERFACE +VMA_CALL_PRE VkResult VMA_CALL_POST vmaCreateAllocator( + const VmaAllocatorCreateInfo* pCreateInfo, + VmaAllocator* pAllocator) +{ + VMA_ASSERT(pCreateInfo && pAllocator); + VMA_ASSERT(pCreateInfo->vulkanApiVersion == 0 || + (VK_VERSION_MAJOR(pCreateInfo->vulkanApiVersion) == 1 && VK_VERSION_MINOR(pCreateInfo->vulkanApiVersion) <= 3)); + VMA_DEBUG_LOG("vmaCreateAllocator"); + *pAllocator = vma_new(pCreateInfo->pAllocationCallbacks, VmaAllocator_T)(pCreateInfo); + VkResult result = (*pAllocator)->Init(pCreateInfo); + if(result < 0) + { + vma_delete(pCreateInfo->pAllocationCallbacks, *pAllocator); + *pAllocator = VK_NULL_HANDLE; + } + return result; +} + +VMA_CALL_PRE void VMA_CALL_POST vmaDestroyAllocator( + VmaAllocator allocator) +{ + if(allocator != VK_NULL_HANDLE) + { + VMA_DEBUG_LOG("vmaDestroyAllocator"); + VkAllocationCallbacks allocationCallbacks = allocator->m_AllocationCallbacks; // Have to copy the callbacks when destroying. 
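+
+    // Usage sketch (not part of VMA): minimal allocator creation/destruction as done from
+    // the client. instance/physicalDevice/device are placeholders created elsewhere; the
+    // assert below requires vulkanApiVersion to be a Vulkan 1.x version no newer than 1.3.
+    //
+    //     VmaAllocatorCreateInfo allocatorCreateInfo = {};
+    //     allocatorCreateInfo.vulkanApiVersion = VK_API_VERSION_1_2;
+    //     allocatorCreateInfo.instance = instance;
+    //     allocatorCreateInfo.physicalDevice = physicalDevice;
+    //     allocatorCreateInfo.device = device;
+    //     VmaAllocator allocator;
+    //     VkResult r = vmaCreateAllocator(&allocatorCreateInfo, &allocator);
+    //     // ... use the allocator ...
+    //     vmaDestroyAllocator(allocator);
+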
+ vma_delete(&allocationCallbacks, allocator); + } +} + +VMA_CALL_PRE void VMA_CALL_POST vmaGetAllocatorInfo(VmaAllocator allocator, VmaAllocatorInfo* pAllocatorInfo) +{ + VMA_ASSERT(allocator && pAllocatorInfo); + pAllocatorInfo->instance = allocator->m_hInstance; + pAllocatorInfo->physicalDevice = allocator->GetPhysicalDevice(); + pAllocatorInfo->device = allocator->m_hDevice; +} + +VMA_CALL_PRE void VMA_CALL_POST vmaGetPhysicalDeviceProperties( + VmaAllocator allocator, + const VkPhysicalDeviceProperties **ppPhysicalDeviceProperties) +{ + VMA_ASSERT(allocator && ppPhysicalDeviceProperties); + *ppPhysicalDeviceProperties = &allocator->m_PhysicalDeviceProperties; +} + +VMA_CALL_PRE void VMA_CALL_POST vmaGetMemoryProperties( + VmaAllocator allocator, + const VkPhysicalDeviceMemoryProperties** ppPhysicalDeviceMemoryProperties) +{ + VMA_ASSERT(allocator && ppPhysicalDeviceMemoryProperties); + *ppPhysicalDeviceMemoryProperties = &allocator->m_MemProps; +} + +VMA_CALL_PRE void VMA_CALL_POST vmaGetMemoryTypeProperties( + VmaAllocator allocator, + uint32_t memoryTypeIndex, + VkMemoryPropertyFlags* pFlags) +{ + VMA_ASSERT(allocator && pFlags); + VMA_ASSERT(memoryTypeIndex < allocator->GetMemoryTypeCount()); + *pFlags = allocator->m_MemProps.memoryTypes[memoryTypeIndex].propertyFlags; +} + +VMA_CALL_PRE void VMA_CALL_POST vmaSetCurrentFrameIndex( + VmaAllocator allocator, + uint32_t frameIndex) +{ + VMA_ASSERT(allocator); + + VMA_DEBUG_GLOBAL_MUTEX_LOCK + + allocator->SetCurrentFrameIndex(frameIndex); +} + +VMA_CALL_PRE void VMA_CALL_POST vmaCalculateStatistics( + VmaAllocator allocator, + VmaTotalStatistics* pStats) +{ + VMA_ASSERT(allocator && pStats); + VMA_DEBUG_GLOBAL_MUTEX_LOCK + allocator->CalculateStatistics(pStats); +} + +VMA_CALL_PRE void VMA_CALL_POST vmaGetHeapBudgets( + VmaAllocator allocator, + VmaBudget* pBudgets) +{ + VMA_ASSERT(allocator && pBudgets); + VMA_DEBUG_GLOBAL_MUTEX_LOCK + allocator->GetHeapBudgets(pBudgets, 0, allocator->GetMemoryHeapCount()); +} + +#if VMA_STATS_STRING_ENABLED + +VMA_CALL_PRE void VMA_CALL_POST vmaBuildStatsString( + VmaAllocator allocator, + char** ppStatsString, + VkBool32 detailedMap) +{ + VMA_ASSERT(allocator && ppStatsString); + VMA_DEBUG_GLOBAL_MUTEX_LOCK + + VmaStringBuilder sb(allocator->GetAllocationCallbacks()); + { + VmaBudget budgets[VK_MAX_MEMORY_HEAPS]; + allocator->GetHeapBudgets(budgets, 0, allocator->GetMemoryHeapCount()); + + VmaTotalStatistics stats; + allocator->CalculateStatistics(&stats); + + VmaJsonWriter json(allocator->GetAllocationCallbacks(), sb); + json.BeginObject(); + { + json.WriteString("General"); + json.BeginObject(); + { + const VkPhysicalDeviceProperties& deviceProperties = allocator->m_PhysicalDeviceProperties; + const VkPhysicalDeviceMemoryProperties& memoryProperties = allocator->m_MemProps; + + json.WriteString("API"); + json.WriteString("Vulkan"); + + json.WriteString("apiVersion"); + json.BeginString(); + json.ContinueString(VK_VERSION_MAJOR(deviceProperties.apiVersion)); + json.ContinueString("."); + json.ContinueString(VK_VERSION_MINOR(deviceProperties.apiVersion)); + json.ContinueString("."); + json.ContinueString(VK_VERSION_PATCH(deviceProperties.apiVersion)); + json.EndString(); + + json.WriteString("GPU"); + json.WriteString(deviceProperties.deviceName); + json.WriteString("deviceType"); + json.WriteNumber(static_cast(deviceProperties.deviceType)); + + json.WriteString("maxMemoryAllocationCount"); + json.WriteNumber(deviceProperties.limits.maxMemoryAllocationCount); + 
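+
+            // Usage sketch (not part of VMA): dumping this JSON report from an application.
+            //
+            //     char* statsString = NULL;
+            //     vmaBuildStatsString(allocator, &statsString, VK_TRUE); // VK_TRUE = detailed map
+            //     // write statsString to a file or log, then:
+            //     vmaFreeStatsString(allocator, statsString);
+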
json.WriteString("bufferImageGranularity"); + json.WriteNumber(deviceProperties.limits.bufferImageGranularity); + json.WriteString("nonCoherentAtomSize"); + json.WriteNumber(deviceProperties.limits.nonCoherentAtomSize); + + json.WriteString("memoryHeapCount"); + json.WriteNumber(memoryProperties.memoryHeapCount); + json.WriteString("memoryTypeCount"); + json.WriteNumber(memoryProperties.memoryTypeCount); + } + json.EndObject(); + } + { + json.WriteString("Total"); + VmaPrintDetailedStatistics(json, stats.total); + } + { + json.WriteString("MemoryInfo"); + json.BeginObject(); + { + for (uint32_t heapIndex = 0; heapIndex < allocator->GetMemoryHeapCount(); ++heapIndex) + { + json.BeginString("Heap "); + json.ContinueString(heapIndex); + json.EndString(); + json.BeginObject(); + { + const VkMemoryHeap& heapInfo = allocator->m_MemProps.memoryHeaps[heapIndex]; + json.WriteString("Flags"); + json.BeginArray(true); + { + if (heapInfo.flags & VK_MEMORY_HEAP_DEVICE_LOCAL_BIT) + json.WriteString("DEVICE_LOCAL"); + #if VMA_VULKAN_VERSION >= 1001000 + if (heapInfo.flags & VK_MEMORY_HEAP_MULTI_INSTANCE_BIT) + json.WriteString("MULTI_INSTANCE"); + #endif + + VkMemoryHeapFlags flags = heapInfo.flags & + ~(VK_MEMORY_HEAP_DEVICE_LOCAL_BIT + #if VMA_VULKAN_VERSION >= 1001000 + | VK_MEMORY_HEAP_MULTI_INSTANCE_BIT + #endif + ); + if (flags != 0) + json.WriteNumber(flags); + } + json.EndArray(); + + json.WriteString("Size"); + json.WriteNumber(heapInfo.size); + + json.WriteString("Budget"); + json.BeginObject(); + { + json.WriteString("BudgetBytes"); + json.WriteNumber(budgets[heapIndex].budget); + json.WriteString("UsageBytes"); + json.WriteNumber(budgets[heapIndex].usage); + } + json.EndObject(); + + json.WriteString("Stats"); + VmaPrintDetailedStatistics(json, stats.memoryHeap[heapIndex]); + + json.WriteString("MemoryPools"); + json.BeginObject(); + { + for (uint32_t typeIndex = 0; typeIndex < allocator->GetMemoryTypeCount(); ++typeIndex) + { + if (allocator->MemoryTypeIndexToHeapIndex(typeIndex) == heapIndex) + { + json.BeginString("Type "); + json.ContinueString(typeIndex); + json.EndString(); + json.BeginObject(); + { + json.WriteString("Flags"); + json.BeginArray(true); + { + VkMemoryPropertyFlags flags = allocator->m_MemProps.memoryTypes[typeIndex].propertyFlags; + if (flags & VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT) + json.WriteString("DEVICE_LOCAL"); + if (flags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) + json.WriteString("HOST_VISIBLE"); + if (flags & VK_MEMORY_PROPERTY_HOST_COHERENT_BIT) + json.WriteString("HOST_COHERENT"); + if (flags & VK_MEMORY_PROPERTY_HOST_CACHED_BIT) + json.WriteString("HOST_CACHED"); + if (flags & VK_MEMORY_PROPERTY_LAZILY_ALLOCATED_BIT) + json.WriteString("LAZILY_ALLOCATED"); + #if VMA_VULKAN_VERSION >= 1001000 + if (flags & VK_MEMORY_PROPERTY_PROTECTED_BIT) + json.WriteString("PROTECTED"); + #endif + #if VK_AMD_device_coherent_memory + if (flags & VK_MEMORY_PROPERTY_DEVICE_COHERENT_BIT_AMD_COPY) + json.WriteString("DEVICE_COHERENT_AMD"); + if (flags & VK_MEMORY_PROPERTY_DEVICE_UNCACHED_BIT_AMD_COPY) + json.WriteString("DEVICE_UNCACHED_AMD"); + #endif + + flags &= ~(VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT + #if VMA_VULKAN_VERSION >= 1001000 + | VK_MEMORY_PROPERTY_LAZILY_ALLOCATED_BIT + #endif + #if VK_AMD_device_coherent_memory + | VK_MEMORY_PROPERTY_DEVICE_COHERENT_BIT_AMD_COPY + | VK_MEMORY_PROPERTY_DEVICE_UNCACHED_BIT_AMD_COPY + #endif + | VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT + | VK_MEMORY_PROPERTY_HOST_COHERENT_BIT + | VK_MEMORY_PROPERTY_HOST_CACHED_BIT); + if (flags != 0) + 
json.WriteNumber(flags); + } + json.EndArray(); + + json.WriteString("Stats"); + VmaPrintDetailedStatistics(json, stats.memoryType[typeIndex]); + } + json.EndObject(); + } + } + + } + json.EndObject(); + } + json.EndObject(); + } + } + json.EndObject(); + } + + if (detailedMap == VK_TRUE) + allocator->PrintDetailedMap(json); + + json.EndObject(); + } + + *ppStatsString = VmaCreateStringCopy(allocator->GetAllocationCallbacks(), sb.GetData(), sb.GetLength()); +} + +VMA_CALL_PRE void VMA_CALL_POST vmaFreeStatsString( + VmaAllocator allocator, + char* pStatsString) +{ + if(pStatsString != VMA_NULL) + { + VMA_ASSERT(allocator); + VmaFreeString(allocator->GetAllocationCallbacks(), pStatsString); + } +} + +#endif // VMA_STATS_STRING_ENABLED + +/* +This function is not protected by any mutex because it just reads immutable data. +*/ +VMA_CALL_PRE VkResult VMA_CALL_POST vmaFindMemoryTypeIndex( + VmaAllocator allocator, + uint32_t memoryTypeBits, + const VmaAllocationCreateInfo* pAllocationCreateInfo, + uint32_t* pMemoryTypeIndex) +{ + VMA_ASSERT(allocator != VK_NULL_HANDLE); + VMA_ASSERT(pAllocationCreateInfo != VMA_NULL); + VMA_ASSERT(pMemoryTypeIndex != VMA_NULL); + + return allocator->FindMemoryTypeIndex(memoryTypeBits, pAllocationCreateInfo, VmaBufferImageUsage::UNKNOWN, pMemoryTypeIndex); +} + +VMA_CALL_PRE VkResult VMA_CALL_POST vmaFindMemoryTypeIndexForBufferInfo( + VmaAllocator allocator, + const VkBufferCreateInfo* pBufferCreateInfo, + const VmaAllocationCreateInfo* pAllocationCreateInfo, + uint32_t* pMemoryTypeIndex) +{ + VMA_ASSERT(allocator != VK_NULL_HANDLE); + VMA_ASSERT(pBufferCreateInfo != VMA_NULL); + VMA_ASSERT(pAllocationCreateInfo != VMA_NULL); + VMA_ASSERT(pMemoryTypeIndex != VMA_NULL); + + const VkDevice hDev = allocator->m_hDevice; + const VmaVulkanFunctions* funcs = &allocator->GetVulkanFunctions(); + VkResult res; + +#if VMA_KHR_MAINTENANCE4 || VMA_VULKAN_VERSION >= 1003000 + if(funcs->vkGetDeviceBufferMemoryRequirements) + { + // Can query straight from VkBufferCreateInfo :) + VkDeviceBufferMemoryRequirementsKHR devBufMemReq = {VK_STRUCTURE_TYPE_DEVICE_BUFFER_MEMORY_REQUIREMENTS_KHR}; + devBufMemReq.pCreateInfo = pBufferCreateInfo; + + VkMemoryRequirements2 memReq = {VK_STRUCTURE_TYPE_MEMORY_REQUIREMENTS_2}; + (*funcs->vkGetDeviceBufferMemoryRequirements)(hDev, &devBufMemReq, &memReq); + + res = allocator->FindMemoryTypeIndex( + memReq.memoryRequirements.memoryTypeBits, pAllocationCreateInfo, + VmaBufferImageUsage(*pBufferCreateInfo, allocator->m_UseKhrMaintenance5), pMemoryTypeIndex); + } + else +#endif // VMA_KHR_MAINTENANCE4 || VMA_VULKAN_VERSION >= 1003000 + { + // Must create a dummy buffer to query :( + VkBuffer hBuffer = VK_NULL_HANDLE; + res = funcs->vkCreateBuffer( + hDev, pBufferCreateInfo, allocator->GetAllocationCallbacks(), &hBuffer); + if(res == VK_SUCCESS) + { + VkMemoryRequirements memReq = {}; + funcs->vkGetBufferMemoryRequirements(hDev, hBuffer, &memReq); + + res = allocator->FindMemoryTypeIndex( + memReq.memoryTypeBits, pAllocationCreateInfo, + VmaBufferImageUsage(*pBufferCreateInfo, allocator->m_UseKhrMaintenance5), pMemoryTypeIndex); + + funcs->vkDestroyBuffer( + hDev, hBuffer, allocator->GetAllocationCallbacks()); + } + } + return res; +} + +VMA_CALL_PRE VkResult VMA_CALL_POST vmaFindMemoryTypeIndexForImageInfo( + VmaAllocator allocator, + const VkImageCreateInfo* pImageCreateInfo, + const VmaAllocationCreateInfo* pAllocationCreateInfo, + uint32_t* pMemoryTypeIndex) +{ + VMA_ASSERT(allocator != VK_NULL_HANDLE); + VMA_ASSERT(pImageCreateInfo != 
VMA_NULL); + VMA_ASSERT(pAllocationCreateInfo != VMA_NULL); + VMA_ASSERT(pMemoryTypeIndex != VMA_NULL); + + const VkDevice hDev = allocator->m_hDevice; + const VmaVulkanFunctions* funcs = &allocator->GetVulkanFunctions(); + VkResult res; + +#if VMA_KHR_MAINTENANCE4 || VMA_VULKAN_VERSION >= 1003000 + if(funcs->vkGetDeviceImageMemoryRequirements) + { + // Can query straight from VkImageCreateInfo :) + VkDeviceImageMemoryRequirementsKHR devImgMemReq = {VK_STRUCTURE_TYPE_DEVICE_IMAGE_MEMORY_REQUIREMENTS_KHR}; + devImgMemReq.pCreateInfo = pImageCreateInfo; + VMA_ASSERT(pImageCreateInfo->tiling != VK_IMAGE_TILING_DRM_FORMAT_MODIFIER_EXT_COPY && (pImageCreateInfo->flags & VK_IMAGE_CREATE_DISJOINT_BIT_COPY) == 0 && + "Cannot use this VkImageCreateInfo with vmaFindMemoryTypeIndexForImageInfo as I don't know what to pass as VkDeviceImageMemoryRequirements::planeAspect."); + + VkMemoryRequirements2 memReq = {VK_STRUCTURE_TYPE_MEMORY_REQUIREMENTS_2}; + (*funcs->vkGetDeviceImageMemoryRequirements)(hDev, &devImgMemReq, &memReq); + + res = allocator->FindMemoryTypeIndex( + memReq.memoryRequirements.memoryTypeBits, pAllocationCreateInfo, + VmaBufferImageUsage(*pImageCreateInfo), pMemoryTypeIndex); + } + else +#endif // VMA_KHR_MAINTENANCE4 || VMA_VULKAN_VERSION >= 1003000 + { + // Must create a dummy image to query :( + VkImage hImage = VK_NULL_HANDLE; + res = funcs->vkCreateImage( + hDev, pImageCreateInfo, allocator->GetAllocationCallbacks(), &hImage); + if(res == VK_SUCCESS) + { + VkMemoryRequirements memReq = {}; + funcs->vkGetImageMemoryRequirements(hDev, hImage, &memReq); + + res = allocator->FindMemoryTypeIndex( + memReq.memoryTypeBits, pAllocationCreateInfo, + VmaBufferImageUsage(*pImageCreateInfo), pMemoryTypeIndex); + + funcs->vkDestroyImage( + hDev, hImage, allocator->GetAllocationCallbacks()); + } + } + return res; +} + +VMA_CALL_PRE VkResult VMA_CALL_POST vmaCreatePool( + VmaAllocator allocator, + const VmaPoolCreateInfo* pCreateInfo, + VmaPool* pPool) +{ + VMA_ASSERT(allocator && pCreateInfo && pPool); + + VMA_DEBUG_LOG("vmaCreatePool"); + + VMA_DEBUG_GLOBAL_MUTEX_LOCK + + return allocator->CreatePool(pCreateInfo, pPool); +} + +VMA_CALL_PRE void VMA_CALL_POST vmaDestroyPool( + VmaAllocator allocator, + VmaPool pool) +{ + VMA_ASSERT(allocator); + + if(pool == VK_NULL_HANDLE) + { + return; + } + + VMA_DEBUG_LOG("vmaDestroyPool"); + + VMA_DEBUG_GLOBAL_MUTEX_LOCK + + allocator->DestroyPool(pool); +} + +VMA_CALL_PRE void VMA_CALL_POST vmaGetPoolStatistics( + VmaAllocator allocator, + VmaPool pool, + VmaStatistics* pPoolStats) +{ + VMA_ASSERT(allocator && pool && pPoolStats); + + VMA_DEBUG_GLOBAL_MUTEX_LOCK + + allocator->GetPoolStatistics(pool, pPoolStats); +} + +VMA_CALL_PRE void VMA_CALL_POST vmaCalculatePoolStatistics( + VmaAllocator allocator, + VmaPool pool, + VmaDetailedStatistics* pPoolStats) +{ + VMA_ASSERT(allocator && pool && pPoolStats); + + VMA_DEBUG_GLOBAL_MUTEX_LOCK + + allocator->CalculatePoolStatistics(pool, pPoolStats); +} + +VMA_CALL_PRE VkResult VMA_CALL_POST vmaCheckPoolCorruption(VmaAllocator allocator, VmaPool pool) +{ + VMA_ASSERT(allocator && pool); + + VMA_DEBUG_GLOBAL_MUTEX_LOCK + + VMA_DEBUG_LOG("vmaCheckPoolCorruption"); + + return allocator->CheckPoolCorruption(pool); +} + +VMA_CALL_PRE void VMA_CALL_POST vmaGetPoolName( + VmaAllocator allocator, + VmaPool pool, + const char** ppName) +{ + VMA_ASSERT(allocator && pool && ppName); + + VMA_DEBUG_LOG("vmaGetPoolName"); + + VMA_DEBUG_GLOBAL_MUTEX_LOCK + + *ppName = pool->GetName(); +} + +VMA_CALL_PRE void VMA_CALL_POST 
vmaSetPoolName( + VmaAllocator allocator, + VmaPool pool, + const char* pName) +{ + VMA_ASSERT(allocator && pool); + + VMA_DEBUG_LOG("vmaSetPoolName"); + + VMA_DEBUG_GLOBAL_MUTEX_LOCK + + pool->SetName(pName); +} + +VMA_CALL_PRE VkResult VMA_CALL_POST vmaAllocateMemory( + VmaAllocator allocator, + const VkMemoryRequirements* pVkMemoryRequirements, + const VmaAllocationCreateInfo* pCreateInfo, + VmaAllocation* pAllocation, + VmaAllocationInfo* pAllocationInfo) +{ + VMA_ASSERT(allocator && pVkMemoryRequirements && pCreateInfo && pAllocation); + + VMA_DEBUG_LOG("vmaAllocateMemory"); + + VMA_DEBUG_GLOBAL_MUTEX_LOCK + + VkResult result = allocator->AllocateMemory( + *pVkMemoryRequirements, + false, // requiresDedicatedAllocation + false, // prefersDedicatedAllocation + VK_NULL_HANDLE, // dedicatedBuffer + VK_NULL_HANDLE, // dedicatedImage + VmaBufferImageUsage::UNKNOWN, // dedicatedBufferImageUsage + *pCreateInfo, + VMA_SUBALLOCATION_TYPE_UNKNOWN, + 1, // allocationCount + pAllocation); + + if(pAllocationInfo != VMA_NULL && result == VK_SUCCESS) + { + allocator->GetAllocationInfo(*pAllocation, pAllocationInfo); + } + + return result; +} + +VMA_CALL_PRE VkResult VMA_CALL_POST vmaAllocateMemoryPages( + VmaAllocator allocator, + const VkMemoryRequirements* pVkMemoryRequirements, + const VmaAllocationCreateInfo* pCreateInfo, + size_t allocationCount, + VmaAllocation* pAllocations, + VmaAllocationInfo* pAllocationInfo) +{ + if(allocationCount == 0) + { + return VK_SUCCESS; + } + + VMA_ASSERT(allocator && pVkMemoryRequirements && pCreateInfo && pAllocations); + + VMA_DEBUG_LOG("vmaAllocateMemoryPages"); + + VMA_DEBUG_GLOBAL_MUTEX_LOCK + + VkResult result = allocator->AllocateMemory( + *pVkMemoryRequirements, + false, // requiresDedicatedAllocation + false, // prefersDedicatedAllocation + VK_NULL_HANDLE, // dedicatedBuffer + VK_NULL_HANDLE, // dedicatedImage + VmaBufferImageUsage::UNKNOWN, // dedicatedBufferImageUsage + *pCreateInfo, + VMA_SUBALLOCATION_TYPE_UNKNOWN, + allocationCount, + pAllocations); + + if(pAllocationInfo != VMA_NULL && result == VK_SUCCESS) + { + for(size_t i = 0; i < allocationCount; ++i) + { + allocator->GetAllocationInfo(pAllocations[i], pAllocationInfo + i); + } + } + + return result; +} + +VMA_CALL_PRE VkResult VMA_CALL_POST vmaAllocateMemoryForBuffer( + VmaAllocator allocator, + VkBuffer buffer, + const VmaAllocationCreateInfo* pCreateInfo, + VmaAllocation* pAllocation, + VmaAllocationInfo* pAllocationInfo) +{ + VMA_ASSERT(allocator && buffer != VK_NULL_HANDLE && pCreateInfo && pAllocation); + + VMA_DEBUG_LOG("vmaAllocateMemoryForBuffer"); + + VMA_DEBUG_GLOBAL_MUTEX_LOCK + + VkMemoryRequirements vkMemReq = {}; + bool requiresDedicatedAllocation = false; + bool prefersDedicatedAllocation = false; + allocator->GetBufferMemoryRequirements(buffer, vkMemReq, + requiresDedicatedAllocation, + prefersDedicatedAllocation); + + VkResult result = allocator->AllocateMemory( + vkMemReq, + requiresDedicatedAllocation, + prefersDedicatedAllocation, + buffer, // dedicatedBuffer + VK_NULL_HANDLE, // dedicatedImage + VmaBufferImageUsage::UNKNOWN, // dedicatedBufferImageUsage + *pCreateInfo, + VMA_SUBALLOCATION_TYPE_BUFFER, + 1, // allocationCount + pAllocation); + + if(pAllocationInfo && result == VK_SUCCESS) + { + allocator->GetAllocationInfo(*pAllocation, pAllocationInfo); + } + + return result; +} + +VMA_CALL_PRE VkResult VMA_CALL_POST vmaAllocateMemoryForImage( + VmaAllocator allocator, + VkImage image, + const VmaAllocationCreateInfo* pCreateInfo, + VmaAllocation* pAllocation, + 
VmaAllocationInfo* pAllocationInfo) +{ + VMA_ASSERT(allocator && image != VK_NULL_HANDLE && pCreateInfo && pAllocation); + + VMA_DEBUG_LOG("vmaAllocateMemoryForImage"); + + VMA_DEBUG_GLOBAL_MUTEX_LOCK + + VkMemoryRequirements vkMemReq = {}; + bool requiresDedicatedAllocation = false; + bool prefersDedicatedAllocation = false; + allocator->GetImageMemoryRequirements(image, vkMemReq, + requiresDedicatedAllocation, prefersDedicatedAllocation); + + VkResult result = allocator->AllocateMemory( + vkMemReq, + requiresDedicatedAllocation, + prefersDedicatedAllocation, + VK_NULL_HANDLE, // dedicatedBuffer + image, // dedicatedImage + VmaBufferImageUsage::UNKNOWN, // dedicatedBufferImageUsage + *pCreateInfo, + VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN, + 1, // allocationCount + pAllocation); + + if(pAllocationInfo && result == VK_SUCCESS) + { + allocator->GetAllocationInfo(*pAllocation, pAllocationInfo); + } + + return result; +} + +VMA_CALL_PRE void VMA_CALL_POST vmaFreeMemory( + VmaAllocator allocator, + VmaAllocation allocation) +{ + VMA_ASSERT(allocator); + + if(allocation == VK_NULL_HANDLE) + { + return; + } + + VMA_DEBUG_LOG("vmaFreeMemory"); + + VMA_DEBUG_GLOBAL_MUTEX_LOCK + + allocator->FreeMemory( + 1, // allocationCount + &allocation); +} + +VMA_CALL_PRE void VMA_CALL_POST vmaFreeMemoryPages( + VmaAllocator allocator, + size_t allocationCount, + const VmaAllocation* pAllocations) +{ + if(allocationCount == 0) + { + return; + } + + VMA_ASSERT(allocator); + + VMA_DEBUG_LOG("vmaFreeMemoryPages"); + + VMA_DEBUG_GLOBAL_MUTEX_LOCK + + allocator->FreeMemory(allocationCount, pAllocations); +} + +VMA_CALL_PRE void VMA_CALL_POST vmaGetAllocationInfo( + VmaAllocator allocator, + VmaAllocation allocation, + VmaAllocationInfo* pAllocationInfo) +{ + VMA_ASSERT(allocator && allocation && pAllocationInfo); + + VMA_DEBUG_GLOBAL_MUTEX_LOCK + + allocator->GetAllocationInfo(allocation, pAllocationInfo); +} + +VMA_CALL_PRE void VMA_CALL_POST vmaGetAllocationInfo2( + VmaAllocator allocator, + VmaAllocation allocation, + VmaAllocationInfo2* pAllocationInfo) +{ + VMA_ASSERT(allocator && allocation && pAllocationInfo); + + VMA_DEBUG_GLOBAL_MUTEX_LOCK + + allocator->GetAllocationInfo2(allocation, pAllocationInfo); +} + +VMA_CALL_PRE void VMA_CALL_POST vmaSetAllocationUserData( + VmaAllocator allocator, + VmaAllocation allocation, + void* pUserData) +{ + VMA_ASSERT(allocator && allocation); + + VMA_DEBUG_GLOBAL_MUTEX_LOCK + + allocation->SetUserData(allocator, pUserData); +} + +VMA_CALL_PRE void VMA_CALL_POST vmaSetAllocationName( + VmaAllocator VMA_NOT_NULL allocator, + VmaAllocation VMA_NOT_NULL allocation, + const char* VMA_NULLABLE pName) +{ + allocation->SetName(allocator, pName); +} + +VMA_CALL_PRE void VMA_CALL_POST vmaGetAllocationMemoryProperties( + VmaAllocator VMA_NOT_NULL allocator, + VmaAllocation VMA_NOT_NULL allocation, + VkMemoryPropertyFlags* VMA_NOT_NULL pFlags) +{ + VMA_ASSERT(allocator && allocation && pFlags); + const uint32_t memTypeIndex = allocation->GetMemoryTypeIndex(); + *pFlags = allocator->m_MemProps.memoryTypes[memTypeIndex].propertyFlags; +} + +VMA_CALL_PRE VkResult VMA_CALL_POST vmaMapMemory( + VmaAllocator allocator, + VmaAllocation allocation, + void** ppData) +{ + VMA_ASSERT(allocator && allocation && ppData); + + VMA_DEBUG_GLOBAL_MUTEX_LOCK + + return allocator->Map(allocation, ppData); +} + +VMA_CALL_PRE void VMA_CALL_POST vmaUnmapMemory( + VmaAllocator allocator, + VmaAllocation allocation) +{ + VMA_ASSERT(allocator && allocation); + + VMA_DEBUG_GLOBAL_MUTEX_LOCK + + 
allocator->Unmap(allocation); +} + +VMA_CALL_PRE VkResult VMA_CALL_POST vmaFlushAllocation( + VmaAllocator allocator, + VmaAllocation allocation, + VkDeviceSize offset, + VkDeviceSize size) +{ + VMA_ASSERT(allocator && allocation); + + VMA_DEBUG_LOG("vmaFlushAllocation"); + + VMA_DEBUG_GLOBAL_MUTEX_LOCK + + return allocator->FlushOrInvalidateAllocation(allocation, offset, size, VMA_CACHE_FLUSH); +} + +VMA_CALL_PRE VkResult VMA_CALL_POST vmaInvalidateAllocation( + VmaAllocator allocator, + VmaAllocation allocation, + VkDeviceSize offset, + VkDeviceSize size) +{ + VMA_ASSERT(allocator && allocation); + + VMA_DEBUG_LOG("vmaInvalidateAllocation"); + + VMA_DEBUG_GLOBAL_MUTEX_LOCK + + return allocator->FlushOrInvalidateAllocation(allocation, offset, size, VMA_CACHE_INVALIDATE); +} + +VMA_CALL_PRE VkResult VMA_CALL_POST vmaFlushAllocations( + VmaAllocator allocator, + uint32_t allocationCount, + const VmaAllocation* allocations, + const VkDeviceSize* offsets, + const VkDeviceSize* sizes) +{ + VMA_ASSERT(allocator); + + if(allocationCount == 0) + { + return VK_SUCCESS; + } + + VMA_ASSERT(allocations); + + VMA_DEBUG_LOG("vmaFlushAllocations"); + + VMA_DEBUG_GLOBAL_MUTEX_LOCK + + return allocator->FlushOrInvalidateAllocations(allocationCount, allocations, offsets, sizes, VMA_CACHE_FLUSH); +} + +VMA_CALL_PRE VkResult VMA_CALL_POST vmaInvalidateAllocations( + VmaAllocator allocator, + uint32_t allocationCount, + const VmaAllocation* allocations, + const VkDeviceSize* offsets, + const VkDeviceSize* sizes) +{ + VMA_ASSERT(allocator); + + if(allocationCount == 0) + { + return VK_SUCCESS; + } + + VMA_ASSERT(allocations); + + VMA_DEBUG_LOG("vmaInvalidateAllocations"); + + VMA_DEBUG_GLOBAL_MUTEX_LOCK + + return allocator->FlushOrInvalidateAllocations(allocationCount, allocations, offsets, sizes, VMA_CACHE_INVALIDATE); +} + +VMA_CALL_PRE VkResult VMA_CALL_POST vmaCopyMemoryToAllocation( + VmaAllocator allocator, + const void* pSrcHostPointer, + VmaAllocation dstAllocation, + VkDeviceSize dstAllocationLocalOffset, + VkDeviceSize size) +{ + VMA_ASSERT(allocator && pSrcHostPointer && dstAllocation); + + if(size == 0) + { + return VK_SUCCESS; + } + + VMA_DEBUG_LOG("vmaCopyMemoryToAllocation"); + + VMA_DEBUG_GLOBAL_MUTEX_LOCK + + return allocator->CopyMemoryToAllocation(pSrcHostPointer, dstAllocation, dstAllocationLocalOffset, size); +} + +VMA_CALL_PRE VkResult VMA_CALL_POST vmaCopyAllocationToMemory( + VmaAllocator allocator, + VmaAllocation srcAllocation, + VkDeviceSize srcAllocationLocalOffset, + void* pDstHostPointer, + VkDeviceSize size) +{ + VMA_ASSERT(allocator && srcAllocation && pDstHostPointer); + + if(size == 0) + { + return VK_SUCCESS; + } + + VMA_DEBUG_LOG("vmaCopyAllocationToMemory"); + + VMA_DEBUG_GLOBAL_MUTEX_LOCK + + return allocator->CopyAllocationToMemory(srcAllocation, srcAllocationLocalOffset, pDstHostPointer, size); +} + +VMA_CALL_PRE VkResult VMA_CALL_POST vmaCheckCorruption( + VmaAllocator allocator, + uint32_t memoryTypeBits) +{ + VMA_ASSERT(allocator); + + VMA_DEBUG_LOG("vmaCheckCorruption"); + + VMA_DEBUG_GLOBAL_MUTEX_LOCK + + return allocator->CheckCorruption(memoryTypeBits); +} + +VMA_CALL_PRE VkResult VMA_CALL_POST vmaBeginDefragmentation( + VmaAllocator allocator, + const VmaDefragmentationInfo* pInfo, + VmaDefragmentationContext* pContext) +{ + VMA_ASSERT(allocator && pInfo && pContext); + + VMA_DEBUG_LOG("vmaBeginDefragmentation"); + + if (pInfo->pool != VMA_NULL) + { + // Check if run on supported algorithms + if (pInfo->pool->m_BlockVector.GetAlgorithm() & 
VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT) + return VK_ERROR_FEATURE_NOT_PRESENT; + } + + VMA_DEBUG_GLOBAL_MUTEX_LOCK + + *pContext = vma_new(allocator, VmaDefragmentationContext_T)(allocator, *pInfo); + return VK_SUCCESS; +} + +VMA_CALL_PRE void VMA_CALL_POST vmaEndDefragmentation( + VmaAllocator allocator, + VmaDefragmentationContext context, + VmaDefragmentationStats* pStats) +{ + VMA_ASSERT(allocator && context); + + VMA_DEBUG_LOG("vmaEndDefragmentation"); + + VMA_DEBUG_GLOBAL_MUTEX_LOCK + + if (pStats) + context->GetStats(*pStats); + vma_delete(allocator, context); +} + +VMA_CALL_PRE VkResult VMA_CALL_POST vmaBeginDefragmentationPass( + VmaAllocator VMA_NOT_NULL allocator, + VmaDefragmentationContext VMA_NOT_NULL context, + VmaDefragmentationPassMoveInfo* VMA_NOT_NULL pPassInfo) +{ + VMA_ASSERT(context && pPassInfo); + + VMA_DEBUG_LOG("vmaBeginDefragmentationPass"); + + VMA_DEBUG_GLOBAL_MUTEX_LOCK + + return context->DefragmentPassBegin(*pPassInfo); +} + +VMA_CALL_PRE VkResult VMA_CALL_POST vmaEndDefragmentationPass( + VmaAllocator VMA_NOT_NULL allocator, + VmaDefragmentationContext VMA_NOT_NULL context, + VmaDefragmentationPassMoveInfo* VMA_NOT_NULL pPassInfo) +{ + VMA_ASSERT(context && pPassInfo); + + VMA_DEBUG_LOG("vmaEndDefragmentationPass"); + + VMA_DEBUG_GLOBAL_MUTEX_LOCK + + return context->DefragmentPassEnd(*pPassInfo); +} + +VMA_CALL_PRE VkResult VMA_CALL_POST vmaBindBufferMemory( + VmaAllocator allocator, + VmaAllocation allocation, + VkBuffer buffer) +{ + VMA_ASSERT(allocator && allocation && buffer); + + VMA_DEBUG_LOG("vmaBindBufferMemory"); + + VMA_DEBUG_GLOBAL_MUTEX_LOCK + + return allocator->BindBufferMemory(allocation, 0, buffer, VMA_NULL); +} + +VMA_CALL_PRE VkResult VMA_CALL_POST vmaBindBufferMemory2( + VmaAllocator allocator, + VmaAllocation allocation, + VkDeviceSize allocationLocalOffset, + VkBuffer buffer, + const void* pNext) +{ + VMA_ASSERT(allocator && allocation && buffer); + + VMA_DEBUG_LOG("vmaBindBufferMemory2"); + + VMA_DEBUG_GLOBAL_MUTEX_LOCK + + return allocator->BindBufferMemory(allocation, allocationLocalOffset, buffer, pNext); +} + +VMA_CALL_PRE VkResult VMA_CALL_POST vmaBindImageMemory( + VmaAllocator allocator, + VmaAllocation allocation, + VkImage image) +{ + VMA_ASSERT(allocator && allocation && image); + + VMA_DEBUG_LOG("vmaBindImageMemory"); + + VMA_DEBUG_GLOBAL_MUTEX_LOCK + + return allocator->BindImageMemory(allocation, 0, image, VMA_NULL); +} + +VMA_CALL_PRE VkResult VMA_CALL_POST vmaBindImageMemory2( + VmaAllocator allocator, + VmaAllocation allocation, + VkDeviceSize allocationLocalOffset, + VkImage image, + const void* pNext) +{ + VMA_ASSERT(allocator && allocation && image); + + VMA_DEBUG_LOG("vmaBindImageMemory2"); + + VMA_DEBUG_GLOBAL_MUTEX_LOCK + + return allocator->BindImageMemory(allocation, allocationLocalOffset, image, pNext); +} + +VMA_CALL_PRE VkResult VMA_CALL_POST vmaCreateBuffer( + VmaAllocator allocator, + const VkBufferCreateInfo* pBufferCreateInfo, + const VmaAllocationCreateInfo* pAllocationCreateInfo, + VkBuffer* pBuffer, + VmaAllocation* pAllocation, + VmaAllocationInfo* pAllocationInfo) +{ + VMA_ASSERT(allocator && pBufferCreateInfo && pAllocationCreateInfo && pBuffer && pAllocation); + + if(pBufferCreateInfo->size == 0) + { + return VK_ERROR_INITIALIZATION_FAILED; + } + if((pBufferCreateInfo->usage & VK_BUFFER_USAGE_SHADER_DEVICE_ADDRESS_BIT_COPY) != 0 && + !allocator->m_UseKhrBufferDeviceAddress) + { + VMA_ASSERT(0 && "Creating a buffer with VK_BUFFER_USAGE_SHADER_DEVICE_ADDRESS_BIT is not valid if 
VMA_ALLOCATOR_CREATE_BUFFER_DEVICE_ADDRESS_BIT was not used."); + return VK_ERROR_INITIALIZATION_FAILED; + } + + VMA_DEBUG_LOG("vmaCreateBuffer"); + + VMA_DEBUG_GLOBAL_MUTEX_LOCK + + *pBuffer = VK_NULL_HANDLE; + *pAllocation = VK_NULL_HANDLE; + + // 1. Create VkBuffer. + VkResult res = (*allocator->GetVulkanFunctions().vkCreateBuffer)( + allocator->m_hDevice, + pBufferCreateInfo, + allocator->GetAllocationCallbacks(), + pBuffer); + if(res >= 0) + { + // 2. vkGetBufferMemoryRequirements. + VkMemoryRequirements vkMemReq = {}; + bool requiresDedicatedAllocation = false; + bool prefersDedicatedAllocation = false; + allocator->GetBufferMemoryRequirements(*pBuffer, vkMemReq, + requiresDedicatedAllocation, prefersDedicatedAllocation); + + // 3. Allocate memory using allocator. + res = allocator->AllocateMemory( + vkMemReq, + requiresDedicatedAllocation, + prefersDedicatedAllocation, + *pBuffer, // dedicatedBuffer + VK_NULL_HANDLE, // dedicatedImage + VmaBufferImageUsage(*pBufferCreateInfo, allocator->m_UseKhrMaintenance5), // dedicatedBufferImageUsage + *pAllocationCreateInfo, + VMA_SUBALLOCATION_TYPE_BUFFER, + 1, // allocationCount + pAllocation); + + if(res >= 0) + { + // 3. Bind buffer with memory. + if((pAllocationCreateInfo->flags & VMA_ALLOCATION_CREATE_DONT_BIND_BIT) == 0) + { + res = allocator->BindBufferMemory(*pAllocation, 0, *pBuffer, VMA_NULL); + } + if(res >= 0) + { + // All steps succeeded. + #if VMA_STATS_STRING_ENABLED + (*pAllocation)->InitBufferUsage(*pBufferCreateInfo, allocator->m_UseKhrMaintenance5); + #endif + if(pAllocationInfo != VMA_NULL) + { + allocator->GetAllocationInfo(*pAllocation, pAllocationInfo); + } + + return VK_SUCCESS; + } + allocator->FreeMemory( + 1, // allocationCount + pAllocation); + *pAllocation = VK_NULL_HANDLE; + (*allocator->GetVulkanFunctions().vkDestroyBuffer)(allocator->m_hDevice, *pBuffer, allocator->GetAllocationCallbacks()); + *pBuffer = VK_NULL_HANDLE; + return res; + } + (*allocator->GetVulkanFunctions().vkDestroyBuffer)(allocator->m_hDevice, *pBuffer, allocator->GetAllocationCallbacks()); + *pBuffer = VK_NULL_HANDLE; + return res; + } + return res; +} + +VMA_CALL_PRE VkResult VMA_CALL_POST vmaCreateBufferWithAlignment( + VmaAllocator allocator, + const VkBufferCreateInfo* pBufferCreateInfo, + const VmaAllocationCreateInfo* pAllocationCreateInfo, + VkDeviceSize minAlignment, + VkBuffer* pBuffer, + VmaAllocation* pAllocation, + VmaAllocationInfo* pAllocationInfo) +{ + VMA_ASSERT(allocator && pBufferCreateInfo && pAllocationCreateInfo && VmaIsPow2(minAlignment) && pBuffer && pAllocation); + + if(pBufferCreateInfo->size == 0) + { + return VK_ERROR_INITIALIZATION_FAILED; + } + if((pBufferCreateInfo->usage & VK_BUFFER_USAGE_SHADER_DEVICE_ADDRESS_BIT_COPY) != 0 && + !allocator->m_UseKhrBufferDeviceAddress) + { + VMA_ASSERT(0 && "Creating a buffer with VK_BUFFER_USAGE_SHADER_DEVICE_ADDRESS_BIT is not valid if VMA_ALLOCATOR_CREATE_BUFFER_DEVICE_ADDRESS_BIT was not used."); + return VK_ERROR_INITIALIZATION_FAILED; + } + + VMA_DEBUG_LOG("vmaCreateBufferWithAlignment"); + + VMA_DEBUG_GLOBAL_MUTEX_LOCK + + *pBuffer = VK_NULL_HANDLE; + *pAllocation = VK_NULL_HANDLE; + + // 1. Create VkBuffer. + VkResult res = (*allocator->GetVulkanFunctions().vkCreateBuffer)( + allocator->m_hDevice, + pBufferCreateInfo, + allocator->GetAllocationCallbacks(), + pBuffer); + if(res >= 0) + { + // 2. vkGetBufferMemoryRequirements. 
+ VkMemoryRequirements vkMemReq = {}; + bool requiresDedicatedAllocation = false; + bool prefersDedicatedAllocation = false; + allocator->GetBufferMemoryRequirements(*pBuffer, vkMemReq, + requiresDedicatedAllocation, prefersDedicatedAllocation); + + // 2a. Include minAlignment + vkMemReq.alignment = VMA_MAX(vkMemReq.alignment, minAlignment); + + // 3. Allocate memory using allocator. + res = allocator->AllocateMemory( + vkMemReq, + requiresDedicatedAllocation, + prefersDedicatedAllocation, + *pBuffer, // dedicatedBuffer + VK_NULL_HANDLE, // dedicatedImage + VmaBufferImageUsage(*pBufferCreateInfo, allocator->m_UseKhrMaintenance5), // dedicatedBufferImageUsage + *pAllocationCreateInfo, + VMA_SUBALLOCATION_TYPE_BUFFER, + 1, // allocationCount + pAllocation); + + if(res >= 0) + { + // 3. Bind buffer with memory. + if((pAllocationCreateInfo->flags & VMA_ALLOCATION_CREATE_DONT_BIND_BIT) == 0) + { + res = allocator->BindBufferMemory(*pAllocation, 0, *pBuffer, VMA_NULL); + } + if(res >= 0) + { + // All steps succeeded. + #if VMA_STATS_STRING_ENABLED + (*pAllocation)->InitBufferUsage(*pBufferCreateInfo, allocator->m_UseKhrMaintenance5); + #endif + if(pAllocationInfo != VMA_NULL) + { + allocator->GetAllocationInfo(*pAllocation, pAllocationInfo); + } + + return VK_SUCCESS; + } + allocator->FreeMemory( + 1, // allocationCount + pAllocation); + *pAllocation = VK_NULL_HANDLE; + (*allocator->GetVulkanFunctions().vkDestroyBuffer)(allocator->m_hDevice, *pBuffer, allocator->GetAllocationCallbacks()); + *pBuffer = VK_NULL_HANDLE; + return res; + } + (*allocator->GetVulkanFunctions().vkDestroyBuffer)(allocator->m_hDevice, *pBuffer, allocator->GetAllocationCallbacks()); + *pBuffer = VK_NULL_HANDLE; + return res; + } + return res; +} + +VMA_CALL_PRE VkResult VMA_CALL_POST vmaCreateAliasingBuffer( + VmaAllocator VMA_NOT_NULL allocator, + VmaAllocation VMA_NOT_NULL allocation, + const VkBufferCreateInfo* VMA_NOT_NULL pBufferCreateInfo, + VkBuffer VMA_NULLABLE_NON_DISPATCHABLE* VMA_NOT_NULL pBuffer) +{ + return vmaCreateAliasingBuffer2(allocator, allocation, 0, pBufferCreateInfo, pBuffer); +} + +VMA_CALL_PRE VkResult VMA_CALL_POST vmaCreateAliasingBuffer2( + VmaAllocator VMA_NOT_NULL allocator, + VmaAllocation VMA_NOT_NULL allocation, + VkDeviceSize allocationLocalOffset, + const VkBufferCreateInfo* VMA_NOT_NULL pBufferCreateInfo, + VkBuffer VMA_NULLABLE_NON_DISPATCHABLE* VMA_NOT_NULL pBuffer) +{ + VMA_ASSERT(allocator && pBufferCreateInfo && pBuffer && allocation); + VMA_ASSERT(allocationLocalOffset + pBufferCreateInfo->size <= allocation->GetSize()); + + VMA_DEBUG_LOG("vmaCreateAliasingBuffer2"); + + *pBuffer = VK_NULL_HANDLE; + + if (pBufferCreateInfo->size == 0) + { + return VK_ERROR_INITIALIZATION_FAILED; + } + if ((pBufferCreateInfo->usage & VK_BUFFER_USAGE_SHADER_DEVICE_ADDRESS_BIT_COPY) != 0 && + !allocator->m_UseKhrBufferDeviceAddress) + { + VMA_ASSERT(0 && "Creating a buffer with VK_BUFFER_USAGE_SHADER_DEVICE_ADDRESS_BIT is not valid if VMA_ALLOCATOR_CREATE_BUFFER_DEVICE_ADDRESS_BIT was not used."); + return VK_ERROR_INITIALIZATION_FAILED; + } + + VMA_DEBUG_GLOBAL_MUTEX_LOCK + + // 1. Create VkBuffer. + VkResult res = (*allocator->GetVulkanFunctions().vkCreateBuffer)( + allocator->m_hDevice, + pBufferCreateInfo, + allocator->GetAllocationCallbacks(), + pBuffer); + if (res >= 0) + { + // 2. Bind buffer with memory. 
+ res = allocator->BindBufferMemory(allocation, allocationLocalOffset, *pBuffer, VMA_NULL); + if (res >= 0) + { + return VK_SUCCESS; + } + (*allocator->GetVulkanFunctions().vkDestroyBuffer)(allocator->m_hDevice, *pBuffer, allocator->GetAllocationCallbacks()); + } + return res; +} + +VMA_CALL_PRE void VMA_CALL_POST vmaDestroyBuffer( + VmaAllocator allocator, + VkBuffer buffer, + VmaAllocation allocation) +{ + VMA_ASSERT(allocator); + + if(buffer == VK_NULL_HANDLE && allocation == VK_NULL_HANDLE) + { + return; + } + + VMA_DEBUG_LOG("vmaDestroyBuffer"); + + VMA_DEBUG_GLOBAL_MUTEX_LOCK + + if(buffer != VK_NULL_HANDLE) + { + (*allocator->GetVulkanFunctions().vkDestroyBuffer)(allocator->m_hDevice, buffer, allocator->GetAllocationCallbacks()); + } + + if(allocation != VK_NULL_HANDLE) + { + allocator->FreeMemory( + 1, // allocationCount + &allocation); + } +} + +VMA_CALL_PRE VkResult VMA_CALL_POST vmaCreateImage( + VmaAllocator allocator, + const VkImageCreateInfo* pImageCreateInfo, + const VmaAllocationCreateInfo* pAllocationCreateInfo, + VkImage* pImage, + VmaAllocation* pAllocation, + VmaAllocationInfo* pAllocationInfo) +{ + VMA_ASSERT(allocator && pImageCreateInfo && pAllocationCreateInfo && pImage && pAllocation); + + if(pImageCreateInfo->extent.width == 0 || + pImageCreateInfo->extent.height == 0 || + pImageCreateInfo->extent.depth == 0 || + pImageCreateInfo->mipLevels == 0 || + pImageCreateInfo->arrayLayers == 0) + { + return VK_ERROR_INITIALIZATION_FAILED; + } + + VMA_DEBUG_LOG("vmaCreateImage"); + + VMA_DEBUG_GLOBAL_MUTEX_LOCK + + *pImage = VK_NULL_HANDLE; + *pAllocation = VK_NULL_HANDLE; + + // 1. Create VkImage. + VkResult res = (*allocator->GetVulkanFunctions().vkCreateImage)( + allocator->m_hDevice, + pImageCreateInfo, + allocator->GetAllocationCallbacks(), + pImage); + if(res >= 0) + { + VmaSuballocationType suballocType = pImageCreateInfo->tiling == VK_IMAGE_TILING_OPTIMAL ? + VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL : + VMA_SUBALLOCATION_TYPE_IMAGE_LINEAR; + + // 2. Allocate memory using allocator. + VkMemoryRequirements vkMemReq = {}; + bool requiresDedicatedAllocation = false; + bool prefersDedicatedAllocation = false; + allocator->GetImageMemoryRequirements(*pImage, vkMemReq, + requiresDedicatedAllocation, prefersDedicatedAllocation); + + res = allocator->AllocateMemory( + vkMemReq, + requiresDedicatedAllocation, + prefersDedicatedAllocation, + VK_NULL_HANDLE, // dedicatedBuffer + *pImage, // dedicatedImage + VmaBufferImageUsage(*pImageCreateInfo), // dedicatedBufferImageUsage + *pAllocationCreateInfo, + suballocType, + 1, // allocationCount + pAllocation); + + if(res >= 0) + { + // 3. Bind image with memory. + if((pAllocationCreateInfo->flags & VMA_ALLOCATION_CREATE_DONT_BIND_BIT) == 0) + { + res = allocator->BindImageMemory(*pAllocation, 0, *pImage, VMA_NULL); + } + if(res >= 0) + { + // All steps succeeded. 
+ #if VMA_STATS_STRING_ENABLED + (*pAllocation)->InitImageUsage(*pImageCreateInfo); + #endif + if(pAllocationInfo != VMA_NULL) + { + allocator->GetAllocationInfo(*pAllocation, pAllocationInfo); + } + + return VK_SUCCESS; + } + allocator->FreeMemory( + 1, // allocationCount + pAllocation); + *pAllocation = VK_NULL_HANDLE; + (*allocator->GetVulkanFunctions().vkDestroyImage)(allocator->m_hDevice, *pImage, allocator->GetAllocationCallbacks()); + *pImage = VK_NULL_HANDLE; + return res; + } + (*allocator->GetVulkanFunctions().vkDestroyImage)(allocator->m_hDevice, *pImage, allocator->GetAllocationCallbacks()); + *pImage = VK_NULL_HANDLE; + return res; + } + return res; +} + +VMA_CALL_PRE VkResult VMA_CALL_POST vmaCreateAliasingImage( + VmaAllocator VMA_NOT_NULL allocator, + VmaAllocation VMA_NOT_NULL allocation, + const VkImageCreateInfo* VMA_NOT_NULL pImageCreateInfo, + VkImage VMA_NULLABLE_NON_DISPATCHABLE* VMA_NOT_NULL pImage) +{ + return vmaCreateAliasingImage2(allocator, allocation, 0, pImageCreateInfo, pImage); +} + +VMA_CALL_PRE VkResult VMA_CALL_POST vmaCreateAliasingImage2( + VmaAllocator VMA_NOT_NULL allocator, + VmaAllocation VMA_NOT_NULL allocation, + VkDeviceSize allocationLocalOffset, + const VkImageCreateInfo* VMA_NOT_NULL pImageCreateInfo, + VkImage VMA_NULLABLE_NON_DISPATCHABLE* VMA_NOT_NULL pImage) +{ + VMA_ASSERT(allocator && pImageCreateInfo && pImage && allocation); + + *pImage = VK_NULL_HANDLE; + + VMA_DEBUG_LOG("vmaCreateImage2"); + + if (pImageCreateInfo->extent.width == 0 || + pImageCreateInfo->extent.height == 0 || + pImageCreateInfo->extent.depth == 0 || + pImageCreateInfo->mipLevels == 0 || + pImageCreateInfo->arrayLayers == 0) + { + return VK_ERROR_INITIALIZATION_FAILED; + } + + VMA_DEBUG_GLOBAL_MUTEX_LOCK + + // 1. Create VkImage. + VkResult res = (*allocator->GetVulkanFunctions().vkCreateImage)( + allocator->m_hDevice, + pImageCreateInfo, + allocator->GetAllocationCallbacks(), + pImage); + if (res >= 0) + { + // 2. Bind image with memory. 
+ res = allocator->BindImageMemory(allocation, allocationLocalOffset, *pImage, VMA_NULL); + if (res >= 0) + { + return VK_SUCCESS; + } + (*allocator->GetVulkanFunctions().vkDestroyImage)(allocator->m_hDevice, *pImage, allocator->GetAllocationCallbacks()); + } + return res; +} + +VMA_CALL_PRE void VMA_CALL_POST vmaDestroyImage( + VmaAllocator VMA_NOT_NULL allocator, + VkImage VMA_NULLABLE_NON_DISPATCHABLE image, + VmaAllocation VMA_NULLABLE allocation) +{ + VMA_ASSERT(allocator); + + if(image == VK_NULL_HANDLE && allocation == VK_NULL_HANDLE) + { + return; + } + + VMA_DEBUG_LOG("vmaDestroyImage"); + + VMA_DEBUG_GLOBAL_MUTEX_LOCK + + if(image != VK_NULL_HANDLE) + { + (*allocator->GetVulkanFunctions().vkDestroyImage)(allocator->m_hDevice, image, allocator->GetAllocationCallbacks()); + } + if(allocation != VK_NULL_HANDLE) + { + allocator->FreeMemory( + 1, // allocationCount + &allocation); + } +} + +VMA_CALL_PRE VkResult VMA_CALL_POST vmaCreateVirtualBlock( + const VmaVirtualBlockCreateInfo* VMA_NOT_NULL pCreateInfo, + VmaVirtualBlock VMA_NULLABLE * VMA_NOT_NULL pVirtualBlock) +{ + VMA_ASSERT(pCreateInfo && pVirtualBlock); + VMA_ASSERT(pCreateInfo->size > 0); + VMA_DEBUG_LOG("vmaCreateVirtualBlock"); + VMA_DEBUG_GLOBAL_MUTEX_LOCK; + *pVirtualBlock = vma_new(pCreateInfo->pAllocationCallbacks, VmaVirtualBlock_T)(*pCreateInfo); + VkResult res = (*pVirtualBlock)->Init(); + if(res < 0) + { + vma_delete(pCreateInfo->pAllocationCallbacks, *pVirtualBlock); + *pVirtualBlock = VK_NULL_HANDLE; + } + return res; +} + +VMA_CALL_PRE void VMA_CALL_POST vmaDestroyVirtualBlock(VmaVirtualBlock VMA_NULLABLE virtualBlock) +{ + if(virtualBlock != VK_NULL_HANDLE) + { + VMA_DEBUG_LOG("vmaDestroyVirtualBlock"); + VMA_DEBUG_GLOBAL_MUTEX_LOCK; + VkAllocationCallbacks allocationCallbacks = virtualBlock->m_AllocationCallbacks; // Have to copy the callbacks when destroying. + vma_delete(&allocationCallbacks, virtualBlock); + } +} + +VMA_CALL_PRE VkBool32 VMA_CALL_POST vmaIsVirtualBlockEmpty(VmaVirtualBlock VMA_NOT_NULL virtualBlock) +{ + VMA_ASSERT(virtualBlock != VK_NULL_HANDLE); + VMA_DEBUG_LOG("vmaIsVirtualBlockEmpty"); + VMA_DEBUG_GLOBAL_MUTEX_LOCK; + return virtualBlock->IsEmpty() ? 
VK_TRUE : VK_FALSE; +} + +VMA_CALL_PRE void VMA_CALL_POST vmaGetVirtualAllocationInfo(VmaVirtualBlock VMA_NOT_NULL virtualBlock, + VmaVirtualAllocation VMA_NOT_NULL_NON_DISPATCHABLE allocation, VmaVirtualAllocationInfo* VMA_NOT_NULL pVirtualAllocInfo) +{ + VMA_ASSERT(virtualBlock != VK_NULL_HANDLE && pVirtualAllocInfo != VMA_NULL); + VMA_DEBUG_LOG("vmaGetVirtualAllocationInfo"); + VMA_DEBUG_GLOBAL_MUTEX_LOCK; + virtualBlock->GetAllocationInfo(allocation, *pVirtualAllocInfo); +} + +VMA_CALL_PRE VkResult VMA_CALL_POST vmaVirtualAllocate(VmaVirtualBlock VMA_NOT_NULL virtualBlock, + const VmaVirtualAllocationCreateInfo* VMA_NOT_NULL pCreateInfo, VmaVirtualAllocation VMA_NULLABLE_NON_DISPATCHABLE* VMA_NOT_NULL pAllocation, + VkDeviceSize* VMA_NULLABLE pOffset) +{ + VMA_ASSERT(virtualBlock != VK_NULL_HANDLE && pCreateInfo != VMA_NULL && pAllocation != VMA_NULL); + VMA_DEBUG_LOG("vmaVirtualAllocate"); + VMA_DEBUG_GLOBAL_MUTEX_LOCK; + return virtualBlock->Allocate(*pCreateInfo, *pAllocation, pOffset); +} + +VMA_CALL_PRE void VMA_CALL_POST vmaVirtualFree(VmaVirtualBlock VMA_NOT_NULL virtualBlock, VmaVirtualAllocation VMA_NULLABLE_NON_DISPATCHABLE allocation) +{ + if(allocation != VK_NULL_HANDLE) + { + VMA_ASSERT(virtualBlock != VK_NULL_HANDLE); + VMA_DEBUG_LOG("vmaVirtualFree"); + VMA_DEBUG_GLOBAL_MUTEX_LOCK; + virtualBlock->Free(allocation); + } +} + +VMA_CALL_PRE void VMA_CALL_POST vmaClearVirtualBlock(VmaVirtualBlock VMA_NOT_NULL virtualBlock) +{ + VMA_ASSERT(virtualBlock != VK_NULL_HANDLE); + VMA_DEBUG_LOG("vmaClearVirtualBlock"); + VMA_DEBUG_GLOBAL_MUTEX_LOCK; + virtualBlock->Clear(); +} + +VMA_CALL_PRE void VMA_CALL_POST vmaSetVirtualAllocationUserData(VmaVirtualBlock VMA_NOT_NULL virtualBlock, + VmaVirtualAllocation VMA_NOT_NULL_NON_DISPATCHABLE allocation, void* VMA_NULLABLE pUserData) +{ + VMA_ASSERT(virtualBlock != VK_NULL_HANDLE); + VMA_DEBUG_LOG("vmaSetVirtualAllocationUserData"); + VMA_DEBUG_GLOBAL_MUTEX_LOCK; + virtualBlock->SetAllocationUserData(allocation, pUserData); +} + +VMA_CALL_PRE void VMA_CALL_POST vmaGetVirtualBlockStatistics(VmaVirtualBlock VMA_NOT_NULL virtualBlock, + VmaStatistics* VMA_NOT_NULL pStats) +{ + VMA_ASSERT(virtualBlock != VK_NULL_HANDLE && pStats != VMA_NULL); + VMA_DEBUG_LOG("vmaGetVirtualBlockStatistics"); + VMA_DEBUG_GLOBAL_MUTEX_LOCK; + virtualBlock->GetStatistics(*pStats); +} + +VMA_CALL_PRE void VMA_CALL_POST vmaCalculateVirtualBlockStatistics(VmaVirtualBlock VMA_NOT_NULL virtualBlock, + VmaDetailedStatistics* VMA_NOT_NULL pStats) +{ + VMA_ASSERT(virtualBlock != VK_NULL_HANDLE && pStats != VMA_NULL); + VMA_DEBUG_LOG("vmaCalculateVirtualBlockStatistics"); + VMA_DEBUG_GLOBAL_MUTEX_LOCK; + virtualBlock->CalculateDetailedStatistics(*pStats); +} + +#if VMA_STATS_STRING_ENABLED + +VMA_CALL_PRE void VMA_CALL_POST vmaBuildVirtualBlockStatsString(VmaVirtualBlock VMA_NOT_NULL virtualBlock, + char* VMA_NULLABLE * VMA_NOT_NULL ppStatsString, VkBool32 detailedMap) +{ + VMA_ASSERT(virtualBlock != VK_NULL_HANDLE && ppStatsString != VMA_NULL); + VMA_DEBUG_GLOBAL_MUTEX_LOCK; + const VkAllocationCallbacks* allocationCallbacks = virtualBlock->GetAllocationCallbacks(); + VmaStringBuilder sb(allocationCallbacks); + virtualBlock->BuildStatsString(detailedMap != VK_FALSE, sb); + *ppStatsString = VmaCreateStringCopy(allocationCallbacks, sb.GetData(), sb.GetLength()); +} + +VMA_CALL_PRE void VMA_CALL_POST vmaFreeVirtualBlockStatsString(VmaVirtualBlock VMA_NOT_NULL virtualBlock, + char* VMA_NULLABLE pStatsString) +{ + if(pStatsString != VMA_NULL) + { + VMA_ASSERT(virtualBlock != 
VK_NULL_HANDLE);
+        VMA_DEBUG_GLOBAL_MUTEX_LOCK;
+        VmaFreeString(virtualBlock->GetAllocationCallbacks(), pStatsString);
+    }
+}
+#if VMA_EXTERNAL_MEMORY_WIN32
+VMA_CALL_PRE VkResult VMA_CALL_POST vmaGetMemoryWin32Handle(VmaAllocator VMA_NOT_NULL allocator,
+    VmaAllocation VMA_NOT_NULL allocation, HANDLE hTargetProcess, HANDLE* VMA_NOT_NULL pHandle)
+{
+    VMA_ASSERT(allocator && allocation && pHandle);
+    VMA_DEBUG_GLOBAL_MUTEX_LOCK;
+    return allocation->GetWin32Handle(allocator, hTargetProcess, pHandle);
+}
+#endif // VMA_EXTERNAL_MEMORY_WIN32
+#endif // VMA_STATS_STRING_ENABLED
+#endif // _VMA_PUBLIC_INTERFACE
+#endif // VMA_IMPLEMENTATION
+
+/**
+\page quick_start Quick start
+
+\section quick_start_project_setup Project setup
+
+Vulkan Memory Allocator comes in the form of a "stb-style" single header file.
+While you can pull the entire repository e.g. as a Git submodule, a CMake script is also provided,
+so you don't need to build it as a separate library project.
+You can add the file "vk_mem_alloc.h" directly to your project and commit it to your code repository next to your other source files.
+
+"Single header" doesn't mean that everything is contained in C/C++ declarations,
+as it tends to be in the case of inline functions or C++ templates.
+It means that the implementation is bundled with the interface in a single file and needs to be extracted using a preprocessor macro.
+If you don't do it properly, it will result in linker errors.
+
+To do it properly:
+
+-# Include the "vk_mem_alloc.h" file in each CPP file where you want to use the library.
+   This includes declarations of all members of the library.
+-# In exactly one CPP file, define the following macro before this include.
+   It also enables the internal definitions.
+
+\code
+#define VMA_IMPLEMENTATION
+#include "vk_mem_alloc.h"
+\endcode
+
+It may be a good idea to create a dedicated CPP file just for this purpose, e.g. "VmaUsage.cpp".
+
+This library includes header `<vulkan/vulkan.h>`, which in turn
+includes `<windows.h>` on Windows. If you need some specific macros defined
+before including these headers (like `WIN32_LEAN_AND_MEAN` or
+`WINVER` for Windows, `VK_USE_PLATFORM_WIN32_KHR` for Vulkan), you must define
+them before every `#include` of this library.
+It may be a good idea to create a dedicated header file for this purpose, e.g. "VmaUsage.h",
+that will be included in other source files instead of the VMA header directly.
+
+This library is written in C++, but has a C-compatible interface.
+Thus, you can include and use "vk_mem_alloc.h" in C or C++ code, but the full
+implementation with the `VMA_IMPLEMENTATION` macro must be compiled as C++, NOT as C.
+Some features of C++14 are used and required. Features of C++20 are used optionally when available.
+Some headers of the standard C and C++ library are used, but STL containers, RTTI, and C++ exceptions are not used.
+
+
+\section quick_start_initialization Initialization
+
+VMA offers a library interface in a style similar to Vulkan, with object handles like #VmaAllocation,
+structures describing parameters of objects to be created like #VmaAllocationCreateInfo,
+and error codes returned from functions using the `VkResult` type.
+
+The first and the main object that needs to be created is #VmaAllocator.
+It represents the initialization of the entire library.
+Only one such object should be created per `VkDevice`.
+You should create it at program startup, after the `VkDevice` was created, and before any device memory allocation needs to be made.
+It must be destroyed before `VkDevice` is destroyed.
+
+At program startup:
+
+-# Initialize Vulkan to have `VkInstance`, `VkPhysicalDevice`, `VkDevice` objects.
+-# Fill the VmaAllocatorCreateInfo structure and call vmaCreateAllocator() to create the #VmaAllocator object.
+
+Only the members `physicalDevice`, `device`, and `instance` are required.
+However, you should inform the library which Vulkan version you use by setting
+VmaAllocatorCreateInfo::vulkanApiVersion and which extensions you enabled
+by setting VmaAllocatorCreateInfo::flags.
+Otherwise, VMA would use only features of Vulkan 1.0 core with no extensions.
+See below for details.
+
+\subsection quick_start_initialization_selecting_vulkan_version Selecting Vulkan version
+
+VMA supports Vulkan versions down to 1.0, for backward compatibility.
+If you want to use a higher version, you need to inform the library about it.
+This is a two-step process.
+
+Step 1: Compile time. By default, VMA compiles with code supporting the highest
+Vulkan version found in the included `<vulkan/vulkan.h>` that is also supported by the library.
+If this is OK, you don't need to do anything.
+However, if you want to compile VMA as if only some lower Vulkan version was available,
+define the macro `VMA_VULKAN_VERSION` before every `#include "vk_mem_alloc.h"`.
+It should have a decimal numeric value in the form ABBBCCC, where A = major, BBB = minor, CCC = patch Vulkan version.
+For example, to compile against Vulkan 1.2:
+
+\code
+#define VMA_VULKAN_VERSION 1002000 // Vulkan 1.2
+#include "vk_mem_alloc.h"
+\endcode
+
+Step 2: Runtime. Even when compiled with a higher Vulkan version available,
+VMA can use only features of a lower version, which is configurable during creation of the #VmaAllocator object.
+By default, only Vulkan 1.0 is used.
+To initialize the allocator with support for a higher Vulkan version, you need to set the member
+VmaAllocatorCreateInfo::vulkanApiVersion to an appropriate value, e.g. using constants like `VK_API_VERSION_1_2`.
+See the code sample below.
+
+\subsection quick_start_initialization_importing_vulkan_functions Importing Vulkan functions
+
+You may need to configure importing Vulkan functions. There are 3 ways to do this:
+
+-# **If you link with the Vulkan static library** (e.g. "vulkan-1.lib" on Windows):
+   - You don't need to do anything.
+   - VMA will use these, as the macro `VMA_STATIC_VULKAN_FUNCTIONS` is defined to 1 by default.
+-# **If you want VMA to fetch pointers to Vulkan functions dynamically** using `vkGetInstanceProcAddr`,
+   `vkGetDeviceProcAddr` (this is the option presented in the example below):
+   - Define `VMA_STATIC_VULKAN_FUNCTIONS` to 0, `VMA_DYNAMIC_VULKAN_FUNCTIONS` to 1.
+   - Provide pointers to these two functions via VmaVulkanFunctions::vkGetInstanceProcAddr,
+     VmaVulkanFunctions::vkGetDeviceProcAddr.
+   - The library will fetch pointers to all other functions it needs internally.
+-# **If you fetch pointers to all Vulkan functions in a custom way**, e.g. using some loader like
+   [Volk](https://github.com/zeux/volk):
+   - Define `VMA_STATIC_VULKAN_FUNCTIONS` and `VMA_DYNAMIC_VULKAN_FUNCTIONS` to 0.
+   - Pass these pointers via the structure #VmaVulkanFunctions.
+
+\subsection quick_start_initialization_enabling_extensions Enabling extensions
+
+VMA can automatically use the following Vulkan extensions.
+If you found them available on the selected physical device and enabled them
+while creating the `VkInstance` / `VkDevice` objects, inform VMA about their availability
+by setting the appropriate flags in VmaAllocatorCreateInfo::flags.
+ +Vulkan extension | VMA flag +------------------------------|----------------------------------------------------- +VK_KHR_dedicated_allocation | #VMA_ALLOCATOR_CREATE_KHR_DEDICATED_ALLOCATION_BIT +VK_KHR_bind_memory2 | #VMA_ALLOCATOR_CREATE_KHR_BIND_MEMORY2_BIT +VK_KHR_maintenance4 | #VMA_ALLOCATOR_CREATE_KHR_MAINTENANCE4_BIT +VK_KHR_maintenance5 | #VMA_ALLOCATOR_CREATE_KHR_MAINTENANCE5_BIT +VK_EXT_memory_budget | #VMA_ALLOCATOR_CREATE_EXT_MEMORY_BUDGET_BIT +VK_KHR_buffer_device_address | #VMA_ALLOCATOR_CREATE_BUFFER_DEVICE_ADDRESS_BIT +VK_EXT_memory_priority | #VMA_ALLOCATOR_CREATE_EXT_MEMORY_PRIORITY_BIT +VK_AMD_device_coherent_memory | #VMA_ALLOCATOR_CREATE_AMD_DEVICE_COHERENT_MEMORY_BIT +VK_KHR_external_memory_win32 | #VMA_ALLOCATOR_CREATE_KHR_EXTERNAL_MEMORY_WIN32_BIT + +Example with fetching pointers to Vulkan functions dynamically: + +\code +#define VMA_STATIC_VULKAN_FUNCTIONS 0 +#define VMA_DYNAMIC_VULKAN_FUNCTIONS 1 +#include "vk_mem_alloc.h" + +... + +VmaVulkanFunctions vulkanFunctions = {}; +vulkanFunctions.vkGetInstanceProcAddr = &vkGetInstanceProcAddr; +vulkanFunctions.vkGetDeviceProcAddr = &vkGetDeviceProcAddr; + +VmaAllocatorCreateInfo allocatorCreateInfo = {}; +allocatorCreateInfo.flags = VMA_ALLOCATOR_CREATE_EXT_MEMORY_BUDGET_BIT; +allocatorCreateInfo.vulkanApiVersion = VK_API_VERSION_1_2; +allocatorCreateInfo.physicalDevice = physicalDevice; +allocatorCreateInfo.device = device; +allocatorCreateInfo.instance = instance; +allocatorCreateInfo.pVulkanFunctions = &vulkanFunctions; + +VmaAllocator allocator; +vmaCreateAllocator(&allocatorCreateInfo, &allocator); + +// Entire program... + +// At the end, don't forget to: +vmaDestroyAllocator(allocator); +\endcode + + +\subsection quick_start_initialization_other_config Other configuration options + +There are additional configuration options available through preprocessor macros that you can define +before including VMA header and through parameters passed in #VmaAllocatorCreateInfo. +They include a possibility to use your own callbacks for host memory allocations (`VkAllocationCallbacks`), +callbacks for device memory allocations (instead of `vkAllocateMemory`, `vkFreeMemory`), +or your custom `VMA_ASSERT` macro, among others. +For more information, see: @ref configuration. + + +\section quick_start_resource_allocation Resource allocation + +When you want to create a buffer or image: + +-# Fill `VkBufferCreateInfo` / `VkImageCreateInfo` structure. +-# Fill VmaAllocationCreateInfo structure. +-# Call vmaCreateBuffer() / vmaCreateImage() to get `VkBuffer`/`VkImage` with memory + already allocated and bound to it, plus #VmaAllocation objects that represents its underlying memory. + +\code +VkBufferCreateInfo bufferInfo = { VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO }; +bufferInfo.size = 65536; +bufferInfo.usage = VK_BUFFER_USAGE_VERTEX_BUFFER_BIT | VK_BUFFER_USAGE_TRANSFER_DST_BIT; + +VmaAllocationCreateInfo allocInfo = {}; +allocInfo.usage = VMA_MEMORY_USAGE_AUTO; + +VkBuffer buffer; +VmaAllocation allocation; +vmaCreateBuffer(allocator, &bufferInfo, &allocInfo, &buffer, &allocation, nullptr); +\endcode + +Don't forget to destroy your buffer and allocation objects when no longer needed: + +\code +vmaDestroyBuffer(allocator, buffer, allocation); +\endcode + +If you need to map the buffer, you must set flag +#VMA_ALLOCATION_CREATE_HOST_ACCESS_SEQUENTIAL_WRITE_BIT or #VMA_ALLOCATION_CREATE_HOST_ACCESS_RANDOM_BIT +in VmaAllocationCreateInfo::flags. 
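+
+As a minimal illustrative sketch, the same buffer request can be made mappable just by adding
+one of these flags (variable names below are placeholders):
+
+\code
+VmaAllocationCreateInfo mappableAllocInfo = {};
+mappableAllocInfo.usage = VMA_MEMORY_USAGE_AUTO;
+// Required so the selected memory type is HOST_VISIBLE and the allocation can be mapped.
+mappableAllocInfo.flags = VMA_ALLOCATION_CREATE_HOST_ACCESS_SEQUENTIAL_WRITE_BIT;
+
+VkBuffer mappableBuffer;
+VmaAllocation mappableAllocation;
+vmaCreateBuffer(allocator, &bufferInfo, &mappableAllocInfo, &mappableBuffer, &mappableAllocation, nullptr);
+\endcode
+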
+There are many additional parameters that can control the choice of memory type to be used for the allocation +and other features. +For more information, see documentation chapters: @ref choosing_memory_type, @ref memory_mapping. + + +\page choosing_memory_type Choosing memory type + +Physical devices in Vulkan support various combinations of memory heaps and +types. Help with choosing correct and optimal memory type for your specific +resource is one of the key features of this library. You can use it by filling +appropriate members of VmaAllocationCreateInfo structure, as described below. +You can also combine multiple methods. + +-# If you just want to find memory type index that meets your requirements, you + can use function: vmaFindMemoryTypeIndexForBufferInfo(), + vmaFindMemoryTypeIndexForImageInfo(), vmaFindMemoryTypeIndex(). +-# If you want to allocate a region of device memory without association with any + specific image or buffer, you can use function vmaAllocateMemory(). Usage of + this function is not recommended and usually not needed. + vmaAllocateMemoryPages() function is also provided for creating multiple allocations at once, + which may be useful for sparse binding. +-# If you already have a buffer or an image created, you want to allocate memory + for it and then you will bind it yourself, you can use function + vmaAllocateMemoryForBuffer(), vmaAllocateMemoryForImage(). + For binding you should use functions: vmaBindBufferMemory(), vmaBindImageMemory() + or their extended versions: vmaBindBufferMemory2(), vmaBindImageMemory2(). +-# If you want to create a buffer or an image, allocate memory for it, and bind + them together, all in one call, you can use function vmaCreateBuffer(), + vmaCreateImage(). + This is the easiest and recommended way to use this library! + +When using 3. or 4., the library internally queries Vulkan for memory types +supported for that buffer or image (function `vkGetBufferMemoryRequirements()`) +and uses only one of these types. + +If no memory type can be found that meets all the requirements, these functions +return `VK_ERROR_FEATURE_NOT_PRESENT`. + +You can leave VmaAllocationCreateInfo structure completely filled with zeros. +It means no requirements are specified for memory type. +It is valid, although not very useful. + +\section choosing_memory_type_usage Usage + +The easiest way to specify memory requirements is to fill member +VmaAllocationCreateInfo::usage using one of the values of enum #VmaMemoryUsage. +It defines high level, common usage types. +Since version 3 of the library, it is recommended to use #VMA_MEMORY_USAGE_AUTO to let it select best memory type for your resource automatically. + +For example, if you want to create a uniform buffer that will be filled using +transfer only once or infrequently and then used for rendering every frame as a uniform buffer, you can +do it using following code. The buffer will most likely end up in a memory type with +`VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT` to be fast to access by the GPU device. 
+ +\code +VkBufferCreateInfo bufferInfo = { VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO }; +bufferInfo.size = 65536; +bufferInfo.usage = VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT | VK_BUFFER_USAGE_TRANSFER_DST_BIT; + +VmaAllocationCreateInfo allocInfo = {}; +allocInfo.usage = VMA_MEMORY_USAGE_AUTO; + +VkBuffer buffer; +VmaAllocation allocation; +vmaCreateBuffer(allocator, &bufferInfo, &allocInfo, &buffer, &allocation, nullptr); +\endcode + +If you have a preference for putting the resource in GPU (device) memory or CPU (host) memory +on systems with discrete graphics card that have the memories separate, you can use +#VMA_MEMORY_USAGE_AUTO_PREFER_DEVICE or #VMA_MEMORY_USAGE_AUTO_PREFER_HOST. + +When using `VMA_MEMORY_USAGE_AUTO*` while you want to map the allocated memory, +you also need to specify one of the host access flags: +#VMA_ALLOCATION_CREATE_HOST_ACCESS_SEQUENTIAL_WRITE_BIT or #VMA_ALLOCATION_CREATE_HOST_ACCESS_RANDOM_BIT. +This will help the library decide about preferred memory type to ensure it has `VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT` +so you can map it. + +For example, a staging buffer that will be filled via mapped pointer and then +used as a source of transfer to the buffer described previously can be created like this. +It will likely end up in a memory type that is `HOST_VISIBLE` and `HOST_COHERENT` +but not `HOST_CACHED` (meaning uncached, write-combined) and not `DEVICE_LOCAL` (meaning system RAM). + +\code +VkBufferCreateInfo stagingBufferInfo = { VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO }; +stagingBufferInfo.size = 65536; +stagingBufferInfo.usage = VK_BUFFER_USAGE_TRANSFER_SRC_BIT; + +VmaAllocationCreateInfo stagingAllocInfo = {}; +stagingAllocInfo.usage = VMA_MEMORY_USAGE_AUTO; +stagingAllocInfo.flags = VMA_ALLOCATION_CREATE_HOST_ACCESS_SEQUENTIAL_WRITE_BIT; + +VkBuffer stagingBuffer; +VmaAllocation stagingAllocation; +vmaCreateBuffer(allocator, &stagingBufferInfo, &stagingAllocInfo, &stagingBuffer, &stagingAllocation, nullptr); +\endcode + +For more examples of creating different kinds of resources, see chapter \ref usage_patterns. +See also: @ref memory_mapping. + +Usage values `VMA_MEMORY_USAGE_AUTO*` are legal to use only when the library knows +about the resource being created by having `VkBufferCreateInfo` / `VkImageCreateInfo` passed, +so they work with functions like: vmaCreateBuffer(), vmaCreateImage(), vmaFindMemoryTypeIndexForBufferInfo() etc. +If you allocate raw memory using function vmaAllocateMemory(), you have to use other means of selecting +memory type, as described below. + +\note +Old usage values (`VMA_MEMORY_USAGE_GPU_ONLY`, `VMA_MEMORY_USAGE_CPU_ONLY`, +`VMA_MEMORY_USAGE_CPU_TO_GPU`, `VMA_MEMORY_USAGE_GPU_TO_CPU`, `VMA_MEMORY_USAGE_CPU_COPY`) +are still available and work same way as in previous versions of the library +for backward compatibility, but they are deprecated. + +\section choosing_memory_type_required_preferred_flags Required and preferred flags + +You can specify more detailed requirements by filling members +VmaAllocationCreateInfo::requiredFlags and VmaAllocationCreateInfo::preferredFlags +with a combination of bits from enum `VkMemoryPropertyFlags`. 
For example, +if you want to create a buffer that will be persistently mapped on host (so it +must be `HOST_VISIBLE`) and preferably will also be `HOST_COHERENT` and `HOST_CACHED`, +use following code: + +\code +VmaAllocationCreateInfo allocInfo = {}; +allocInfo.requiredFlags = VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT; +allocInfo.preferredFlags = VK_MEMORY_PROPERTY_HOST_COHERENT_BIT | VK_MEMORY_PROPERTY_HOST_CACHED_BIT; +allocInfo.flags = VMA_ALLOCATION_CREATE_HOST_ACCESS_RANDOM_BIT | VMA_ALLOCATION_CREATE_MAPPED_BIT; + +VkBuffer buffer; +VmaAllocation allocation; +vmaCreateBuffer(allocator, &bufferInfo, &allocInfo, &buffer, &allocation, nullptr); +\endcode + +A memory type is chosen that has all the required flags and as many preferred +flags set as possible. + +Value passed in VmaAllocationCreateInfo::usage is internally converted to a set of required and preferred flags, +plus some extra "magic" (heuristics). + +\section choosing_memory_type_explicit_memory_types Explicit memory types + +If you inspected memory types available on the physical device and you have +a preference for memory types that you want to use, you can fill member +VmaAllocationCreateInfo::memoryTypeBits. It is a bit mask, where each bit set +means that a memory type with that index is allowed to be used for the +allocation. Special value 0, just like `UINT32_MAX`, means there are no +restrictions to memory type index. + +Please note that this member is NOT just a memory type index. +Still you can use it to choose just one, specific memory type. +For example, if you already determined that your buffer should be created in +memory type 2, use following code: + +\code +uint32_t memoryTypeIndex = 2; + +VmaAllocationCreateInfo allocInfo = {}; +allocInfo.memoryTypeBits = 1u << memoryTypeIndex; + +VkBuffer buffer; +VmaAllocation allocation; +vmaCreateBuffer(allocator, &bufferInfo, &allocInfo, &buffer, &allocation, nullptr); +\endcode + +You can also use this parameter to exclude some memory types. +If you inspect memory heaps and types available on the current physical device and +you determine that for some reason you don't want to use a specific memory type for the allocation, +you can enable automatic memory type selection but exclude certain memory type or types +by setting all bits of `memoryTypeBits` to 1 except the ones you choose. + +\code +// ... +uint32_t excludedMemoryTypeIndex = 2; +VmaAllocationCreateInfo allocInfo = {}; +allocInfo.usage = VMA_MEMORY_USAGE_AUTO; +allocInfo.memoryTypeBits = ~(1u << excludedMemoryTypeIndex); +// ... +\endcode + + +\section choosing_memory_type_custom_memory_pools Custom memory pools + +If you allocate from custom memory pool, all the ways of specifying memory +requirements described above are not applicable and the aforementioned members +of VmaAllocationCreateInfo structure are ignored. Memory type is selected +explicitly when creating the pool and then used to make all the allocations from +that pool. For further details, see \ref custom_memory_pools. + +\section choosing_memory_type_dedicated_allocations Dedicated allocations + +Memory for allocations is reserved out of larger block of `VkDeviceMemory` +allocated from Vulkan internally. That is the main feature of this whole library. +You can still request a separate memory block to be created for an allocation, +just like you would do in a trivial solution without using any allocator. +In that case, a buffer or image is always bound to that memory at offset 0. +This is called a "dedicated allocation". 
+You can explicitly request it by using flag #VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT. +The library can also internally decide to use dedicated allocation in some cases, e.g.: + +- When the size of the allocation is large. +- When [VK_KHR_dedicated_allocation](@ref vk_khr_dedicated_allocation) extension is enabled + and it reports that dedicated allocation is required or recommended for the resource. +- When allocation of next big memory block fails due to not enough device memory, + but allocation with the exact requested size succeeds. + + +\page memory_mapping Memory mapping + +To "map memory" in Vulkan means to obtain a CPU pointer to `VkDeviceMemory`, +to be able to read from it or write to it in CPU code. +Mapping is possible only of memory allocated from a memory type that has +`VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT` flag. +Functions `vkMapMemory()`, `vkUnmapMemory()` are designed for this purpose. +You can use them directly with memory allocated by this library, +but it is not recommended because of following issue: +Mapping the same `VkDeviceMemory` block multiple times is illegal - only one mapping at a time is allowed. +This includes mapping disjoint regions. Mapping is not reference-counted internally by Vulkan. +It is also not thread-safe. +Because of this, Vulkan Memory Allocator provides following facilities: + +\note If you want to be able to map an allocation, you need to specify one of the flags +#VMA_ALLOCATION_CREATE_HOST_ACCESS_SEQUENTIAL_WRITE_BIT or #VMA_ALLOCATION_CREATE_HOST_ACCESS_RANDOM_BIT +in VmaAllocationCreateInfo::flags. These flags are required for an allocation to be mappable +when using #VMA_MEMORY_USAGE_AUTO or other `VMA_MEMORY_USAGE_AUTO*` enum values. +For other usage values they are ignored and every such allocation made in `HOST_VISIBLE` memory type is mappable, +but these flags can still be used for consistency. + +\section memory_mapping_copy_functions Copy functions + +The easiest way to copy data from a host pointer to an allocation is to use convenience function vmaCopyMemoryToAllocation(). +It automatically maps the Vulkan memory temporarily (if not already mapped), performs `memcpy`, +and calls `vkFlushMappedMemoryRanges` (if required - if memory type is not `HOST_COHERENT`). + +It is also the safest one, because using `memcpy` avoids a risk of accidentally introducing memory reads +(e.g. by doing `pMappedVectors[i] += v`), which may be very slow on memory types that are not `HOST_CACHED`. + +\code +struct ConstantBuffer +{ + ... +}; +ConstantBuffer constantBufferData = ... + +VkBufferCreateInfo bufCreateInfo = { VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO }; +bufCreateInfo.size = sizeof(ConstantBuffer); +bufCreateInfo.usage = VK_BUFFER_USAGE_TRANSFER_SRC_BIT; + +VmaAllocationCreateInfo allocCreateInfo = {}; +allocCreateInfo.usage = VMA_MEMORY_USAGE_AUTO; +allocCreateInfo.flags = VMA_ALLOCATION_CREATE_HOST_ACCESS_SEQUENTIAL_WRITE_BIT; + +VkBuffer buf; +VmaAllocation alloc; +vmaCreateBuffer(allocator, &bufCreateInfo, &allocCreateInfo, &buf, &alloc, nullptr); + +vmaCopyMemoryToAllocation(allocator, &constantBufferData, alloc, 0, sizeof(ConstantBuffer)); +\endcode + +Copy in the other direction - from an allocation to a host pointer can be performed the same way using function vmaCopyAllocationToMemory(). + +\section memory_mapping_mapping_functions Mapping functions + +The library provides following functions for mapping of a specific allocation: vmaMapMemory(), vmaUnmapMemory(). 
+They are safer and more convenient to use than standard Vulkan functions. +You can map an allocation multiple times simultaneously - mapping is reference-counted internally. +You can also map different allocations simultaneously regardless of whether they use the same `VkDeviceMemory` block. +The way it is implemented is that the library always maps entire memory block, not just region of the allocation. +For further details, see description of vmaMapMemory() function. +Example: + +\code +// Having these objects initialized: +struct ConstantBuffer +{ + ... +}; +ConstantBuffer constantBufferData = ... + +VmaAllocator allocator = ... +VkBuffer constantBuffer = ... +VmaAllocation constantBufferAllocation = ... + +// You can map and fill your buffer using following code: + +void* mappedData; +vmaMapMemory(allocator, constantBufferAllocation, &mappedData); +memcpy(mappedData, &constantBufferData, sizeof(constantBufferData)); +vmaUnmapMemory(allocator, constantBufferAllocation); +\endcode + +When mapping, you may see a warning from Vulkan validation layer similar to this one: + +Mapping an image with layout VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL can result in undefined behavior if this memory is used by the device. Only GENERAL or PREINITIALIZED should be used. + +It happens because the library maps entire `VkDeviceMemory` block, where different +types of images and buffers may end up together, especially on GPUs with unified memory like Intel. +You can safely ignore it if you are sure you access only memory of the intended +object that you wanted to map. + + +\section memory_mapping_persistently_mapped_memory Persistently mapped memory + +Keeping your memory persistently mapped is generally OK in Vulkan. +You don't need to unmap it before using its data on the GPU. +The library provides a special feature designed for that: +Allocations made with #VMA_ALLOCATION_CREATE_MAPPED_BIT flag set in +VmaAllocationCreateInfo::flags stay mapped all the time, +so you can just access CPU pointer to it any time +without a need to call any "map" or "unmap" function. +Example: + +\code +VkBufferCreateInfo bufCreateInfo = { VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO }; +bufCreateInfo.size = sizeof(ConstantBuffer); +bufCreateInfo.usage = VK_BUFFER_USAGE_TRANSFER_SRC_BIT; + +VmaAllocationCreateInfo allocCreateInfo = {}; +allocCreateInfo.usage = VMA_MEMORY_USAGE_AUTO; +allocCreateInfo.flags = VMA_ALLOCATION_CREATE_HOST_ACCESS_SEQUENTIAL_WRITE_BIT | + VMA_ALLOCATION_CREATE_MAPPED_BIT; + +VkBuffer buf; +VmaAllocation alloc; +VmaAllocationInfo allocInfo; +vmaCreateBuffer(allocator, &bufCreateInfo, &allocCreateInfo, &buf, &alloc, &allocInfo); + +// Buffer is already mapped. You can access its memory. +memcpy(allocInfo.pMappedData, &constantBufferData, sizeof(constantBufferData)); +\endcode + +\note #VMA_ALLOCATION_CREATE_MAPPED_BIT by itself doesn't guarantee that the allocation will end up +in a mappable memory type. +For this, you need to also specify #VMA_ALLOCATION_CREATE_HOST_ACCESS_SEQUENTIAL_WRITE_BIT or +#VMA_ALLOCATION_CREATE_HOST_ACCESS_RANDOM_BIT. +#VMA_ALLOCATION_CREATE_MAPPED_BIT only guarantees that if the memory is `HOST_VISIBLE`, the allocation will be mapped on creation. +For an example of how to make use of this fact, see section \ref usage_patterns_advanced_data_uploading. 
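+
+As a minimal illustrative sketch, one possible way to decide at runtime whether such a
+persistently mapped allocation needs an explicit flush after writing (cache control is
+described in detail in the next section):
+
+\code
+VkMemoryPropertyFlags memPropFlags;
+vmaGetAllocationMemoryProperties(allocator, alloc, &memPropFlags);
+if((memPropFlags & VK_MEMORY_PROPERTY_HOST_COHERENT_BIT) == 0)
+{
+    // Memory type is not HOST_COHERENT - flush the written range before the GPU reads it.
+    vmaFlushAllocation(allocator, alloc, 0, VK_WHOLE_SIZE);
+}
+\endcode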
+
+\section memory_mapping_cache_control Cache flush and invalidate
+
+Memory in Vulkan doesn't need to be unmapped before using it on the GPU,
+but unless a memory type has the `VK_MEMORY_PROPERTY_HOST_COHERENT_BIT` flag set,
+you need to manually **invalidate** the cache before reading from a mapped pointer
+and **flush** the cache after writing to it.
+Map/unmap operations don't do that automatically.
+Vulkan provides the following functions for this purpose: `vkFlushMappedMemoryRanges()`,
+`vkInvalidateMappedMemoryRanges()`, but this library provides more convenient
+functions that refer to a given allocation object: vmaFlushAllocation(),
+vmaInvalidateAllocation(),
+or multiple objects at once: vmaFlushAllocations(), vmaInvalidateAllocations().
+
+Regions of memory specified for flush/invalidate must be aligned to
+`VkPhysicalDeviceLimits::nonCoherentAtomSize`. This is automatically ensured by the library.
+In any memory type that is `HOST_VISIBLE` but not `HOST_COHERENT`, all allocations
+within blocks are aligned to this value, so their offsets are always multiples of
+`nonCoherentAtomSize` and two different allocations never share the same "line" of this size.
+
+Also, Windows drivers from all 3 PC GPU vendors (AMD, Intel, NVIDIA)
+currently provide the `HOST_COHERENT` flag on all memory types that are
+`HOST_VISIBLE`, so on PC you may not need to bother.
+
+
+\page staying_within_budget Staying within budget
+
+When developing a graphics-intensive game or program, it is important to avoid allocating
+more GPU memory than is physically available. When the memory is over-committed,
+various bad things can happen, depending on the specific GPU, graphics driver, and
+operating system:
+
+- It may just work without any problems.
+- The application may slow down because some memory blocks are moved to system RAM
+  and the GPU has to access them through the PCI Express bus.
+- A new allocation may take a very long time to complete, even a few seconds, and possibly
+  freeze the entire system.
+- The new allocation may fail with `VK_ERROR_OUT_OF_DEVICE_MEMORY`.
+- It may even result in a GPU crash (TDR), observed as `VK_ERROR_DEVICE_LOST`
+  returned somewhere later.
+
+\section staying_within_budget_querying_for_budget Querying for budget
+
+To query for current memory usage and available budget, use the function vmaGetHeapBudgets().
+The returned structure #VmaBudget contains quantities expressed in bytes, per Vulkan memory heap.
+
+Please note that this function returns different information and works faster than
+vmaCalculateStatistics(). vmaGetHeapBudgets() can be called every frame or even before every
+allocation, while vmaCalculateStatistics() is intended to be used rarely,
+only to obtain statistical information, e.g. for debugging purposes.
+
+It is recommended to use the VK_EXT_memory_budget device extension to obtain information
+about the budget from the Vulkan device. VMA is able to use this extension automatically.
+When not enabled, the allocator behaves the same way, but then it estimates current usage
+and available budget based on its internal information and Vulkan memory heap sizes,
+which may be less precise. In order to use this extension:
+
+1. Make sure the extensions VK_EXT_memory_budget and VK_KHR_get_physical_device_properties2
+   required by it are available and enable them. Please note that the first is a device
+   extension and the second is an instance extension!
+2. Use the flag #VMA_ALLOCATOR_CREATE_EXT_MEMORY_BUDGET_BIT when creating the #VmaAllocator object.
+3. Make sure to call vmaSetCurrentFrameIndex() every frame. The budget is queried from
+   Vulkan inside of it, to avoid the overhead of querying it with every allocation.
Budget is queried from + Vulkan inside of it to avoid overhead of querying it with every allocation. + +\section staying_within_budget_controlling_memory_usage Controlling memory usage + +There are many ways in which you can try to stay within the budget. + +First, when making new allocation requires allocating a new memory block, the library +tries not to exceed the budget automatically. If a block with default recommended size +(e.g. 256 MB) would go over budget, a smaller block is allocated, possibly even +dedicated memory for just this resource. + +If the size of the requested resource plus current memory usage is more than the +budget, by default the library still tries to create it, leaving it to the Vulkan +implementation whether the allocation succeeds or fails. You can change this behavior +by using #VMA_ALLOCATION_CREATE_WITHIN_BUDGET_BIT flag. With it, the allocation is +not made if it would exceed the budget or if the budget is already exceeded. +VMA then tries to make the allocation from the next eligible Vulkan memory type. +If all of them fail, the call then fails with `VK_ERROR_OUT_OF_DEVICE_MEMORY`. +Example usage pattern may be to pass the #VMA_ALLOCATION_CREATE_WITHIN_BUDGET_BIT flag +when creating resources that are not essential for the application (e.g. the texture +of a specific object) and not to pass it when creating critically important resources +(e.g. render targets). + +On AMD graphics cards there is a custom vendor extension available: VK_AMD_memory_overallocation_behavior +that allows to control the behavior of the Vulkan implementation in out-of-memory cases - +whether it should fail with an error code or still allow the allocation. +Usage of this extension involves only passing extra structure on Vulkan device creation, +so it is out of scope of this library. + +Finally, you can also use #VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT flag to make sure +a new allocation is created only when it fits inside one of the existing memory blocks. +If it would require to allocate a new block, if fails instead with `VK_ERROR_OUT_OF_DEVICE_MEMORY`. +This also ensures that the function call is very fast because it never goes to Vulkan +to obtain a new block. + +\note Creating \ref custom_memory_pools with VmaPoolCreateInfo::minBlockCount +set to more than 0 will currently try to allocate memory blocks without checking whether they +fit within budget. + + +\page resource_aliasing Resource aliasing (overlap) + +New explicit graphics APIs (Vulkan and Direct3D 12), thanks to manual memory +management, give an opportunity to alias (overlap) multiple resources in the +same region of memory - a feature not available in the old APIs (Direct3D 11, OpenGL). +It can be useful to save video memory, but it must be used with caution. + +For example, if you know the flow of your whole render frame in advance, you +are going to use some intermediate textures or buffers only during a small range of render passes, +and you know these ranges don't overlap in time, you can bind these resources to +the same place in memory, even if they have completely different parameters (width, height, format etc.). + +![Resource aliasing (overlap)](../gfx/Aliasing.png) + +Such scenario is possible using VMA, but you need to create your images manually. 
+Then you need to calculate parameters of an allocation to be made using formula: + +- allocation size = max(size of each image) +- allocation alignment = max(alignment of each image) +- allocation memoryTypeBits = bitwise AND(memoryTypeBits of each image) + +Following example shows two different images bound to the same place in memory, +allocated to fit largest of them. + +\code +// A 512x512 texture to be sampled. +VkImageCreateInfo img1CreateInfo = { VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO }; +img1CreateInfo.imageType = VK_IMAGE_TYPE_2D; +img1CreateInfo.extent.width = 512; +img1CreateInfo.extent.height = 512; +img1CreateInfo.extent.depth = 1; +img1CreateInfo.mipLevels = 10; +img1CreateInfo.arrayLayers = 1; +img1CreateInfo.format = VK_FORMAT_R8G8B8A8_SRGB; +img1CreateInfo.tiling = VK_IMAGE_TILING_OPTIMAL; +img1CreateInfo.initialLayout = VK_IMAGE_LAYOUT_UNDEFINED; +img1CreateInfo.usage = VK_IMAGE_USAGE_TRANSFER_DST_BIT | VK_IMAGE_USAGE_SAMPLED_BIT; +img1CreateInfo.samples = VK_SAMPLE_COUNT_1_BIT; + +// A full screen texture to be used as color attachment. +VkImageCreateInfo img2CreateInfo = { VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO }; +img2CreateInfo.imageType = VK_IMAGE_TYPE_2D; +img2CreateInfo.extent.width = 1920; +img2CreateInfo.extent.height = 1080; +img2CreateInfo.extent.depth = 1; +img2CreateInfo.mipLevels = 1; +img2CreateInfo.arrayLayers = 1; +img2CreateInfo.format = VK_FORMAT_R8G8B8A8_UNORM; +img2CreateInfo.tiling = VK_IMAGE_TILING_OPTIMAL; +img2CreateInfo.initialLayout = VK_IMAGE_LAYOUT_UNDEFINED; +img2CreateInfo.usage = VK_IMAGE_USAGE_SAMPLED_BIT | VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT; +img2CreateInfo.samples = VK_SAMPLE_COUNT_1_BIT; + +VkImage img1; +res = vkCreateImage(device, &img1CreateInfo, nullptr, &img1); +VkImage img2; +res = vkCreateImage(device, &img2CreateInfo, nullptr, &img2); + +VkMemoryRequirements img1MemReq; +vkGetImageMemoryRequirements(device, img1, &img1MemReq); +VkMemoryRequirements img2MemReq; +vkGetImageMemoryRequirements(device, img2, &img2MemReq); + +VkMemoryRequirements finalMemReq = {}; +finalMemReq.size = std::max(img1MemReq.size, img2MemReq.size); +finalMemReq.alignment = std::max(img1MemReq.alignment, img2MemReq.alignment); +finalMemReq.memoryTypeBits = img1MemReq.memoryTypeBits & img2MemReq.memoryTypeBits; +// Validate if(finalMemReq.memoryTypeBits != 0) + +VmaAllocationCreateInfo allocCreateInfo = {}; +allocCreateInfo.preferredFlags = VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT; + +VmaAllocation alloc; +res = vmaAllocateMemory(allocator, &finalMemReq, &allocCreateInfo, &alloc, nullptr); + +res = vmaBindImageMemory(allocator, alloc, img1); +res = vmaBindImageMemory(allocator, alloc, img2); + +// You can use img1, img2 here, but not at the same time! + +vmaFreeMemory(allocator, alloc); +vkDestroyImage(allocator, img2, nullptr); +vkDestroyImage(allocator, img1, nullptr); +\endcode + +VMA also provides convenience functions that create a buffer or image and bind it to memory +represented by an existing #VmaAllocation: +vmaCreateAliasingBuffer(), vmaCreateAliasingBuffer2(), +vmaCreateAliasingImage(), vmaCreateAliasingImage2(). +Versions with "2" offer additional parameter `allocationLocalOffset`. + +Remember that using resources that alias in memory requires proper synchronization. +You need to issue a memory barrier to make sure commands that use `img1` and `img2` +don't overlap on GPU timeline. +You also need to treat a resource after aliasing as uninitialized - containing garbage data. 
+For example, if you use `img1` and then want to use `img2`, you need to issue +an image memory barrier for `img2` with `oldLayout` = `VK_IMAGE_LAYOUT_UNDEFINED`. + +Additional considerations: + +- Vulkan also allows to interpret contents of memory between aliasing resources consistently in some cases. +See chapter 11.8. "Memory Aliasing" of Vulkan specification or `VK_IMAGE_CREATE_ALIAS_BIT` flag. +- You can create more complex layout where different images and buffers are bound +at different offsets inside one large allocation. For example, one can imagine +a big texture used in some render passes, aliasing with a set of many small buffers +used between in some further passes. To bind a resource at non-zero offset in an allocation, +use vmaBindBufferMemory2() / vmaBindImageMemory2(). +- Before allocating memory for the resources you want to alias, check `memoryTypeBits` +returned in memory requirements of each resource to make sure the bits overlap. +Some GPUs may expose multiple memory types suitable e.g. only for buffers or +images with `COLOR_ATTACHMENT` usage, so the sets of memory types supported by your +resources may be disjoint. Aliasing them is not possible in that case. + + +\page custom_memory_pools Custom memory pools + +A memory pool contains a number of `VkDeviceMemory` blocks. +The library automatically creates and manages default pool for each memory type available on the device. +Default memory pool automatically grows in size. +Size of allocated blocks is also variable and managed automatically. +You are using default pools whenever you leave VmaAllocationCreateInfo::pool = null. + +You can create custom pool and allocate memory out of it. +It can be useful if you want to: + +- Keep certain kind of allocations separate from others. +- Enforce particular, fixed size of Vulkan memory blocks. +- Limit maximum amount of Vulkan memory allocated for that pool. +- Reserve minimum or fixed amount of Vulkan memory always preallocated for that pool. +- Use extra parameters for a set of your allocations that are available in #VmaPoolCreateInfo but not in + #VmaAllocationCreateInfo - e.g., custom minimum alignment, custom `pNext` chain. +- Perform defragmentation on a specific subset of your allocations. + +To use custom memory pools: + +-# Fill VmaPoolCreateInfo structure. +-# Call vmaCreatePool() to obtain #VmaPool handle. +-# When making an allocation, set VmaAllocationCreateInfo::pool to this handle. + You don't need to specify any other parameters of this structure, like `usage`. + +Example: + +\code +// Find memoryTypeIndex for the pool. +VkBufferCreateInfo sampleBufCreateInfo = { VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO }; +sampleBufCreateInfo.size = 0x10000; // Doesn't matter. +sampleBufCreateInfo.usage = VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT | VK_BUFFER_USAGE_TRANSFER_DST_BIT; + +VmaAllocationCreateInfo sampleAllocCreateInfo = {}; +sampleAllocCreateInfo.usage = VMA_MEMORY_USAGE_AUTO; + +uint32_t memTypeIndex; +VkResult res = vmaFindMemoryTypeIndexForBufferInfo(allocator, + &sampleBufCreateInfo, &sampleAllocCreateInfo, &memTypeIndex); +// Check res... + +// Create a pool that can have at most 2 blocks, 128 MiB each. +VmaPoolCreateInfo poolCreateInfo = {}; +poolCreateInfo.memoryTypeIndex = memTypeIndex; +poolCreateInfo.blockSize = 128ull * 1024 * 1024; +poolCreateInfo.maxBlockCount = 2; + +VmaPool pool; +res = vmaCreatePool(allocator, &poolCreateInfo, &pool); +// Check res... + +// Allocate a buffer out of it. 
+VkBufferCreateInfo bufCreateInfo = { VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO }; +bufCreateInfo.size = 1024; +bufCreateInfo.usage = VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT | VK_BUFFER_USAGE_TRANSFER_DST_BIT; + +VmaAllocationCreateInfo allocCreateInfo = {}; +allocCreateInfo.pool = pool; + +VkBuffer buf; +VmaAllocation alloc; +res = vmaCreateBuffer(allocator, &bufCreateInfo, &allocCreateInfo, &buf, &alloc, nullptr); +// Check res... +\endcode + +You have to free all allocations made from this pool before destroying it. + +\code +vmaDestroyBuffer(allocator, buf, alloc); +vmaDestroyPool(allocator, pool); +\endcode + +New versions of this library support creating dedicated allocations in custom pools. +It is supported only when VmaPoolCreateInfo::blockSize = 0. +To use this feature, set VmaAllocationCreateInfo::pool to the pointer to your custom pool and +VmaAllocationCreateInfo::flags to #VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT. + + +\section custom_memory_pools_MemTypeIndex Choosing memory type index + +When creating a pool, you must explicitly specify memory type index. +To find the one suitable for your buffers or images, you can use helper functions +vmaFindMemoryTypeIndexForBufferInfo(), vmaFindMemoryTypeIndexForImageInfo(). +You need to provide structures with example parameters of buffers or images +that you are going to create in that pool. + +\code +VkBufferCreateInfo exampleBufCreateInfo = { VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO }; +exampleBufCreateInfo.size = 1024; // Doesn't matter +exampleBufCreateInfo.usage = VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT | VK_BUFFER_USAGE_TRANSFER_DST_BIT; + +VmaAllocationCreateInfo allocCreateInfo = {}; +allocCreateInfo.usage = VMA_MEMORY_USAGE_AUTO; + +uint32_t memTypeIndex; +vmaFindMemoryTypeIndexForBufferInfo(allocator, &exampleBufCreateInfo, &allocCreateInfo, &memTypeIndex); + +VmaPoolCreateInfo poolCreateInfo = {}; +poolCreateInfo.memoryTypeIndex = memTypeIndex; +// ... +\endcode + +When creating buffers/images allocated in that pool, provide following parameters: + +- `VkBufferCreateInfo`: Prefer to pass same parameters as above. + Otherwise you risk creating resources in a memory type that is not suitable for them, which may result in undefined behavior. + Using different `VK_BUFFER_USAGE_` flags may work, but you shouldn't create images in a pool intended for buffers + or the other way around. +- VmaAllocationCreateInfo: You don't need to pass same parameters. Fill only `pool` member. + Other members are ignored anyway. + + +\section custom_memory_pools_when_not_use When not to use custom pools + +Custom pools are commonly overused by VMA users. +While it may feel natural to keep some logical groups of resources separate in memory, +in most cases it does more harm than good. +Using custom pool shouldn't be your first choice. +Instead, please make all allocations from default pools first and only use custom pools +if you can prove and measure that it is beneficial in some way, +e.g. it results in lower memory usage, better performance, etc. + +Using custom pools has disadvantages: + +- Each pool has its own collection of `VkDeviceMemory` blocks. + Some of them may be partially or even completely empty. + Spreading allocations across multiple pools increases the amount of wasted (allocated but unbound) memory. +- You must manually choose specific memory type to be used by a custom pool (set as VmaPoolCreateInfo::memoryTypeIndex). 
+ When using default pools, best memory type for each of your allocations can be selected automatically + using a carefully design algorithm that works across all kinds of GPUs. +- If an allocation from a custom pool at specific memory type fails, entire allocation operation returns failure. + When using default pools, VMA tries another compatible memory type. +- If you set VmaPoolCreateInfo::blockSize != 0, each memory block has the same size, + while default pools start from small blocks and only allocate next blocks larger and larger + up to the preferred block size. + +Many of the common concerns can be addressed in a different way than using custom pools: + +- If you want to keep your allocations of certain size (small versus large) or certain lifetime (transient versus long lived) + separate, you likely don't need to. + VMA uses a high quality allocation algorithm that manages memory well in various cases. + Please measure and check if using custom pools provides a benefit. +- If you want to keep your images and buffers separate, you don't need to. + VMA respects `bufferImageGranularity` limit automatically. +- If you want to keep your mapped and not mapped allocations separate, you don't need to. + VMA respects `nonCoherentAtomSize` limit automatically. + It also maps only those `VkDeviceMemory` blocks that need to map any allocation. + It even tries to keep mappable and non-mappable allocations in separate blocks to minimize the amount of mapped memory. +- If you want to choose a custom size for the default memory block, you can set it globally instead + using VmaAllocatorCreateInfo::preferredLargeHeapBlockSize. +- If you want to select specific memory type for your allocation, + you can set VmaAllocationCreateInfo::memoryTypeBits to `(1u << myMemoryTypeIndex)` instead. +- If you need to create a buffer with certain minimum alignment, you can still do it + using default pools with dedicated function vmaCreateBufferWithAlignment(). + + +\section linear_algorithm Linear allocation algorithm + +Each Vulkan memory block managed by this library has accompanying metadata that +keeps track of used and unused regions. By default, the metadata structure and +algorithm tries to find best place for new allocations among free regions to +optimize memory usage. This way you can allocate and free objects in any order. + +![Default allocation algorithm](../gfx/Linear_allocator_1_algo_default.png) + +Sometimes there is a need to use simpler, linear allocation algorithm. You can +create custom pool that uses such algorithm by adding flag +#VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT to VmaPoolCreateInfo::flags while creating +#VmaPool object. Then an alternative metadata management is used. It always +creates new allocations after last one and doesn't reuse free regions after +allocations freed in the middle. It results in better allocation performance and +less memory consumed by metadata. + +![Linear allocation algorithm](../gfx/Linear_allocator_2_algo_linear.png) + +With this one flag, you can create a custom pool that can be used in many ways: +free-at-once, stack, double stack, and ring buffer. See below for details. +You don't need to specify explicitly which of these options you are going to use - it is detected automatically. + +\subsection linear_algorithm_free_at_once Free-at-once + +In a pool that uses linear algorithm, you still need to free all the allocations +individually, e.g. by using vmaFreeMemory() or vmaDestroyBuffer(). You can free +them in any order. 
New allocations are always made after last one - free space +in the middle is not reused. However, when you release all the allocation and +the pool becomes empty, allocation starts from the beginning again. This way you +can use linear algorithm to speed up creation of allocations that you are going +to release all at once. + +![Free-at-once](../gfx/Linear_allocator_3_free_at_once.png) + +This mode is also available for pools created with VmaPoolCreateInfo::maxBlockCount +value that allows multiple memory blocks. + +\subsection linear_algorithm_stack Stack + +When you free an allocation that was created last, its space can be reused. +Thanks to this, if you always release allocations in the order opposite to their +creation (LIFO - Last In First Out), you can achieve behavior of a stack. + +![Stack](../gfx/Linear_allocator_4_stack.png) + +This mode is also available for pools created with VmaPoolCreateInfo::maxBlockCount +value that allows multiple memory blocks. + +\subsection linear_algorithm_double_stack Double stack + +The space reserved by a custom pool with linear algorithm may be used by two +stacks: + +- First, default one, growing up from offset 0. +- Second, "upper" one, growing down from the end towards lower offsets. + +To make allocation from the upper stack, add flag #VMA_ALLOCATION_CREATE_UPPER_ADDRESS_BIT +to VmaAllocationCreateInfo::flags. + +![Double stack](../gfx/Linear_allocator_7_double_stack.png) + +Double stack is available only in pools with one memory block - +VmaPoolCreateInfo::maxBlockCount must be 1. Otherwise behavior is undefined. + +When the two stacks' ends meet so there is not enough space between them for a +new allocation, such allocation fails with usual +`VK_ERROR_OUT_OF_DEVICE_MEMORY` error. + +\subsection linear_algorithm_ring_buffer Ring buffer + +When you free some allocations from the beginning and there is not enough free space +for a new one at the end of a pool, allocator's "cursor" wraps around to the +beginning and starts allocation there. Thanks to this, if you always release +allocations in the same order as you created them (FIFO - First In First Out), +you can achieve behavior of a ring buffer / queue. + +![Ring buffer](../gfx/Linear_allocator_5_ring_buffer.png) + +Ring buffer is available only in pools with one memory block - +VmaPoolCreateInfo::maxBlockCount must be 1. Otherwise behavior is undefined. + +\note \ref defragmentation is not supported in custom pools created with #VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT. + + +\page defragmentation Defragmentation + +Interleaved allocations and deallocations of many objects of varying size can +cause fragmentation over time, which can lead to a situation where the library is unable +to find a continuous range of free memory for a new allocation despite there is +enough free space, just scattered across many small free ranges between existing +allocations. + +To mitigate this problem, you can use defragmentation feature. +It doesn't happen automatically though and needs your cooperation, +because VMA is a low level library that only allocates memory. +It cannot recreate buffers and images in a new place as it doesn't remember the contents of `VkBufferCreateInfo` / `VkImageCreateInfo` structures. +It cannot copy their contents as it doesn't record any commands to a command buffer. 
+ +Example: + +\code +VmaDefragmentationInfo defragInfo = {}; +defragInfo.pool = myPool; +defragInfo.flags = VMA_DEFRAGMENTATION_FLAG_ALGORITHM_FAST_BIT; + +VmaDefragmentationContext defragCtx; +VkResult res = vmaBeginDefragmentation(allocator, &defragInfo, &defragCtx); +// Check res... + +for(;;) +{ + VmaDefragmentationPassMoveInfo pass; + res = vmaBeginDefragmentationPass(allocator, defragCtx, &pass); + if(res == VK_SUCCESS) + break; + else if(res != VK_INCOMPLETE) + // Handle error... + + for(uint32_t i = 0; i < pass.moveCount; ++i) + { + // Inspect pass.pMoves[i].srcAllocation, identify what buffer/image it represents. + VmaAllocationInfo allocInfo; + vmaGetAllocationInfo(allocator, pass.pMoves[i].srcAllocation, &allocInfo); + MyEngineResourceData* resData = (MyEngineResourceData*)allocInfo.pUserData; + + // Recreate and bind this buffer/image at: pass.pMoves[i].dstMemory, pass.pMoves[i].dstOffset. + VkImageCreateInfo imgCreateInfo = ... + VkImage newImg; + res = vkCreateImage(device, &imgCreateInfo, nullptr, &newImg); + // Check res... + res = vmaBindImageMemory(allocator, pass.pMoves[i].dstTmpAllocation, newImg); + // Check res... + + // Issue a vkCmdCopyBuffer/vkCmdCopyImage to copy its content to the new place. + vkCmdCopyImage(cmdBuf, resData->img, ..., newImg, ...); + } + + // Make sure the copy commands finished executing. + vkWaitForFences(...); + + // Destroy old buffers/images bound with pass.pMoves[i].srcAllocation. + for(uint32_t i = 0; i < pass.moveCount; ++i) + { + // ... + vkDestroyImage(device, resData->img, nullptr); + } + + // Update appropriate descriptors to point to the new places... + + res = vmaEndDefragmentationPass(allocator, defragCtx, &pass); + if(res == VK_SUCCESS) + break; + else if(res != VK_INCOMPLETE) + // Handle error... +} + +vmaEndDefragmentation(allocator, defragCtx, nullptr); +\endcode + +Although functions like vmaCreateBuffer(), vmaCreateImage(), vmaDestroyBuffer(), vmaDestroyImage() +create/destroy an allocation and a buffer/image at once, these are just a shortcut for +creating the resource, allocating memory, and binding them together. +Defragmentation works on memory allocations only. You must handle the rest manually. +Defragmentation is an iterative process that should repreat "passes" as long as related functions +return `VK_INCOMPLETE` not `VK_SUCCESS`. +In each pass: + +1. vmaBeginDefragmentationPass() function call: + - Calculates and returns the list of allocations to be moved in this pass. + Note this can be a time-consuming process. + - Reserves destination memory for them by creating temporary destination allocations + that you can query for their `VkDeviceMemory` + offset using vmaGetAllocationInfo(). +2. Inside the pass, **you should**: + - Inspect the returned list of allocations to be moved. + - Create new buffers/images and bind them at the returned destination temporary allocations. + - Copy data from source to destination resources if necessary. + - Destroy the source buffers/images, but NOT their allocations. +3. vmaEndDefragmentationPass() function call: + - Frees the source memory reserved for the allocations that are moved. + - Modifies source #VmaAllocation objects that are moved to point to the destination reserved memory. + - Frees `VkDeviceMemory` blocks that became empty. + +Unlike in previous iterations of the defragmentation API, there is no list of "movable" allocations passed as a parameter. +Defragmentation algorithm tries to move all suitable allocations. 
+You can, however, refuse to move some of them inside a defragmentation pass, by setting +`pass.pMoves[i].operation` to #VMA_DEFRAGMENTATION_MOVE_OPERATION_IGNORE. +This is not recommended and may result in suboptimal packing of the allocations after defragmentation. +If you cannot ensure any allocation can be moved, it is better to keep movable allocations separate in a custom pool. + +Inside a pass, for each allocation that should be moved: + +- You should copy its data from the source to the destination place by calling e.g. `vkCmdCopyBuffer()`, `vkCmdCopyImage()`. + - You need to make sure these commands finished executing before destroying the source buffers/images and before calling vmaEndDefragmentationPass(). +- If a resource doesn't contain any meaningful data, e.g. it is a transient color attachment image to be cleared, + filled, and used temporarily in each rendering frame, you can just recreate this image + without copying its data. +- If the resource is in `HOST_VISIBLE` and `HOST_CACHED` memory, you can copy its data on the CPU + using `memcpy()`. +- If you cannot move the allocation, you can set `pass.pMoves[i].operation` to #VMA_DEFRAGMENTATION_MOVE_OPERATION_IGNORE. + This will cancel the move. + - vmaEndDefragmentationPass() will then free the destination memory + not the source memory of the allocation, leaving it unchanged. +- If you decide the allocation is unimportant and can be destroyed instead of moved (e.g. it wasn't used for long time), + you can set `pass.pMoves[i].operation` to #VMA_DEFRAGMENTATION_MOVE_OPERATION_DESTROY. + - vmaEndDefragmentationPass() will then free both source and destination memory, and will destroy the source #VmaAllocation object. + +You can defragment a specific custom pool by setting VmaDefragmentationInfo::pool +(like in the example above) or all the default pools by setting this member to null. + +Defragmentation is always performed in each pool separately. +Allocations are never moved between different Vulkan memory types. +The size of the destination memory reserved for a moved allocation is the same as the original one. +Alignment of an allocation as it was determined using `vkGetBufferMemoryRequirements()` etc. is also respected after defragmentation. +Buffers/images should be recreated with the same `VkBufferCreateInfo` / `VkImageCreateInfo` parameters as the original ones. + +You can perform the defragmentation incrementally to limit the number of allocations and bytes to be moved +in each pass, e.g. to call it in sync with render frames and not to experience too big hitches. +See members: VmaDefragmentationInfo::maxBytesPerPass, VmaDefragmentationInfo::maxAllocationsPerPass. + +It is also safe to perform the defragmentation asynchronously to render frames and other Vulkan and VMA +usage, possibly from multiple threads, with the exception that allocations +returned in VmaDefragmentationPassMoveInfo::pMoves shouldn't be destroyed until the defragmentation pass is ended. + +Mapping is preserved on allocations that are moved during defragmentation. +Whether through #VMA_ALLOCATION_CREATE_MAPPED_BIT or vmaMapMemory(), the allocations +are mapped at their new place. Of course, pointer to the mapped data changes, so it needs to be queried +using VmaAllocationInfo::pMappedData. + +\note Defragmentation is not supported in custom pools created with #VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT. 
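+
+For illustration, a minimal sketch of making these per-move decisions inside a pass
+(reusing `allocator` and `pass` from the earlier example; `CanBeMoved()` and `IsNoLongerNeeded()`
+are hypothetical application-side checks, not part of this library):
+
+\code
+for(uint32_t i = 0; i < pass.moveCount; ++i)
+{
+    VmaAllocationInfo allocInfo;
+    vmaGetAllocationInfo(allocator, pass.pMoves[i].srcAllocation, &allocInfo);
+    MyEngineResourceData* resData = (MyEngineResourceData*)allocInfo.pUserData;
+
+    if(!CanBeMoved(resData))
+    {
+        // Keep the allocation where it is. vmaEndDefragmentationPass() then frees only
+        // the reserved destination memory and leaves the allocation unchanged.
+        pass.pMoves[i].operation = VMA_DEFRAGMENTATION_MOVE_OPERATION_IGNORE;
+    }
+    else if(IsNoLongerNeeded(resData))
+    {
+        // Give up the resource entirely. vmaEndDefragmentationPass() then frees both source
+        // and destination memory and destroys the source VmaAllocation object.
+        pass.pMoves[i].operation = VMA_DEFRAGMENTATION_MOVE_OPERATION_DESTROY;
+    }
+    // Otherwise leave the default operation unchanged - the allocation is moved as usual.
+}
+\endcode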
+ + +\page statistics Statistics + +This library contains several functions that return information about its internal state, +especially the amount of memory allocated from Vulkan. + +\section statistics_numeric_statistics Numeric statistics + +If you need to obtain basic statistics about memory usage per heap, together with current budget, +you can call function vmaGetHeapBudgets() and inspect structure #VmaBudget. +This is useful to keep track of memory usage and stay within budget +(see also \ref staying_within_budget). +Example: + +\code +uint32_t heapIndex = ... + +VmaBudget budgets[VK_MAX_MEMORY_HEAPS]; +vmaGetHeapBudgets(allocator, budgets); + +printf("My heap currently has %u allocations taking %llu B,\n", + budgets[heapIndex].statistics.allocationCount, + budgets[heapIndex].statistics.allocationBytes); +printf("allocated out of %u Vulkan device memory blocks taking %llu B,\n", + budgets[heapIndex].statistics.blockCount, + budgets[heapIndex].statistics.blockBytes); +printf("Vulkan reports total usage %llu B with budget %llu B.\n", + budgets[heapIndex].usage, + budgets[heapIndex].budget); +\endcode + +You can query for more detailed statistics per memory heap, type, and totals, +including minimum and maximum allocation size and unused range size, +by calling function vmaCalculateStatistics() and inspecting structure #VmaTotalStatistics. +This function is slower though, as it has to traverse all the internal data structures, +so it should be used only for debugging purposes. + +You can query for statistics of a custom pool using function vmaGetPoolStatistics() +or vmaCalculatePoolStatistics(). + +You can query for information about a specific allocation using function vmaGetAllocationInfo(). +It fill structure #VmaAllocationInfo. + +\section statistics_json_dump JSON dump + +You can dump internal state of the allocator to a string in JSON format using function vmaBuildStatsString(). +The result is guaranteed to be correct JSON. +It uses ANSI encoding. +Any strings provided by user (see [Allocation names](@ref allocation_names)) +are copied as-is and properly escaped for JSON, so if they use UTF-8, ISO-8859-2 or any other encoding, +this JSON string can be treated as using this encoding. +It must be freed using function vmaFreeStatsString(). + +The format of this JSON string is not part of official documentation of the library, +but it will not change in backward-incompatible way without increasing library major version number +and appropriate mention in changelog. + +The JSON string contains all the data that can be obtained using vmaCalculateStatistics(). +It can also contain detailed map of allocated memory blocks and their regions - +free and occupied by allocations. +This allows e.g. to visualize the memory or assess fragmentation. + + +\page allocation_annotation Allocation names and user data + +\section allocation_user_data Allocation user data + +You can annotate allocations with your own information, e.g. for debugging purposes. +To do that, fill VmaAllocationCreateInfo::pUserData field when creating +an allocation. It is an opaque `void*` pointer. You can use it e.g. as a pointer, +some handle, index, key, ordinal number or any other value that would associate +the allocation with your custom metadata. +It is useful to identify appropriate data structures in your engine given #VmaAllocation, +e.g. when doing \ref defragmentation. + +\code +VkBufferCreateInfo bufCreateInfo = ... 
+ +MyBufferMetadata* pMetadata = CreateBufferMetadata(); + +VmaAllocationCreateInfo allocCreateInfo = {}; +allocCreateInfo.usage = VMA_MEMORY_USAGE_AUTO; +allocCreateInfo.pUserData = pMetadata; + +VkBuffer buffer; +VmaAllocation allocation; +vmaCreateBuffer(allocator, &bufCreateInfo, &allocCreateInfo, &buffer, &allocation, nullptr); +\endcode + +The pointer may be later retrieved as VmaAllocationInfo::pUserData: + +\code +VmaAllocationInfo allocInfo; +vmaGetAllocationInfo(allocator, allocation, &allocInfo); +MyBufferMetadata* pMetadata = (MyBufferMetadata*)allocInfo.pUserData; +\endcode + +It can also be changed using function vmaSetAllocationUserData(). + +Values of (non-zero) allocations' `pUserData` are printed in JSON report created by +vmaBuildStatsString() in hexadecimal form. + +\section allocation_names Allocation names + +An allocation can also carry a null-terminated string, giving a name to the allocation. +To set it, call vmaSetAllocationName(). +The library creates internal copy of the string, so the pointer you pass doesn't need +to be valid for whole lifetime of the allocation. You can free it after the call. + +\code +std::string imageName = "Texture: "; +imageName += fileName; +vmaSetAllocationName(allocator, allocation, imageName.c_str()); +\endcode + +The string can be later retrieved by inspecting VmaAllocationInfo::pName. +It is also printed in JSON report created by vmaBuildStatsString(). + +\note Setting string name to VMA allocation doesn't automatically set it to the Vulkan buffer or image created with it. +You must do it manually using an extension like VK_EXT_debug_utils, which is independent of this library. + + +\page virtual_allocator Virtual allocator + +As an extra feature, the core allocation algorithm of the library is exposed through a simple and convenient API of "virtual allocator". +It doesn't allocate any real GPU memory. It just keeps track of used and free regions of a "virtual block". +You can use it to allocate your own memory or other objects, even completely unrelated to Vulkan. +A common use case is sub-allocation of pieces of one large GPU buffer. + +\section virtual_allocator_creating_virtual_block Creating virtual block + +To use this functionality, there is no main "allocator" object. +You don't need to have #VmaAllocator object created. +All you need to do is to create a separate #VmaVirtualBlock object for each block of memory you want to be managed by the allocator: + +-# Fill in #VmaVirtualBlockCreateInfo structure. +-# Call vmaCreateVirtualBlock(). Get new #VmaVirtualBlock object. + +Example: + +\code +VmaVirtualBlockCreateInfo blockCreateInfo = {}; +blockCreateInfo.size = 1048576; // 1 MB + +VmaVirtualBlock block; +VkResult res = vmaCreateVirtualBlock(&blockCreateInfo, &block); +\endcode + +\section virtual_allocator_making_virtual_allocations Making virtual allocations + +#VmaVirtualBlock object contains internal data structure that keeps track of free and occupied regions +using the same code as the main Vulkan memory allocator. +Similarly to #VmaAllocation for standard GPU allocations, there is #VmaVirtualAllocation type +that represents an opaque handle to an allocation within the virtual block. + +In order to make such allocation: + +-# Fill in #VmaVirtualAllocationCreateInfo structure. +-# Call vmaVirtualAllocate(). Get new #VmaVirtualAllocation object that represents the allocation. + You can also receive `VkDeviceSize offset` that was assigned to the allocation. 
+ +Example: + +\code +VmaVirtualAllocationCreateInfo allocCreateInfo = {}; +allocCreateInfo.size = 4096; // 4 KB + +VmaVirtualAllocation alloc; +VkDeviceSize offset; +res = vmaVirtualAllocate(block, &allocCreateInfo, &alloc, &offset); +if(res == VK_SUCCESS) +{ + // Use the 4 KB of your memory starting at offset. +} +else +{ + // Allocation failed - no space for it could be found. Handle this error! +} +\endcode + +\section virtual_allocator_deallocation Deallocation + +When no longer needed, an allocation can be freed by calling vmaVirtualFree(). +You can only pass to this function an allocation that was previously returned by vmaVirtualAllocate() +called for the same #VmaVirtualBlock. + +When whole block is no longer needed, the block object can be released by calling vmaDestroyVirtualBlock(). +All allocations must be freed before the block is destroyed, which is checked internally by an assert. +However, if you don't want to call vmaVirtualFree() for each allocation, you can use vmaClearVirtualBlock() to free them all at once - +a feature not available in normal Vulkan memory allocator. Example: + +\code +vmaVirtualFree(block, alloc); +vmaDestroyVirtualBlock(block); +\endcode + +\section virtual_allocator_allocation_parameters Allocation parameters + +You can attach a custom pointer to each allocation by using vmaSetVirtualAllocationUserData(). +Its default value is null. +It can be used to store any data that needs to be associated with that allocation - e.g. an index, a handle, or a pointer to some +larger data structure containing more information. Example: + +\code +struct CustomAllocData +{ + std::string m_AllocName; +}; +CustomAllocData* allocData = new CustomAllocData(); +allocData->m_AllocName = "My allocation 1"; +vmaSetVirtualAllocationUserData(block, alloc, allocData); +\endcode + +The pointer can later be fetched, along with allocation offset and size, by passing the allocation handle to function +vmaGetVirtualAllocationInfo() and inspecting returned structure #VmaVirtualAllocationInfo. +If you allocated a new object to be used as the custom pointer, don't forget to delete that object before freeing the allocation! +Example: + +\code +VmaVirtualAllocationInfo allocInfo; +vmaGetVirtualAllocationInfo(block, alloc, &allocInfo); +delete (CustomAllocData*)allocInfo.pUserData; + +vmaVirtualFree(block, alloc); +\endcode + +\section virtual_allocator_alignment_and_units Alignment and units + +It feels natural to express sizes and offsets in bytes. +If an offset of an allocation needs to be aligned to a multiply of some number (e.g. 4 bytes), you can fill optional member +VmaVirtualAllocationCreateInfo::alignment to request it. Example: + +\code +VmaVirtualAllocationCreateInfo allocCreateInfo = {}; +allocCreateInfo.size = 4096; // 4 KB +allocCreateInfo.alignment = 4; // Returned offset must be a multiply of 4 B + +VmaVirtualAllocation alloc; +res = vmaVirtualAllocate(block, &allocCreateInfo, &alloc, nullptr); +\endcode + +Alignments of different allocations made from one block may vary. +However, if all alignments and sizes are always multiply of some size e.g. 4 B or `sizeof(MyDataStruct)`, +you can express all sizes, alignments, and offsets in multiples of that size instead of individual bytes. 
+It might be more convenient, but you need to make sure to use this new unit consistently in all the places: + +- VmaVirtualBlockCreateInfo::size +- VmaVirtualAllocationCreateInfo::size and VmaVirtualAllocationCreateInfo::alignment +- Using offset returned by vmaVirtualAllocate() or in VmaVirtualAllocationInfo::offset + +\section virtual_allocator_statistics Statistics + +You can obtain statistics of a virtual block using vmaGetVirtualBlockStatistics() +(to get brief statistics that are fast to calculate) +or vmaCalculateVirtualBlockStatistics() (to get more detailed statistics, slower to calculate). +The functions fill structures #VmaStatistics, #VmaDetailedStatistics respectively - same as used by the normal Vulkan memory allocator. +Example: + +\code +VmaStatistics stats; +vmaGetVirtualBlockStatistics(block, &stats); +printf("My virtual block has %llu bytes used by %u virtual allocations\n", + stats.allocationBytes, stats.allocationCount); +\endcode + +You can also request a full list of allocations and free regions as a string in JSON format by calling +vmaBuildVirtualBlockStatsString(). +Returned string must be later freed using vmaFreeVirtualBlockStatsString(). +The format of this string differs from the one returned by the main Vulkan allocator, but it is similar. + +\section virtual_allocator_additional_considerations Additional considerations + +The "virtual allocator" functionality is implemented on a level of individual memory blocks. +Keeping track of a whole collection of blocks, allocating new ones when out of free space, +deleting empty ones, and deciding which one to try first for a new allocation must be implemented by the user. + +Alternative allocation algorithms are supported, just like in custom pools of the real GPU memory. +See enum #VmaVirtualBlockCreateFlagBits to learn how to specify them (e.g. #VMA_VIRTUAL_BLOCK_CREATE_LINEAR_ALGORITHM_BIT). +You can find their description in chapter \ref custom_memory_pools. +Allocation strategies are also supported. +See enum #VmaVirtualAllocationCreateFlagBits to learn how to specify them (e.g. #VMA_VIRTUAL_ALLOCATION_CREATE_STRATEGY_MIN_TIME_BIT). + +Following features are supported only by the allocator of the real GPU memory and not by virtual allocations: +buffer-image granularity, `VMA_DEBUG_MARGIN`, `VMA_MIN_ALIGNMENT`. + + +\page debugging_memory_usage Debugging incorrect memory usage + +If you suspect a bug with memory usage, like usage of uninitialized memory or +memory being overwritten out of bounds of an allocation, +you can use debug features of this library to verify this. + +\section debugging_memory_usage_initialization Memory initialization + +If you experience a bug with incorrect and nondeterministic data in your program and you suspect uninitialized memory to be used, +you can enable automatic memory initialization to verify this. +To do it, define macro `VMA_DEBUG_INITIALIZE_ALLOCATIONS` to 1. + +\code +#define VMA_DEBUG_INITIALIZE_ALLOCATIONS 1 +#include "vk_mem_alloc.h" +\endcode + +It makes memory of new allocations initialized to bit pattern `0xDCDCDCDC`. +Before an allocation is destroyed, its memory is filled with bit pattern `0xEFEFEFEF`. +Memory is automatically mapped and unmapped if necessary. + +If you find these values while debugging your program, good chances are that you incorrectly +read Vulkan memory that is allocated but not initialized, or already freed, respectively. + +Memory initialization works only with memory types that are `HOST_VISIBLE` and with allocations that can be mapped. 
+It also works with dedicated allocations.
+
+\section debugging_memory_usage_margins Margins
+
+By default, allocations are laid out in memory blocks next to each other if possible
+(considering required alignment, `bufferImageGranularity`, and `nonCoherentAtomSize`).
+
+![Allocations without margin](../gfx/Margins_1.png)
+
+Define the macro `VMA_DEBUG_MARGIN` to some non-zero value (e.g. 16) to enforce the specified
+number of bytes as a margin after every allocation.
+
+\code
+#define VMA_DEBUG_MARGIN 16
+#include "vk_mem_alloc.h"
+\endcode
+
+![Allocations with margin](../gfx/Margins_2.png)
+
+If your bug goes away after enabling margins, it means it may be caused by memory
+being overwritten outside of allocation boundaries. It is not 100% certain though.
+A change in application behavior may also be caused by a different order and distribution
+of allocations across memory blocks after margins are applied.
+
+Margins work with all types of memory.
+
+Margins are applied only to allocations made out of memory blocks and not to dedicated
+allocations, which have their own memory block of a specific size.
+They are thus not applied to allocations made using the #VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT flag
+or to those automatically put into dedicated allocations, e.g. due to their
+large size or as recommended by the VK_KHR_dedicated_allocation extension.
+
+Margins appear in the [JSON dump](@ref statistics_json_dump) as part of free space.
+
+Note that enabling margins increases memory usage and fragmentation.
+
+Margins do not apply to \ref virtual_allocator.
+
+\section debugging_memory_usage_corruption_detection Corruption detection
+
+You can additionally define the macro `VMA_DEBUG_DETECT_CORRUPTION` to 1 to enable validation
+of the contents of the margins.
+
+\code
+#define VMA_DEBUG_MARGIN 16
+#define VMA_DEBUG_DETECT_CORRUPTION 1
+#include "vk_mem_alloc.h"
+\endcode
+
+When this feature is enabled, the number of bytes specified as `VMA_DEBUG_MARGIN`
+(it must be a multiple of 4) after every allocation is filled with a magic number.
+This idea is also known as a "canary".
+Memory is automatically mapped and unmapped if necessary.
+
+This number is validated automatically when the allocation is destroyed.
+If it is not equal to the expected value, `VMA_ASSERT()` is executed.
+It clearly means that either the CPU or the GPU overwrote the memory outside the boundaries of the allocation,
+which indicates a serious bug.
+
+You can also explicitly request checking the margins of all allocations in all memory blocks
+that belong to specified memory types by using the function vmaCheckCorruption(),
+or in memory blocks that belong to a specified custom pool by using the function
+vmaCheckPoolCorruption().
+
+Margin validation (corruption detection) works only for memory types that are
+`HOST_VISIBLE` and `HOST_COHERENT`.
+
+
+\section debugging_memory_usage_leak_detection Leak detection features
+
+At allocation and allocator destruction time VMA checks for unfreed and unmapped blocks using
+`VMA_ASSERT_LEAK()`. This macro defaults to an assertion, triggering a typically fatal error in Debug
+builds, and doing nothing in Release builds. You can provide your own definition of `VMA_ASSERT_LEAK()`
+to change this behavior.
+
+At memory block destruction time VMA lists out all unfreed allocations using the `VMA_LEAK_LOG_FORMAT()`
+macro, which defaults to `VMA_DEBUG_LOG_FORMAT`, which in turn defaults to a no-op.
+If you're having trouble with leaks - for example, the aforementioned assertion triggers, but you don't
+quite know \em why - overriding this macro to print out the leaking blocks, combined with assigning
+individual names to allocations using vmaSetAllocationName(), can greatly aid in fixing them.
+
+\page other_api_interop Interop with other graphics APIs
+
+VMA provides some features that help with interoperability with other graphics APIs, e.g. OpenGL.
+
+\section opengl_interop_exporting_memory Exporting memory
+
+If you want to attach `VkExportMemoryAllocateInfoKHR` or another structure to the `pNext` chain of memory allocations made by the library:
+
+You can create \ref custom_memory_pools for such allocations.
+Define and fill in your `VkExportMemoryAllocateInfoKHR` structure and attach it to VmaPoolCreateInfo::pMemoryAllocateNext
+while creating the custom pool.
+Please note that the structure must remain alive and unchanged for the whole lifetime of the #VmaPool,
+not only while creating it, as no copy of the structure is made,
+but its original pointer is used for each allocation instead.
+
+If you want to export all memory allocated by VMA from certain memory types,
+including dedicated allocations and other allocations made from default pools,
+an alternative solution is to fill in VmaAllocatorCreateInfo::pTypeExternalMemoryHandleTypes.
+It should point to an array of `VkExternalMemoryHandleTypeFlagsKHR` to be automatically passed by the library
+through `VkExportMemoryAllocateInfoKHR` on each allocation made from a specific memory type.
+Please note that new versions of the library also support dedicated allocations created in custom pools.
+
+You should not mix these two methods in a way that allows both to apply to the same memory type.
+Otherwise, the `VkExportMemoryAllocateInfoKHR` structure would be attached twice to the `pNext` chain of `VkMemoryAllocateInfo`.
+
+
+\section opengl_interop_custom_alignment Custom alignment
+
+Buffers or images exported to a different API like OpenGL may require a different alignment,
+higher than the one used by the library automatically, queried from functions like `vkGetBufferMemoryRequirements`.
+To impose such alignment:
+
+You can create \ref custom_memory_pools for such allocations.
+Set the VmaPoolCreateInfo::minAllocationAlignment member to the minimum alignment required for each allocation
+to be made out of this pool.
+The alignment actually used will be the maximum of this member and the alignment returned for the specific buffer or image
+from a function like `vkGetBufferMemoryRequirements`, which is called by VMA automatically.
+
+If you want to create a buffer with a specific minimum alignment out of default pools,
+use the special function vmaCreateBufferWithAlignment(), which takes the additional parameter `minAlignment`
+(a short sketch is shown at the end of this section).
+
+Note that the alignment problem affects only resources placed inside bigger `VkDeviceMemory` blocks and not dedicated
+allocations, as these, by definition, always have alignment = 0 because the resource is bound to the beginning of its dedicated block.
+You can ensure that an allocation is created as dedicated by using #VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT.
+Contrary to Direct3D 12, Vulkan doesn't have a concept of alignment of the entire memory block passed on its allocation.
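+
+For illustration, a minimal sketch of creating a buffer with a custom minimum alignment out of the
+default pools (the 4 KiB value and the buffer parameters are only an assumption for this example):
+
+\code
+VkBufferCreateInfo bufCreateInfo = { VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO };
+bufCreateInfo.size = 65536;
+bufCreateInfo.usage = VK_BUFFER_USAGE_TRANSFER_SRC_BIT | VK_BUFFER_USAGE_TRANSFER_DST_BIT;
+
+VmaAllocationCreateInfo allocCreateInfo = {};
+allocCreateInfo.usage = VMA_MEMORY_USAGE_AUTO;
+
+VkBuffer buf;
+VmaAllocation alloc;
+VkResult res = vmaCreateBufferWithAlignment(allocator, &bufCreateInfo, &allocCreateInfo,
+    4096, // minAlignment
+    &buf, &alloc, nullptr);
+// Check res...
+\endcode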
+ +\section opengl_interop_extended_allocation_information Extended allocation information + +If you want to rely on VMA to allocate your buffers and images inside larger memory blocks, +but you need to know the size of the entire block and whether the allocation was made +with its own dedicated memory, use function vmaGetAllocationInfo2() to retrieve +extended allocation information in structure #VmaAllocationInfo2. + + + +\page usage_patterns Recommended usage patterns + +Vulkan gives great flexibility in memory allocation. +This chapter shows the most common patterns. + +See also slides from talk: +[Sawicki, Adam. Advanced Graphics Techniques Tutorial: Memory management in Vulkan and DX12. Game Developers Conference, 2018](https://www.gdcvault.com/play/1025458/Advanced-Graphics-Techniques-Tutorial-New) + + +\section usage_patterns_gpu_only GPU-only resource + +When: +Any resources that you frequently write and read on GPU, +e.g. images used as color attachments (aka "render targets"), depth-stencil attachments, +images/buffers used as storage image/buffer (aka "Unordered Access View (UAV)"). + +What to do: +Let the library select the optimal memory type, which will likely have `VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT`. + +\code +VkImageCreateInfo imgCreateInfo = { VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO }; +imgCreateInfo.imageType = VK_IMAGE_TYPE_2D; +imgCreateInfo.extent.width = 3840; +imgCreateInfo.extent.height = 2160; +imgCreateInfo.extent.depth = 1; +imgCreateInfo.mipLevels = 1; +imgCreateInfo.arrayLayers = 1; +imgCreateInfo.format = VK_FORMAT_R8G8B8A8_UNORM; +imgCreateInfo.tiling = VK_IMAGE_TILING_OPTIMAL; +imgCreateInfo.initialLayout = VK_IMAGE_LAYOUT_UNDEFINED; +imgCreateInfo.usage = VK_IMAGE_USAGE_SAMPLED_BIT | VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT; +imgCreateInfo.samples = VK_SAMPLE_COUNT_1_BIT; + +VmaAllocationCreateInfo allocCreateInfo = {}; +allocCreateInfo.usage = VMA_MEMORY_USAGE_AUTO; +allocCreateInfo.flags = VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT; +allocCreateInfo.priority = 1.0f; + +VkImage img; +VmaAllocation alloc; +vmaCreateImage(allocator, &imgCreateInfo, &allocCreateInfo, &img, &alloc, nullptr); +\endcode + +Also consider: +Consider creating them as dedicated allocations using #VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT, +especially if they are large or if you plan to destroy and recreate them with different sizes +e.g. when display resolution changes. +Prefer to create such resources first and all other GPU resources (like textures and vertex buffers) later. +When VK_EXT_memory_priority extension is enabled, it is also worth setting high priority to such allocation +to decrease chances to be evicted to system memory by the operating system. + +\section usage_patterns_staging_copy_upload Staging copy for upload + +When: +A "staging" buffer than you want to map and fill from CPU code, then use as a source of transfer +to some GPU resource. + +What to do: +Use flag #VMA_ALLOCATION_CREATE_HOST_ACCESS_SEQUENTIAL_WRITE_BIT. +Let the library select the optimal memory type, which will always have `VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT`. 
+ +\code +VkBufferCreateInfo bufCreateInfo = { VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO }; +bufCreateInfo.size = 65536; +bufCreateInfo.usage = VK_BUFFER_USAGE_TRANSFER_SRC_BIT; + +VmaAllocationCreateInfo allocCreateInfo = {}; +allocCreateInfo.usage = VMA_MEMORY_USAGE_AUTO; +allocCreateInfo.flags = VMA_ALLOCATION_CREATE_HOST_ACCESS_SEQUENTIAL_WRITE_BIT | + VMA_ALLOCATION_CREATE_MAPPED_BIT; + +VkBuffer buf; +VmaAllocation alloc; +VmaAllocationInfo allocInfo; +vmaCreateBuffer(allocator, &bufCreateInfo, &allocCreateInfo, &buf, &alloc, &allocInfo); + +... + +memcpy(allocInfo.pMappedData, myData, myDataSize); +\endcode + +Also consider: +You can map the allocation using vmaMapMemory() or you can create it as persistenly mapped +using #VMA_ALLOCATION_CREATE_MAPPED_BIT, as in the example above. + + +\section usage_patterns_readback Readback + +When: +Buffers for data written by or transferred from the GPU that you want to read back on the CPU, +e.g. results of some computations. + +What to do: +Use flag #VMA_ALLOCATION_CREATE_HOST_ACCESS_RANDOM_BIT. +Let the library select the optimal memory type, which will always have `VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT` +and `VK_MEMORY_PROPERTY_HOST_CACHED_BIT`. + +\code +VkBufferCreateInfo bufCreateInfo = { VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO }; +bufCreateInfo.size = 65536; +bufCreateInfo.usage = VK_BUFFER_USAGE_TRANSFER_DST_BIT; + +VmaAllocationCreateInfo allocCreateInfo = {}; +allocCreateInfo.usage = VMA_MEMORY_USAGE_AUTO; +allocCreateInfo.flags = VMA_ALLOCATION_CREATE_HOST_ACCESS_RANDOM_BIT | + VMA_ALLOCATION_CREATE_MAPPED_BIT; + +VkBuffer buf; +VmaAllocation alloc; +VmaAllocationInfo allocInfo; +vmaCreateBuffer(allocator, &bufCreateInfo, &allocCreateInfo, &buf, &alloc, &allocInfo); + +... + +const float* downloadedData = (const float*)allocInfo.pMappedData; +\endcode + + +\section usage_patterns_advanced_data_uploading Advanced data uploading + +For resources that you frequently write on CPU via mapped pointer and +frequently read on GPU e.g. as a uniform buffer (also called "dynamic"), multiple options are possible: + +-# Easiest solution is to have one copy of the resource in `HOST_VISIBLE` memory, + even if it means system RAM (not `DEVICE_LOCAL`) on systems with a discrete graphics card, + and make the device reach out to that resource directly. + - Reads performed by the device will then go through PCI Express bus. + The performance of this access may be limited, but it may be fine depending on the size + of this resource (whether it is small enough to quickly end up in GPU cache) and the sparsity + of access. +-# On systems with unified memory (e.g. AMD APU or Intel integrated graphics, mobile chips), + a memory type may be available that is both `HOST_VISIBLE` (available for mapping) and `DEVICE_LOCAL` + (fast to access from the GPU). Then, it is likely the best choice for such type of resource. +-# Systems with a discrete graphics card and separate video memory may or may not expose + a memory type that is both `HOST_VISIBLE` and `DEVICE_LOCAL`, also known as Base Address Register (BAR). + If they do, it represents a piece of VRAM (or entire VRAM, if ReBAR is enabled in the motherboard BIOS) + that is available to CPU for mapping. + - Writes performed by the host to that memory go through PCI Express bus. + The performance of these writes may be limited, but it may be fine, especially on PCIe 4.0, + as long as rules of using uncached and write-combined memory are followed - only sequential writes and no reads. 
+-# Finally, you may need or prefer to create a separate copy of the resource in `DEVICE_LOCAL` memory, + a separate "staging" copy in `HOST_VISIBLE` memory and perform an explicit transfer command between them. + +Thankfully, VMA offers an aid to create and use such resources in the the way optimal +for the current Vulkan device. To help the library make the best choice, +use flag #VMA_ALLOCATION_CREATE_HOST_ACCESS_SEQUENTIAL_WRITE_BIT together with +#VMA_ALLOCATION_CREATE_HOST_ACCESS_ALLOW_TRANSFER_INSTEAD_BIT. +It will then prefer a memory type that is both `DEVICE_LOCAL` and `HOST_VISIBLE` (integrated memory or BAR), +but if no such memory type is available or allocation from it fails +(PC graphics cards have only 256 MB of BAR by default, unless ReBAR is supported and enabled in BIOS), +it will fall back to `DEVICE_LOCAL` memory for fast GPU access. +It is then up to you to detect that the allocation ended up in a memory type that is not `HOST_VISIBLE`, +so you need to create another "staging" allocation and perform explicit transfers. + +\code +VkBufferCreateInfo bufCreateInfo = { VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO }; +bufCreateInfo.size = 65536; +bufCreateInfo.usage = VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT | VK_BUFFER_USAGE_TRANSFER_DST_BIT; + +VmaAllocationCreateInfo allocCreateInfo = {}; +allocCreateInfo.usage = VMA_MEMORY_USAGE_AUTO; +allocCreateInfo.flags = VMA_ALLOCATION_CREATE_HOST_ACCESS_SEQUENTIAL_WRITE_BIT | + VMA_ALLOCATION_CREATE_HOST_ACCESS_ALLOW_TRANSFER_INSTEAD_BIT | + VMA_ALLOCATION_CREATE_MAPPED_BIT; + +VkBuffer buf; +VmaAllocation alloc; +VmaAllocationInfo allocInfo; +VkResult result = vmaCreateBuffer(allocator, &bufCreateInfo, &allocCreateInfo, &buf, &alloc, &allocInfo); +// Check result... + +VkMemoryPropertyFlags memPropFlags; +vmaGetAllocationMemoryProperties(allocator, alloc, &memPropFlags); + +if(memPropFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) +{ + // Allocation ended up in a mappable memory and is already mapped - write to it directly. + + // [Executed in runtime]: + memcpy(allocInfo.pMappedData, myData, myDataSize); + result = vmaFlushAllocation(allocator, alloc, 0, VK_WHOLE_SIZE); + // Check result... + + VkBufferMemoryBarrier bufMemBarrier = { VK_STRUCTURE_TYPE_BUFFER_MEMORY_BARRIER }; + bufMemBarrier.srcAccessMask = VK_ACCESS_HOST_WRITE_BIT; + bufMemBarrier.dstAccessMask = VK_ACCESS_UNIFORM_READ_BIT; + bufMemBarrier.srcQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED; + bufMemBarrier.dstQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED; + bufMemBarrier.buffer = buf; + bufMemBarrier.offset = 0; + bufMemBarrier.size = VK_WHOLE_SIZE; + + vkCmdPipelineBarrier(cmdBuf, VK_PIPELINE_STAGE_HOST_BIT, VK_PIPELINE_STAGE_VERTEX_SHADER_BIT, + 0, 0, nullptr, 1, &bufMemBarrier, 0, nullptr); +} +else +{ + // Allocation ended up in a non-mappable memory - a transfer using a staging buffer is required. + VkBufferCreateInfo stagingBufCreateInfo = { VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO }; + stagingBufCreateInfo.size = 65536; + stagingBufCreateInfo.usage = VK_BUFFER_USAGE_TRANSFER_SRC_BIT; + + VmaAllocationCreateInfo stagingAllocCreateInfo = {}; + stagingAllocCreateInfo.usage = VMA_MEMORY_USAGE_AUTO; + stagingAllocCreateInfo.flags = VMA_ALLOCATION_CREATE_HOST_ACCESS_SEQUENTIAL_WRITE_BIT | + VMA_ALLOCATION_CREATE_MAPPED_BIT; + + VkBuffer stagingBuf; + VmaAllocation stagingAlloc; + VmaAllocationInfo stagingAllocInfo; + result = vmaCreateBuffer(allocator, &stagingBufCreateInfo, &stagingAllocCreateInfo, + &stagingBuf, &stagingAlloc, &stagingAllocInfo); + // Check result... 
+ + // [Executed in runtime]: + memcpy(stagingAllocInfo.pMappedData, myData, myDataSize); + result = vmaFlushAllocation(allocator, stagingAlloc, 0, VK_WHOLE_SIZE); + // Check result... + + VkBufferMemoryBarrier bufMemBarrier = { VK_STRUCTURE_TYPE_BUFFER_MEMORY_BARRIER }; + bufMemBarrier.srcAccessMask = VK_ACCESS_HOST_WRITE_BIT; + bufMemBarrier.dstAccessMask = VK_ACCESS_TRANSFER_READ_BIT; + bufMemBarrier.srcQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED; + bufMemBarrier.dstQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED; + bufMemBarrier.buffer = stagingBuf; + bufMemBarrier.offset = 0; + bufMemBarrier.size = VK_WHOLE_SIZE; + + vkCmdPipelineBarrier(cmdBuf, VK_PIPELINE_STAGE_HOST_BIT, VK_PIPELINE_STAGE_TRANSFER_BIT, + 0, 0, nullptr, 1, &bufMemBarrier, 0, nullptr); + + VkBufferCopy bufCopy = { + 0, // srcOffset + 0, // dstOffset, + myDataSize, // size + }; + + vkCmdCopyBuffer(cmdBuf, stagingBuf, buf, 1, &bufCopy); + + VkBufferMemoryBarrier bufMemBarrier2 = { VK_STRUCTURE_TYPE_BUFFER_MEMORY_BARRIER }; + bufMemBarrier2.srcAccessMask = VK_ACCESS_TRANSFER_WRITE_BIT; + bufMemBarrier2.dstAccessMask = VK_ACCESS_UNIFORM_READ_BIT; // We created a uniform buffer + bufMemBarrier2.srcQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED; + bufMemBarrier2.dstQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED; + bufMemBarrier2.buffer = buf; + bufMemBarrier2.offset = 0; + bufMemBarrier2.size = VK_WHOLE_SIZE; + + vkCmdPipelineBarrier(cmdBuf, VK_PIPELINE_STAGE_TRANSFER_BIT, VK_PIPELINE_STAGE_VERTEX_SHADER_BIT, + 0, 0, nullptr, 1, &bufMemBarrier2, 0, nullptr); +} +\endcode + +\section usage_patterns_other_use_cases Other use cases + +Here are some other, less obvious use cases and their recommended settings: + +- An image that is used only as transfer source and destination, but it should stay on the device, + as it is used to temporarily store a copy of some texture, e.g. from the current to the next frame, + for temporal antialiasing or other temporal effects. + - Use `VkImageCreateInfo::usage = VK_IMAGE_USAGE_TRANSFER_SRC_BIT | VK_IMAGE_USAGE_TRANSFER_DST_BIT` + - Use VmaAllocationCreateInfo::usage = #VMA_MEMORY_USAGE_AUTO +- An image that is used only as transfer source and destination, but it should be placed + in the system RAM despite it doesn't need to be mapped, because it serves as a "swap" copy to evict + least recently used textures from VRAM. + - Use `VkImageCreateInfo::usage = VK_IMAGE_USAGE_TRANSFER_SRC_BIT | VK_IMAGE_USAGE_TRANSFER_DST_BIT` + - Use VmaAllocationCreateInfo::usage = #VMA_MEMORY_USAGE_AUTO_PREFER_HOST, + as VMA needs a hint here to differentiate from the previous case. +- A buffer that you want to map and write from the CPU, directly read from the GPU + (e.g. as a uniform or vertex buffer), but you have a clear preference to place it in device or + host memory due to its large size. + - Use `VkBufferCreateInfo::usage = VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT` + - Use VmaAllocationCreateInfo::usage = #VMA_MEMORY_USAGE_AUTO_PREFER_DEVICE or #VMA_MEMORY_USAGE_AUTO_PREFER_HOST + - Use VmaAllocationCreateInfo::flags = #VMA_ALLOCATION_CREATE_HOST_ACCESS_SEQUENTIAL_WRITE_BIT + + +\page configuration Configuration + +Please check "CONFIGURATION SECTION" in the code to find macros that you can define +before each include of this file or change directly in this file to provide +your own implementation of basic facilities like assert, `min()` and `max()` functions, +mutex, atomic etc. 
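+
+The general pattern is to define the macros you want to override before including this header,
+typically in the translation unit that also compiles the implementation. A minimal sketch
+(the particular overrides shown here are only examples, not a required setup):
+
+\code
+// Hypothetical vma.cpp of a client project.
+#include <cassert>
+#define VMA_ASSERT(expr) assert(expr)     // route VMA assertions to standard assert()
+#define VMA_STATIC_VULKAN_FUNCTIONS 0     // don't take Vulkan functions from static linking
+#define VMA_DYNAMIC_VULKAN_FUNCTIONS 1    // let VMA fetch them via vkGet*ProcAddr
+#define VMA_IMPLEMENTATION
+#include "vk_mem_alloc.h"
+\endcode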
+
+For example, define `VMA_ASSERT(expr)` before including the library to provide
+custom implementation of the assertion, compatible with your project.
+By default it is defined to standard C `assert(expr)` in `_DEBUG` configuration
+and empty otherwise.
+
+Similarly, you can define `VMA_LEAK_LOG_FORMAT` macro to enable printing of leaked (unfreed) allocations,
+including their names and other parameters. Example:
+
+\code
+#define VMA_LEAK_LOG_FORMAT(format, ...) do { \
+        printf((format), __VA_ARGS__); \
+        printf("\n"); \
+    } while(false)
+\endcode
+
+\section config_Vulkan_functions Pointers to Vulkan functions
+
+There are multiple ways to import pointers to Vulkan functions in the library.
+In the simplest case you don't need to do anything.
+If the compilation or linking of your program or the initialization of the #VmaAllocator
+doesn't work for you, you can try to reconfigure it.
+
+First, the allocator tries to fetch pointers to Vulkan functions linked statically,
+like this:
+
+\code
+m_VulkanFunctions.vkAllocateMemory = (PFN_vkAllocateMemory)vkAllocateMemory;
+\endcode
+
+If you want to disable this feature, set configuration macro: `#define VMA_STATIC_VULKAN_FUNCTIONS 0`.
+
+Second, you can provide the pointers yourself by setting member VmaAllocatorCreateInfo::pVulkanFunctions.
+You can fetch them e.g. using functions `vkGetInstanceProcAddr` and `vkGetDeviceProcAddr` or
+by using a helper library like [volk](https://github.com/zeux/volk).
+
+Third, VMA tries to fetch remaining pointers that are still null by calling
+`vkGetInstanceProcAddr` and `vkGetDeviceProcAddr` on its own.
+You only need to fill in VmaVulkanFunctions::vkGetInstanceProcAddr and VmaVulkanFunctions::vkGetDeviceProcAddr.
+Other pointers will be fetched automatically.
+If you want to disable this feature, set configuration macro: `#define VMA_DYNAMIC_VULKAN_FUNCTIONS 0`.
+
+Finally, all the function pointers required by the library (considering selected
+Vulkan version and enabled extensions) are checked with `VMA_ASSERT` if they are not null.
+
+
+\section custom_memory_allocator Custom host memory allocator
+
+If you use a custom allocator for CPU memory rather than default operator `new`
+and `delete` from C++, you can make this library use your allocator as well
+by filling optional member VmaAllocatorCreateInfo::pAllocationCallbacks. These
+functions will be passed to Vulkan, as well as used by the library itself to
+make any CPU-side allocations.
+
+\section allocation_callbacks Device memory allocation callbacks
+
+The library makes calls to `vkAllocateMemory()` and `vkFreeMemory()` internally.
+You can set up callbacks to be informed about these calls, e.g. for the purpose
+of gathering some statistics. To do it, fill optional member
+VmaAllocatorCreateInfo::pDeviceMemoryCallbacks.
+
+\section heap_memory_limit Device heap memory limit
+
+When device memory of certain heap runs out of free space, new allocations may
+fail (returning error code) or they may succeed, silently pushing some existing
+memory blocks from GPU VRAM to system RAM (which degrades performance). This
+behavior is implementation-dependent - it depends on GPU vendor and graphics
+driver.
+
+On AMD cards it can be controlled while creating Vulkan device object by using
+VK_AMD_memory_overallocation_behavior extension, if available.
+
+Alternatively, if you want to test how your program behaves with a limited amount of Vulkan device
+memory available without switching your graphics card to one that really has
+smaller VRAM, you can use a feature of this library intended for this purpose.
+To do it, fill optional member VmaAllocatorCreateInfo::pHeapSizeLimit.
+
+
+
+\page vk_khr_dedicated_allocation VK_KHR_dedicated_allocation
+
+VK_KHR_dedicated_allocation is a Vulkan extension which can be used to improve
+performance on some GPUs. It augments Vulkan API with possibility to query
+driver whether it prefers particular buffer or image to have its own, dedicated
+allocation (separate `VkDeviceMemory` block) for better efficiency - to be able
+to do some internal optimizations. The extension is supported by this library.
+It will be used automatically when enabled.
+
+It has been promoted to core Vulkan 1.1, so if you use eligible Vulkan version
+and inform VMA about it by setting VmaAllocatorCreateInfo::vulkanApiVersion,
+you are all set.
+
+Otherwise, if you want to use it as an extension:
+
+1 . When creating Vulkan device, check if following 2 device extensions are
+supported (call `vkEnumerateDeviceExtensionProperties()`).
+If yes, enable them (fill `VkDeviceCreateInfo::ppEnabledExtensionNames`).
+
+- VK_KHR_get_memory_requirements2
+- VK_KHR_dedicated_allocation
+
+If you enabled these extensions:
+
+2 . Use #VMA_ALLOCATOR_CREATE_KHR_DEDICATED_ALLOCATION_BIT flag when creating
+your #VmaAllocator to inform the library that you enabled required extensions
+and you want the library to use them.
+
+\code
+allocatorInfo.flags |= VMA_ALLOCATOR_CREATE_KHR_DEDICATED_ALLOCATION_BIT;
+
+vmaCreateAllocator(&allocatorInfo, &allocator);
+\endcode
+
+That is all. The extension will be automatically used whenever you create a
+buffer using vmaCreateBuffer() or image using vmaCreateImage().
+
+When using the extension together with Vulkan Validation Layer, you will receive
+warnings like this:
+
+_vkBindBufferMemory(): Binding memory to buffer 0x33 but vkGetBufferMemoryRequirements() has not been called on that buffer._
+
+It is OK, you should just ignore it. It happens because you use function
+`vkGetBufferMemoryRequirements2KHR()` instead of standard
+`vkGetBufferMemoryRequirements()`, while the validation layer seems to be
+unaware of it.
+
+To learn more about this extension, see:
+
+- [VK_KHR_dedicated_allocation in Vulkan specification](https://www.khronos.org/registry/vulkan/specs/1.2-extensions/html/chap50.html#VK_KHR_dedicated_allocation)
+- [VK_KHR_dedicated_allocation unofficial manual](http://asawicki.info/articles/VK_KHR_dedicated_allocation.php5)
+
+
+
+\page vk_ext_memory_priority VK_EXT_memory_priority
+
+VK_EXT_memory_priority is a device extension that allows passing an additional "priority"
+value to Vulkan memory allocations that the implementation may use to prefer certain
+buffers and images that are critical for performance to stay in device-local memory
+in cases when the memory is over-subscribed, while some others may be moved to the system memory.
+
+VMA offers convenient usage of this extension.
+If you enable it, you can pass "priority" parameter when creating allocations or custom pools
+and the library automatically passes the value to Vulkan using this extension.
+
+If you want to use this extension in connection with VMA, follow these steps:
+
+\section vk_ext_memory_priority_initialization Initialization
+
+1) Call `vkEnumerateDeviceExtensionProperties` for the physical device.
+Check if the extension is supported - if returned array of `VkExtensionProperties` contains "VK_EXT_memory_priority". + +2) Call `vkGetPhysicalDeviceFeatures2` for the physical device instead of old `vkGetPhysicalDeviceFeatures`. +Attach additional structure `VkPhysicalDeviceMemoryPriorityFeaturesEXT` to `VkPhysicalDeviceFeatures2::pNext` to be returned. +Check if the device feature is really supported - check if `VkPhysicalDeviceMemoryPriorityFeaturesEXT::memoryPriority` is true. + +3) While creating device with `vkCreateDevice`, enable this extension - add "VK_EXT_memory_priority" +to the list passed as `VkDeviceCreateInfo::ppEnabledExtensionNames`. + +4) While creating the device, also don't set `VkDeviceCreateInfo::pEnabledFeatures`. +Fill in `VkPhysicalDeviceFeatures2` structure instead and pass it as `VkDeviceCreateInfo::pNext`. +Enable this device feature - attach additional structure `VkPhysicalDeviceMemoryPriorityFeaturesEXT` to +`VkPhysicalDeviceFeatures2::pNext` chain and set its member `memoryPriority` to `VK_TRUE`. + +5) While creating #VmaAllocator with vmaCreateAllocator() inform VMA that you +have enabled this extension and feature - add #VMA_ALLOCATOR_CREATE_EXT_MEMORY_PRIORITY_BIT +to VmaAllocatorCreateInfo::flags. + +\section vk_ext_memory_priority_usage Usage + +When using this extension, you should initialize following member: + +- VmaAllocationCreateInfo::priority when creating a dedicated allocation with #VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT. +- VmaPoolCreateInfo::priority when creating a custom pool. + +It should be a floating-point value between `0.0f` and `1.0f`, where recommended default is `0.5f`. +Memory allocated with higher value can be treated by the Vulkan implementation as higher priority +and so it can have lower chances of being pushed out to system memory, experiencing degraded performance. + +It might be a good idea to create performance-critical resources like color-attachment or depth-stencil images +as dedicated and set high priority to them. For example: + +\code +VkImageCreateInfo imgCreateInfo = { VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO }; +imgCreateInfo.imageType = VK_IMAGE_TYPE_2D; +imgCreateInfo.extent.width = 3840; +imgCreateInfo.extent.height = 2160; +imgCreateInfo.extent.depth = 1; +imgCreateInfo.mipLevels = 1; +imgCreateInfo.arrayLayers = 1; +imgCreateInfo.format = VK_FORMAT_R8G8B8A8_UNORM; +imgCreateInfo.tiling = VK_IMAGE_TILING_OPTIMAL; +imgCreateInfo.initialLayout = VK_IMAGE_LAYOUT_UNDEFINED; +imgCreateInfo.usage = VK_IMAGE_USAGE_SAMPLED_BIT | VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT; +imgCreateInfo.samples = VK_SAMPLE_COUNT_1_BIT; + +VmaAllocationCreateInfo allocCreateInfo = {}; +allocCreateInfo.usage = VMA_MEMORY_USAGE_AUTO; +allocCreateInfo.flags = VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT; +allocCreateInfo.priority = 1.0f; + +VkImage img; +VmaAllocation alloc; +vmaCreateImage(allocator, &imgCreateInfo, &allocCreateInfo, &img, &alloc, nullptr); +\endcode + +`priority` member is ignored in the following situations: + +- Allocations created in custom pools: They inherit the priority, along with all other allocation parameters + from the parameters passed in #VmaPoolCreateInfo when the pool was created. +- Allocations created in default pools: They inherit the priority from the parameters + VMA used when creating default pools, which means `priority == 0.5f`. 
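+
+For allocations that should go to a custom pool, set the priority on the pool instead.
+A minimal sketch (it assumes the allocator was created with #VMA_ALLOCATOR_CREATE_EXT_MEMORY_PRIORITY_BIT
+and that a suitable `memTypeIndex` was already found, e.g. using vmaFindMemoryTypeIndexForBufferInfo()):
+
+\code
+VmaPoolCreateInfo poolCreateInfo = {};
+poolCreateInfo.memoryTypeIndex = memTypeIndex;
+poolCreateInfo.priority = 0.9f; // inherited by all allocations made from this pool
+
+VmaPool pool;
+VkResult res = vmaCreatePool(allocator, &poolCreateInfo, &pool);
+// Check res...
+\endcode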
+ + +\page vk_amd_device_coherent_memory VK_AMD_device_coherent_memory + +VK_AMD_device_coherent_memory is a device extension that enables access to +additional memory types with `VK_MEMORY_PROPERTY_DEVICE_COHERENT_BIT_AMD` and +`VK_MEMORY_PROPERTY_DEVICE_UNCACHED_BIT_AMD` flag. It is useful mostly for +allocation of buffers intended for writing "breadcrumb markers" in between passes +or draw calls, which in turn are useful for debugging GPU crash/hang/TDR cases. + +When the extension is available but has not been enabled, Vulkan physical device +still exposes those memory types, but their usage is forbidden. VMA automatically +takes care of that - it returns `VK_ERROR_FEATURE_NOT_PRESENT` when an attempt +to allocate memory of such type is made. + +If you want to use this extension in connection with VMA, follow these steps: + +\section vk_amd_device_coherent_memory_initialization Initialization + +1) Call `vkEnumerateDeviceExtensionProperties` for the physical device. +Check if the extension is supported - if returned array of `VkExtensionProperties` contains "VK_AMD_device_coherent_memory". + +2) Call `vkGetPhysicalDeviceFeatures2` for the physical device instead of old `vkGetPhysicalDeviceFeatures`. +Attach additional structure `VkPhysicalDeviceCoherentMemoryFeaturesAMD` to `VkPhysicalDeviceFeatures2::pNext` to be returned. +Check if the device feature is really supported - check if `VkPhysicalDeviceCoherentMemoryFeaturesAMD::deviceCoherentMemory` is true. + +3) While creating device with `vkCreateDevice`, enable this extension - add "VK_AMD_device_coherent_memory" +to the list passed as `VkDeviceCreateInfo::ppEnabledExtensionNames`. + +4) While creating the device, also don't set `VkDeviceCreateInfo::pEnabledFeatures`. +Fill in `VkPhysicalDeviceFeatures2` structure instead and pass it as `VkDeviceCreateInfo::pNext`. +Enable this device feature - attach additional structure `VkPhysicalDeviceCoherentMemoryFeaturesAMD` to +`VkPhysicalDeviceFeatures2::pNext` and set its member `deviceCoherentMemory` to `VK_TRUE`. + +5) While creating #VmaAllocator with vmaCreateAllocator() inform VMA that you +have enabled this extension and feature - add #VMA_ALLOCATOR_CREATE_AMD_DEVICE_COHERENT_MEMORY_BIT +to VmaAllocatorCreateInfo::flags. + +\section vk_amd_device_coherent_memory_usage Usage + +After following steps described above, you can create VMA allocations and custom pools +out of the special `DEVICE_COHERENT` and `DEVICE_UNCACHED` memory types on eligible +devices. There are multiple ways to do it, for example: + +- You can request or prefer to allocate out of such memory types by adding + `VK_MEMORY_PROPERTY_DEVICE_UNCACHED_BIT_AMD` to VmaAllocationCreateInfo::requiredFlags + or VmaAllocationCreateInfo::preferredFlags. Those flags can be freely mixed with + other ways of \ref choosing_memory_type, like setting VmaAllocationCreateInfo::usage. +- If you manually found memory type index to use for this purpose, force allocation + from this specific index by setting VmaAllocationCreateInfo::memoryTypeBits `= 1u << index`. + +\section vk_amd_device_coherent_memory_more_information More information + +To learn more about this extension, see [VK_AMD_device_coherent_memory in Vulkan specification](https://www.khronos.org/registry/vulkan/specs/1.2-extensions/man/html/VK_AMD_device_coherent_memory.html) + +Example use of this extension can be found in the code of the sample and test suite +accompanying this library. 
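+
+For illustration only, a request for such a buffer could look roughly like this
+(a sketch, not taken from those samples; it assumes the allocator was created with
+#VMA_ALLOCATOR_CREATE_AMD_DEVICE_COHERENT_MEMORY_BIT as described above):
+
+\code
+// Small "breadcrumb marker" buffer forced into a DEVICE_UNCACHED memory type.
+VkBufferCreateInfo bufCreateInfo = { VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO };
+bufCreateInfo.size = 4096;
+bufCreateInfo.usage = VK_BUFFER_USAGE_TRANSFER_DST_BIT;
+
+VmaAllocationCreateInfo allocCreateInfo = {};
+allocCreateInfo.usage = VMA_MEMORY_USAGE_AUTO;
+allocCreateInfo.requiredFlags = VK_MEMORY_PROPERTY_DEVICE_UNCACHED_BIT_AMD;
+
+VkBuffer buf;
+VmaAllocation alloc;
+VkResult res = vmaCreateBuffer(allocator, &bufCreateInfo, &allocCreateInfo, &buf, &alloc, nullptr);
+// Check res...
+\endcode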
+
+
+\page vk_khr_external_memory_win32 VK_KHR_external_memory_win32
+
+On Windows, the VK_KHR_external_memory_win32 device extension allows exporting a Win32 `HANDLE`
+of a `VkDeviceMemory` block, to be able to reference the memory on other Vulkan logical devices or instances,
+in multiple processes, and/or in multiple APIs.
+VMA offers support for it.
+
+\section vk_khr_external_memory_win32_initialization Initialization
+
+1) Make sure the extension is defined in the code by including the following header before including VMA:
+
+\code
+#include <vulkan/vulkan_win32.h>
+\endcode
+
+2) Check if "VK_KHR_external_memory_win32" is available among device extensions.
+Enable it when creating the `VkDevice` object.
+
+3) Enable the usage of this extension in VMA by setting flag #VMA_ALLOCATOR_CREATE_KHR_EXTERNAL_MEMORY_WIN32_BIT
+when calling vmaCreateAllocator().
+
+4) Make sure that VMA has access to the `vkGetMemoryWin32HandleKHR` function by either enabling `VMA_DYNAMIC_VULKAN_FUNCTIONS` macro
+or setting VmaVulkanFunctions::vkGetMemoryWin32HandleKHR explicitly.
+For more information, see \ref quick_start_initialization_importing_vulkan_functions.
+
+\section vk_khr_external_memory_win32_preparations Preparations
+
+You can find example usage among tests, in file "Tests.cpp", function `TestWin32Handles()`.
+
+To use the extension, buffers need to be created with `VkExternalMemoryBufferCreateInfoKHR` attached to their `pNext` chain,
+and memory allocations need to be made with `VkExportMemoryAllocateInfoKHR` attached to their `pNext` chain.
+To make use of them, you need to use \ref custom_memory_pools. Example:
+
+\code
+// Define an example buffer and allocation parameters.
+VkExternalMemoryBufferCreateInfoKHR externalMemBufCreateInfo = {
+    VK_STRUCTURE_TYPE_EXTERNAL_MEMORY_BUFFER_CREATE_INFO_KHR,
+    nullptr,
+    VK_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_WIN32_BIT
+};
+VkBufferCreateInfo exampleBufCreateInfo = { VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO };
+exampleBufCreateInfo.size = 0x10000; // Doesn't matter here.
+exampleBufCreateInfo.usage = VK_BUFFER_USAGE_TRANSFER_SRC_BIT | VK_BUFFER_USAGE_TRANSFER_DST_BIT;
+exampleBufCreateInfo.pNext = &externalMemBufCreateInfo;
+
+VmaAllocationCreateInfo exampleAllocCreateInfo = {};
+exampleAllocCreateInfo.usage = VMA_MEMORY_USAGE_AUTO;
+
+// Find memory type index to use for the custom pool.
+uint32_t memTypeIndex;
+VkResult res = vmaFindMemoryTypeIndexForBufferInfo(g_Allocator,
+    &exampleBufCreateInfo, &exampleAllocCreateInfo, &memTypeIndex);
+// Check res...
+
+// Create a custom pool.
+constexpr static VkExportMemoryAllocateInfoKHR exportMemAllocInfo = {
+    VK_STRUCTURE_TYPE_EXPORT_MEMORY_ALLOCATE_INFO_KHR,
+    nullptr,
+    VK_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_WIN32_BIT
+};
+VmaPoolCreateInfo poolCreateInfo = {};
+poolCreateInfo.memoryTypeIndex = memTypeIndex;
+poolCreateInfo.pMemoryAllocateNext = (void*)&exportMemAllocInfo;
+
+VmaPool pool;
+res = vmaCreatePool(g_Allocator, &poolCreateInfo, &pool);
+// Check res...
+
+// YOUR OTHER CODE COMES HERE....
+
+// At the end, don't forget to destroy it!
+vmaDestroyPool(g_Allocator, pool);
+\endcode
+
+Note that the structure passed as VmaPoolCreateInfo::pMemoryAllocateNext must remain alive and unchanged
+for the whole lifetime of the custom pool, because it will be used when the pool allocates a new device memory block.
+No copy is made internally. This is why variable `exportMemAllocInfo` is defined as `static`.
+
+\section vk_khr_external_memory_win32_memory_allocation Memory allocation
+
+Finally, you can create a buffer with an allocation out of the custom pool.
+The buffer should use the same flags as the sample buffer used to find the memory type.
+It should also specify `VkExternalMemoryBufferCreateInfoKHR` in its `pNext` chain.
+
+\code
+VkExternalMemoryBufferCreateInfoKHR externalMemBufCreateInfo = {
+    VK_STRUCTURE_TYPE_EXTERNAL_MEMORY_BUFFER_CREATE_INFO_KHR,
+    nullptr,
+    VK_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_WIN32_BIT
+};
+VkBufferCreateInfo bufCreateInfo = { VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO };
+bufCreateInfo.size = // Your desired buffer size.
+bufCreateInfo.usage = VK_BUFFER_USAGE_TRANSFER_SRC_BIT | VK_BUFFER_USAGE_TRANSFER_DST_BIT;
+bufCreateInfo.pNext = &externalMemBufCreateInfo;
+
+VmaAllocationCreateInfo allocCreateInfo = {};
+allocCreateInfo.pool = pool; // It is enough to set this one member.
+
+VkBuffer buf;
+VmaAllocation alloc;
+res = vmaCreateBuffer(g_Allocator, &bufCreateInfo, &allocCreateInfo, &buf, &alloc, nullptr);
+// Check res...
+
+// YOUR OTHER CODE COMES HERE....
+
+// At the end, don't forget to destroy it!
+vmaDestroyBuffer(g_Allocator, buf, alloc);
+\endcode
+
+If you need each allocation to have its own device memory block and start at offset 0, you can still do that
+by using the #VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT flag. It also works with custom pools.
+
+\section vk_khr_external_memory_win32_exporting_win32_handle Exporting Win32 handle
+
+After the allocation is created, you can acquire a Win32 `HANDLE` to the `VkDeviceMemory` block it belongs to.
+VMA function vmaGetMemoryWin32Handle() is a replacement of the Vulkan function `vkGetMemoryWin32HandleKHR`.
+
+\code
+HANDLE handle;
+res = vmaGetMemoryWin32Handle(g_Allocator, alloc, nullptr, &handle);
+// Check res...
+
+// YOUR OTHER CODE COMES HERE....
+
+// At the end, you must close the handle.
+CloseHandle(handle);
+\endcode
+
+Documentation of the VK_KHR_external_memory_win32 extension states that:
+
+> If handleType is defined as an NT handle, vkGetMemoryWin32HandleKHR must be called no more than once for each valid unique combination of memory and handleType.
+
+This is ensured automatically inside VMA.
+The library fetches the handle on first use, remembers it internally, and closes it when the memory block or dedicated allocation is destroyed.
+Every time you call vmaGetMemoryWin32Handle(), VMA calls `DuplicateHandle` and returns a new handle that you need to close.
+
+For further information, please check documentation of the vmaGetMemoryWin32Handle() function.
+
+
+\page enabling_buffer_device_address Enabling buffer device address
+
+The device extension VK_KHR_buffer_device_address
+allows fetching a raw GPU pointer to a buffer and passing it for use in shader code.
+It has been promoted to core Vulkan 1.2.
+
+If you want to use this feature in connection with VMA, follow these steps:
+
+\section enabling_buffer_device_address_initialization Initialization
+
+1) (For Vulkan version < 1.2) Call `vkEnumerateDeviceExtensionProperties` for the physical device.
+Check if the extension is supported - if returned array of `VkExtensionProperties` contains
+"VK_KHR_buffer_device_address".
+
+2) Call `vkGetPhysicalDeviceFeatures2` for the physical device instead of old `vkGetPhysicalDeviceFeatures`.
+Attach additional structure `VkPhysicalDeviceBufferDeviceAddressFeatures*` to `VkPhysicalDeviceFeatures2::pNext` to be returned.
+Check if the device feature is really supported - check if `VkPhysicalDeviceBufferDeviceAddressFeatures::bufferDeviceAddress` is true. + +3) (For Vulkan version < 1.2) While creating device with `vkCreateDevice`, enable this extension - add +"VK_KHR_buffer_device_address" to the list passed as `VkDeviceCreateInfo::ppEnabledExtensionNames`. + +4) While creating the device, also don't set `VkDeviceCreateInfo::pEnabledFeatures`. +Fill in `VkPhysicalDeviceFeatures2` structure instead and pass it as `VkDeviceCreateInfo::pNext`. +Enable this device feature - attach additional structure `VkPhysicalDeviceBufferDeviceAddressFeatures*` to +`VkPhysicalDeviceFeatures2::pNext` and set its member `bufferDeviceAddress` to `VK_TRUE`. + +5) While creating #VmaAllocator with vmaCreateAllocator() inform VMA that you +have enabled this feature - add #VMA_ALLOCATOR_CREATE_BUFFER_DEVICE_ADDRESS_BIT +to VmaAllocatorCreateInfo::flags. + +\section enabling_buffer_device_address_usage Usage + +After following steps described above, you can create buffers with `VK_BUFFER_USAGE_SHADER_DEVICE_ADDRESS_BIT*` using VMA. +The library automatically adds `VK_MEMORY_ALLOCATE_DEVICE_ADDRESS_BIT*` to +allocated memory blocks wherever it might be needed. + +Please note that the library supports only `VK_BUFFER_USAGE_SHADER_DEVICE_ADDRESS_BIT*`. +The second part of this functionality related to "capture and replay" is not supported, +as it is intended for usage in debugging tools like RenderDoc, not in everyday Vulkan usage. + +\section enabling_buffer_device_address_more_information More information + +To learn more about this extension, see [VK_KHR_buffer_device_address in Vulkan specification](https://www.khronos.org/registry/vulkan/specs/1.2-extensions/html/chap46.html#VK_KHR_buffer_device_address) + +Example use of this extension can be found in the code of the sample and test suite +accompanying this library. + +\page general_considerations General considerations + +\section general_considerations_thread_safety Thread safety + +- The library has no global state, so separate #VmaAllocator objects can be used + independently. + There should be no need to create multiple such objects though - one per `VkDevice` is enough. +- By default, all calls to functions that take #VmaAllocator as first parameter + are safe to call from multiple threads simultaneously because they are + synchronized internally when needed. + This includes allocation and deallocation from default memory pool, as well as custom #VmaPool. +- When the allocator is created with #VMA_ALLOCATOR_CREATE_EXTERNALLY_SYNCHRONIZED_BIT + flag, calls to functions that take such #VmaAllocator object must be + synchronized externally. +- Access to a #VmaAllocation object must be externally synchronized. For example, + you must not call vmaGetAllocationInfo() and vmaMapMemory() from different + threads at the same time if you pass the same #VmaAllocation object to these + functions. +- #VmaVirtualBlock is not safe to be used from multiple threads simultaneously. + +\section general_considerations_versioning_and_compatibility Versioning and compatibility + +The library uses [**Semantic Versioning**](https://semver.org/), +which means version numbers follow convention: Major.Minor.Patch (e.g. 2.3.0), where: + +- Incremented Patch version means a release is backward- and forward-compatible, + introducing only some internal improvements, bug fixes, optimizations etc. + or changes that are out of scope of the official API described in this documentation. 
+- Incremented Minor version means a release is backward-compatible, + so existing code that uses the library should continue to work, while some new + symbols could have been added: new structures, functions, new values in existing + enums and bit flags, new structure members, but not new function parameters. +- Incrementing Major version means a release could break some backward compatibility. + +All changes between official releases are documented in file "CHANGELOG.md". + +\warning Backward compatibility is considered on the level of C++ source code, not binary linkage. +Adding new members to existing structures is treated as backward compatible if initializing +the new members to binary zero results in the old behavior. +You should always fully initialize all library structures to zeros and not rely on their +exact binary size. + +\section general_considerations_validation_layer_warnings Validation layer warnings + +When using this library, you can meet following types of warnings issued by +Vulkan validation layer. They don't necessarily indicate a bug, so you may need +to just ignore them. + +- *vkBindBufferMemory(): Binding memory to buffer 0xeb8e4 but vkGetBufferMemoryRequirements() has not been called on that buffer.* + - It happens when VK_KHR_dedicated_allocation extension is enabled. + `vkGetBufferMemoryRequirements2KHR` function is used instead, while validation layer seems to be unaware of it. +- *Mapping an image with layout VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL can result in undefined behavior if this memory is used by the device. Only GENERAL or PREINITIALIZED should be used.* + - It happens when you map a buffer or image, because the library maps entire + `VkDeviceMemory` block, where different types of images and buffers may end + up together, especially on GPUs with unified memory like Intel. +- *Non-linear image 0xebc91 is aliased with linear buffer 0xeb8e4 which may indicate a bug.* + - It may happen when you use [defragmentation](@ref defragmentation). + +\section general_considerations_allocation_algorithm Allocation algorithm + +The library uses following algorithm for allocation, in order: + +-# Try to find free range of memory in existing blocks. +-# If failed, try to create a new block of `VkDeviceMemory`, with preferred block size. +-# If failed, try to create such block with size / 2, size / 4, size / 8. +-# If failed, try to allocate separate `VkDeviceMemory` for this allocation, + just like when you use #VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT. +-# If failed, choose other memory type that meets the requirements specified in + VmaAllocationCreateInfo and go to point 1. +-# If failed, return `VK_ERROR_OUT_OF_DEVICE_MEMORY`. + +\section general_considerations_features_not_supported Features not supported + +Features deliberately excluded from the scope of this library: + +-# **Data transfer.** Uploading (streaming) and downloading data of buffers and images + between CPU and GPU memory and related synchronization is responsibility of the user. + Defining some "texture" object that would automatically stream its data from a + staging copy in CPU memory to GPU memory would rather be a feature of another, + higher-level library implemented on top of VMA. + VMA doesn't record any commands to a `VkCommandBuffer`. It just allocates memory. +-# **Recreation of buffers and images.** Although the library has functions for + buffer and image creation: vmaCreateBuffer(), vmaCreateImage(), you need to + recreate these objects yourself after defragmentation. 
That is because the big + structures `VkBufferCreateInfo`, `VkImageCreateInfo` are not stored in + #VmaAllocation object. +-# **Handling CPU memory allocation failures.** When dynamically creating small C++ + objects in CPU memory (not Vulkan memory), allocation failures are not checked + and handled gracefully, because that would complicate code significantly and + is usually not needed in desktop PC applications anyway. + Success of an allocation is just checked with an assert. +-# **Code free of any compiler warnings.** Maintaining the library to compile and + work correctly on so many different platforms is hard enough. Being free of + any warnings, on any version of any compiler, is simply not feasible. + There are many preprocessor macros that make some variables unused, function parameters unreferenced, + or conditional expressions constant in some configurations. + The code of this library should not be bigger or more complicated just to silence these warnings. + It is recommended to disable such warnings instead. +-# This is a C++ library with C interface. **Bindings or ports to any other programming languages** are welcome as external projects but + are not going to be included into this repository. +*/ + diff --git a/client/main.go b/client/main.go deleted file mode 100644 index a04b34d..0000000 --- a/client/main.go +++ /dev/null @@ -1,221 +0,0 @@ -package main - -import ( - "time" - - "github.com/gen2brain/raylib-go/raylib" -) - -type ClientAuth struct { - -} - -type Entity struct { - -} - -type LoginState int - -const ( - LOGIN_STATE_CREDENTIALS LoginState = iota - LOGIN_STATE_ATTEMPTING - LOGIN_STATE_ERROR -) - -type ButtonState int - -const ( - BUTTON_STATE_IDLE ButtonState = iota - BUTTON_STATE_HOVER - BUTTON_STATE_PRESSED -) - -type Scale struct { - window rl.Vector2 - ui uint -} - -type ClientState struct { - auth *ClientAuth - login LoginState - login_button ButtonState - scale Scale - screen rl.Rectangle - camera rl.Camera - - command_queue chan interface{} - network_queue chan interface{} - - context *Entity - context_pos rl.Vector2 -} - -const ( - WIDTH uint = 1600 - HEIGHT uint = 1000 -) - -func scale(original rl.Rectangle, scale Scale) rl.Rectangle { - return rl.Rectangle{ - X: original.X, - Y: original.Y, - Width: original.Width*float32(scale.ui)/scale.window.X, - Height: original.Height*float32(scale.ui)/scale.window.Y, - } -} - -func offset(original rl.Rectangle, off_x, off_y int, scale Scale) rl.Rectangle { - return rl.Rectangle{ - X: original.X + float32(off_x)/scale.window.X, - Y: original.Y + float32(off_y)/scale.window.Y, - Width: original.Width, - Height: original.Height, - } -} - -func center(container, original rl.Rectangle) rl.Rectangle { - return rl.Rectangle{ - X: (container.Width - original.Width )/2 + container.X, - Y: (container.Height - original.Height)/2 + container.Y, - Width: original.Width, - Height: original.Height, - } -} - -func button(rectangle rl.Rectangle, idle, hover, pressed rl.Color, text string, font_size int32, text_color rl.Color, last_state ButtonState) (action bool, next_state ButtonState) { - over := rl.CheckCollisionPointRec(rl.GetMousePosition(), rectangle) - action = false - next_state = last_state - - if(rl.IsMouseButtonPressed(rl.MouseButtonLeft) && over) { - next_state = BUTTON_STATE_PRESSED - } else if(rl.IsMouseButtonReleased(rl.MouseButtonLeft) && last_state == BUTTON_STATE_PRESSED) { - next_state = BUTTON_STATE_IDLE - if(over) { - action = true - } - } else if(rl.IsMouseButtonUp(rl.MouseButtonLeft)) { - if(last_state == 
BUTTON_STATE_IDLE && over) { - next_state = BUTTON_STATE_HOVER - } else if (last_state == BUTTON_STATE_HOVER && !over) { - next_state = BUTTON_STATE_IDLE - } - } - - var color rl.Color - switch(next_state) { - case BUTTON_STATE_HOVER: - color = hover - case BUTTON_STATE_PRESSED: - color = pressed - default: - color = idle - } - rl.DrawRectangleRec(rectangle, color) - text_size := rl.MeasureTextEx(rl.GetFontDefault(), text, float32(font_size), 1.0) - rl.DrawText(text, int32(rectangle.X + rectangle.Width/2 - text_size.X/2), int32(rectangle.Y + rectangle.Height/2 - text_size.Y/2), font_size, text_color) - - return action, next_state -} - -func NetworkThread(network_queue chan interface{}) { - for(true) { - // TODO: remove - time.Sleep(time.Millisecond) - } -} - -func LogicThread(state *ClientState) { - for(true) { - select { - case _ = <- state.command_queue: - case _ = <- state.network_queue: - } - time.Sleep(time.Millisecond) - } -} - -func main() { - state := ClientState{ - auth: nil, - login: LOGIN_STATE_CREDENTIALS, - login_button: BUTTON_STATE_IDLE, - scale: Scale{ - ui: 1, - }, - camera: rl.NewCamera3D(rl.Vector3{}, rl.Vector3{}, rl.Vector3{}, 90, rl.CameraOrthographic), - context: nil, - } - - go LogicThread(&state) - - rl.SetConfigFlags(rl.FlagWindowHighdpi) - rl.InitWindow(0, 0, "roleplay") - rl.SetExitKey(0) - rl.SetTargetFPS(60) - - state.scale.window = rl.GetWindowScaleDPI() - rl.SetWindowSize(int(float32(WIDTH)/state.scale.window.X), int(float32(HEIGHT)/state.scale.window.Y)) - - state.screen.Width = float32(WIDTH) /state.scale.window.X - state.screen.Height = float32(HEIGHT)/state.scale.window.Y - - for(rl.WindowShouldClose() == false) { - if(state.auth == nil) { - // Draw login - rl.BeginDrawing() - rl.ClearBackground(rl.Black) - - logo_rect := scale(rl.Rectangle{Width: 1200, Height: 300}, state.scale) - logo_rect = center(state.screen, logo_rect) - logo_rect = offset(logo_rect, 0, -250, state.scale) - rl.DrawRectangleRec(logo_rect, rl.Gray) - - form_rect := scale(rl.Rectangle{Width: 600, Height: 400}, state.scale) - form_rect = center(state.screen, form_rect) - form_rect = offset(form_rect, 0, 200, state.scale) - rl.DrawRectangleRec(form_rect, rl.Gray) - - switch(state.login) { - case LOGIN_STATE_CREDENTIALS: - submit_rect := scale(rl.Rectangle{Width: 100, Height: 50}, state.scale) - submit_rect = center(form_rect, submit_rect) - submit_rect = offset(submit_rect, 0, 125, state.scale) - var submit_action bool - submit_action, state.login_button = button(submit_rect, rl.Black, rl.Brown, rl.Red, "Submit", 12, rl.White, state.login_button) - if submit_action { - // TODO: real auth via network thread - state.auth = &ClientAuth{} - } - case LOGIN_STATE_ATTEMPTING: - text := "Logging in..." 
- text_size := rl.MeasureTextEx(rl.GetFontDefault(), text, 20, 1.0) - text_rect := center(form_rect, rl.Rectangle{Width: text_size.X, Height: text_size.Y}) - rl.DrawText(text, int32(text_rect.X), int32(text_rect.Y), 20, rl.Black) - case LOGIN_STATE_ERROR: - text := "Error: {TODO}" - text_size := rl.MeasureTextEx(rl.GetFontDefault(), text, 20, 1.0) - text_rect := center(form_rect, rl.Rectangle{Width: text_size.X, Height: text_size.Y}) - rl.DrawText(text, int32(text_rect.X), int32(text_rect.Y), 20, rl.Black) - } - - rl.EndDrawing() - } else { - rl.BeginDrawing() - rl.ClearBackground(rl.Black) - rl.BeginMode3D(state.camera) - - if rl.IsMouseButtonPressed(rl.MouseButtonRight) { - state.context_pos = rl.GetMousePosition() - // TODO: Cast a ray into the bounding boxes - } - - if state.context != nil { - - } - - rl.EndMode3D() - rl.EndDrawing() - } - } -} diff --git a/client/src/main.c b/client/src/main.c new file mode 100644 index 0000000..9e55750 --- /dev/null +++ b/client/src/main.c @@ -0,0 +1,34 @@ +#include "render.h" + +int render_thread(GLFWwindow* window, RenderContext* render_context) { + while(glfwWindowShouldClose(window) == 0) { + glfwPollEvents(); + } + return 0; +} + +int logic_thread() { + return 0; +} + +int network_thread() { + return 0; +} + +int main() { + GLFWwindow* window = init_window(); + if(window == NULL) { + return 1; + } + + RenderContext render_context = {}; + if(init_vulkan(window, &render_context) != VK_SUCCESS) { + return 2; + } + + if(render_thread(window, &render_context) != 0) { + return 3; + } + + return 0; +} diff --git a/client/src/main.o b/client/src/main.o new file mode 100644 index 0000000000000000000000000000000000000000..aa6985af0d4eb992be6a930b21269bf6f525044c GIT binary patch literal 24944 zcmbW92Yggj`uERsPcjT7z=V(x1VU7r1yX21P?=;V$v~1BW)iA0gk&I*G*c*wk_B~H zJBqzwN7=PtSuD7)cGrfh;##q+;#vS3C?fCgIrq+F%AJA#o6qMv&$;J3=Q+=5_nteM zbKdyvyFXHeAb~&rNCLYhK5_v527-FNj`R1}JUzk68Sxjq?tw7VizD&p_jd%Acj#gL zy2?4bs{1+^+G_;Zo`&!?zYrFVOA=iyf`5L0U0`A7Vt>uTg>8YQY~L}i=dsm zRn3GrVE7kO(9MS~gZ3Jl7qxI?w7qNC8T`%fqsv>AQ+n9=G&J<9puP5vHh;^aMeWh{ zezfU&^;TUT>~C1<7V+#Ujc9xGxTElwF2 z*o*!!n$px>b4^pA-NfGYhP|~$<_)kHVIb<#@2_oXYOSg5@OL&ZYiO?X`^z1MD@x?d z(XYCU$Wdns!Nb(HAhCaq4b1`05?$X7hP_;4yljL${>xinOpX};^zp~`{M^Y=mpBhW1(s$YQkET9+PH>AZ%9ACIVh!n`(kZU11>;e48It({056fimfBieW zM(j9rF5P#iSghOCL;U)w6u-ik{=0fi{S~&>cL_0`uQ`11@ZkYMba@Y*YrhZq)@_si zclkP8{@(J;eL;JNWuG1Ycs6?r=iMN)4{Vh44{XG(g6)k$zKuwO!T!69vZ=A_E!O=M zWp9A4$|uA))-N;kQUCs&_9^m&r`H?l7z_QmOwP06+4)=G!#ei&_m+QdWIrj5h7V6q zmcv)C9KL7O@U{WA0fUC84eDoUhbLMRxzgqEl$29aCZjRsC?SUrMxP`NL*TTO6cnDU zR7M0d6)%rx{)+76li^yy1E7(L@CWI;kHR+cQFQ%ZJ^iV7}WID+aDaq^y zZXms(;!5X>LR@BsUBbYCkSZ>+LTSmYIxy@KsmBl+QHVjuqWpr|%K2x^Lm#uzz_5#^fBik3sj#}ASCzl4~ zvPE+FV!5JTp3@-DJxi`!BF}4-=Qqh!&9bXS{;5^2K3lG7lNYqh3p?aRo$}91<=SQP zFU#e{E952T$V<O&62A`4W=1TuSoR%ShgKImr!Iki7j$l6R~ldFNFm@4A}g-Rnu-a}CLR zuO+$hI+FKYPxAg7NN&23+h`L~-%K5z@k2X7_$&}}3i-avBm?Ia($gXE)kl6>qg zl8@g_^6&SM+;T6;CpMCN@;;JJ-B0rAO(eJemE<#jBl(X9NN#(O`@?W1S@#<$vy!N>guYaM$8~;_}%`cVM z@s$#9?N;L7Un}wU|CD&=8ztWTR*9Y8De>O-O8jS!67TO-;)8ukeE5SBAN{Dr$3H3Y z$eN_>0e zJ%@4i-8v<{zeI7Iv9Ji*`^5KEjCjAThd5X zz|xOY4J_%Ts$l6)st%S6QkAe|lB$Je0I6zNvPiGTKvF};hDa%Qu29@PxQlrW;n$)nej3G6yEV-lxmL-qW$g<><8d{bDQe(?9 zmek;~j3YI=EaOQHFUtf{85`v>7iGZoI$TC>800|^wHY&2T)!W%C0ub zni=k!AHKad#4;P{bBHW+Nwdr&X*q+g-_L%YNjE4t-^kNrJ068MBhQuA8WPrpMAll; ztaT)<0ZOtiqGao0T4k-L^Q{eZp7ktRX8^+PLLv$Uo7&O0K4Rlw3o1D|rFkrR0Tl zr;-=Zn>TRzf2KEXz8G>X?Y@)!_zUgYeh}qdOrI)w34NyIrS!Rym(ki+S3~b|dg!Ju z$Sdg0m)CwItyj{P9W2+;OAqY7W2N;fqrO)g^<8h&_Zp+TYmIteXUOY~`rly48x6ZR 
z8Qs!z0VOB(N)c1B-k_#iNJ#FdRP74u?Fdimslwt;gr_4cdaI=lwcJXg7c2JGc!5JL zw~^?>*L~y$WoQG5etg|mTekp)(%Dd7?Ft(VWqH_;?KFI*mEq$K(l#aayMt^TpFRg4 z12pOFpQ$a7@`Q}LXfs~W<%uHWUV4^8Cs{M@r*}AXvMu9p^d*NzXJu}u9ax;nF*#c1 zbM!d}b4O{Juj;|PLM`((J(ypnWxh_w;R^*>Fk8#qL1Q^MZh@Bh9l5XymgBc*ncvgJ z9GtLC%iKdhv%B^ItDOwH(rx(i1K6QqCBCQ#bj}Wcxna-`gnJ6&iakBO}W-YFiEt^QY z3gLec$<)FBARCF)tS9IpL=nN(R(g?xTB=o~KY`HGM0M%^MBph(9@_Is>S0wyJVSI& z&$JH^|18nVLu|Vc>i17d{}%FJB!^_bLN+-t<9SNs5pj&jcu5}xgRB`Z>!HD^BK;U7 zzDU$)8;U@`7byKC$Tx|W*z!2y4N9K^`7Ozj+27D?IW*%f>O_Rh5gA{QgZufo!i?Rz zz2m25d`%Z}-0-rD|LJifJR)rsGJH;ymOc1$8b_AwtbeOC>X9w7-qwTG?4+!B)Yzq- z+19N8=)qpuwygK{U`lpY)(5(7YIaW6hcpD`Q19$vSs&9#4))0&mGuebaj!lkDSaQLrqL?fFC0Nyx`w&cL!%Y89tiZaXtutP=~?*b zrBSnO2*>u+Y{NNr3_f~m)MOjWv8kGEGRHdbk)~0L&Bd`cExj7DzedY#{8rg7T@!V< zgW^3^{bl3x1b}b1L?hS)`1^{FK{CEW7!9hy1z;=K5AyMy%S><2bDJpxPtUeEx# zzztwCcnN$AegeJGZ6kmkoB=w(rQj~G6}$`ffmD1~I}A(#RiFc`2akjIz*^&o}-;Faw+gE(EuOt>6Q22xQ|soC4qh0k8~Q4ITuqg8zXY_=8CVDI z2hV|z!2!@OO^7jI8mI&3f!n|~@CE46Pl%I1C1?Z}gH7O7@D1pRZ&^lx5)c4uz#U*4 z_yimRgYm`PDPRs*2CfB7r+TB!D4U;=mb}Tx!@M?FxU>>2S0;v!6(2v0p)|& z5bh6h5FQUk0S5?x4$uhB2N!}lzy~e?H-L>`3wRJb3l4$fCt|FC)4^;oA5?$<=m2ZL zbzl>C0qg{OfVB|)33?$s2GRvQU;>y28o)B}CCYdQT!Qe;;1Tc~_z1iXz6VK@a0>t# zU?gyXQ^72-5;TKL!42Sk@Hlu6yaINEUqI3+umNn~WH1wyfO>ERSOabWTfh$RGWY=O z2j7CE$*4Q}$pgw-ngaMI<+7aG#+v5EIZFd=?F}u>Ir)>u99N2QiucyxLN6OW)*=JvYgx+|;f<^Bqn*X#BsE2Q{~DxD7Q zKqJmOu5=s{xSKup;^~D^Nk!xA;mP?E!qSV7)teyQJHB*6(fBaqk-3rTjhl+R}BN%7i}%Zc==J4?Ly zNPqQOnD`q?i1s&>5Zm*iq*#ALNm2fW;-mV%J3i81J6gAU45%B|*n+Tm6-rlbCrCHE zPoQB`B7p|iB|$P*DN!2{nsfr*4(Fb?R10P^WIC1a<0GidUy@4f8sA^C!eUM-Uf1U6fwz%w|o~ z*r@rz*`ub0iYh60+asPi`4dXbJ>qB+r~5#j5y^7U*Tf&jE|dV zLRoM?#O6Yp-n}NRFfneGCYo2tSyf!%*H^=FvW9DzHe{GW=yf#O>qwm=nlauBVk#JJ zMt|#xRk0WQ)r2$c4X2T6-d9FyDbLqL*;&Edg56hD>>sy0-Y^Lzo3?;T9iKQeCnw4r zZ?GA*G2+bTf=ZpRJYi-|PL$bL=f!l0VH*$qxFr`RE}4@PWlmUf;qt^K#~W(O?ZV}8 zMwv>@&r9sLN{{b2k`sEa(i3J+=sU6>!THc@M)YhA)(#2@$}d=c1OrMTv4MDZZQTPH zh;#GiZx7V_?YIKNMEGMyCE<%3vqVJW3ln@hGS*XWuXIAkc-aawpmd;8a0HbIGqHZe zn2DnjZf2Zu>j?7(uYIy~$D?}!5z*BQ%cSRt?wZ7Tq6-QRyZ=i;N0le4pkQyBc<0S8 zm>qmf#8g)$mWFAV^(yi!l_J)&hbhN)Qn#K8>18MCBxSQ(SB1K#lgTj9HB>gV0axv) zdX*JbCOywl^=c?jbU}%FD3mFxprgtYRZxOn#cMuZjq%CfK7<-LY%+&Z`3nIv1&-&p zYc>$wgkg3JHP(u5v{IGWe1i?Ud77!HX|HYpk+V6dYcgNL z23TYlCd{MSGQ3_v?J$S!_#aK_xT3g|(T+6!KaQ~`jG3(JS;cF%5Nkt5ekCwhLF>l}@p$a!XFCuXk2Q95#IF>@}CrKovlsueT$;%PBSr|VDF~)t2cfkiz zlsWym*!{Bu%(X~ENsrPVF|L|=+?p)@qcSLyQH%QSg-USc7nLbWUw13Cx`~r1 zY)y|fMWPphsM5kRMlK&hU1+T9H|DW+mxsf3yt@#H@CMi@^99BNu{4?0qi8^bO@m zy%@?ERp3$Qi>l!fdLIwg&}%|i<44Z_kOn`7yQf;DlJS;+;$vSXVwyE(tsH95qc9q5 z3RBha1$eyMCp?UP?Fltgl+idZ7|~CXqkYMU$rq=)L+%9UY~-@UXzTF0hH^w|n!a#E zd13m}5v3DcgW>5MwE_$N&J90oEN*FRY4hRCkbu#O#qM(a1ki`q7^nX2UZuOT9KU(9 zdxIk)T*Kk=;Y+YNe$QBcx1Ld*a6Lph@#8s%Zql%4q8DZ_fl{&Shy@l^P0X9*k8eXy zPv~<|RY4xcczhdW4&3hjKByIr3$}fDr5*0_qB%2NK9{@FELqQ9R$gkBXm_}aocxV= z{Nh6@32Wi8`+WYYGOxS3w9Kra$eu}PD%d%d_A1v*r{7+oeg~;)eAMGb@4EPv3Jy(u zur2Yl1={fo+zR|E*BGB3uhWNL)A}pi4kzAmDx3}%|7zAOwZiTyFLKW|O)GJgxZ@{y zo#l4CxR@&V%IsbwIh+;tN{8R;EP+F1i8PDx<62i`wY%DvP&=yT-avb2V}}U+rntD+ z>GSbxPNlmFcI=Khd{O19aJsA2>yL9LH;d01cEL@hE?t$y?g~%2v&zYtoa$G;$mVyK z_{-crG^Vq{?NtUN(j3m2F5Cv9Q(cv=Dwn<7H5cOmL%RgctpC7M&uru>hvDwhbQ}1E zttUnw_YbF6RmBao#A3pxzq3PV#KQ7UF$wpx!v%k za*p3!G|gFDr5aXGF&5%np0C>DaeJ%6>sI2mmsX&iF#tSncX_v`)s@pL-LopYBaG%{ zEt3YEiRN?gbaZ+v@x$vf45jk0z6lni8o1l0t5VPB_Sk1sJN?CWkE*`i;XqHTI(S?j zCr>Uv_cyu)BV|Ul3*)uhNbu+B+&{r#t5@Ba*Rx!e4)-j-s}jFT4NWXxb&=bH(FSLY zsix;Ks_6)ugL*<_^Ba$eh^(qE;X^2RdkPxRD~tK&;PIipLNJml?4?e4$Da`$73ted z{O;cE_qd8xD`A+ivq#qVx=Xm_+#aV__wJ~(n2jA%my9BcaStm#vPZruhaaOiG!H`K 
zGSYT%bU5%+bi)(eBwZygw4VN-Dx(RE=dRwCTshRV!d1nCI--M0s>{p$zGAP_$Oq7*()}?@^!Rz*7B3Ky`!eNHh@L3D>mq_68hP7IH?aS zlEr~G=sEZ_#p;fR#`cO}tm=+939!*xzoNaNwx$sWF)nRTWqFJkY+o{qK%S;RQ%hR| z4hWpNBvQG86IOWIc+*n{D=5tsRV!Kp#VwuKjMRzM1AdOx5thJK^n9!)TJ_8;ct2Bj zl3_9wtEbinYFb_BmE|}G(C+nO)$eo7#Wtro9;e@L6j25py=>hVzV5cmSE9Y)9ByO1 zzCONSH}TUs_#50w)!2ePU;H5!ML5O=bZ6T24OuW`Y|TKrfxLUkD>n412EFQ ztfsZL9-|YT-BE)!mwbd1x-X<+S}S{R+QJQz#ci#-r3sP z($*2E3+;sC-Zk4Dr?$o!ZiJM(^J9-*YfF1WN5J0LT3-VjkrAqsxsss*tJ_gkz3(wW zx!z$hyd|%uBg_?j>lPc(lSFOkJ=ch7$61*?M}ocEgjbCA$SXsmyB`f(-4WrL!}>d* zH+x8rn<&`z#yAbTj2J2nT(g*hRV-IeRXiw7GA?ZhEDPz)#6?h-!x8$6+HmOT!p=pD zxLao~De1n9yh5hBKx;?6->AHPp}WbY8L28lpyF*c{?NQ@#qLX3GxlS8Vy<92Qa3cY zR8Hd--dfYn9_n{+PxM7ZrE2w^pZ@N88D$r#igx>=Un8$xk(#BLpEU~`aVlw`2{(-v zOnE5#f*0XoWHKCE)X?0}ULU9n&xF~&sF5d^e^CGr6V*073hk!LZm|4KtIqQWP&V!vjhb8{R`~x7ub(4*f;*e-g;FA_L>Xq z6IXxO+b#N5ZYn(?b57Re*9G1Pj9|<{Jz)w3`_BR-}Td~ht9uc$>N)tEX%}Yt@Y_ zAAhfW)hpJ0g=3RXT=x6wXI|R($&R7!KkmDLH!1T*Y2F~s8^L*#H*ebK4dlGpnm0i6 zMsnVW&6~e@GdFM8=8fUJS)4a=^QLm%Jk1--d2=;yx$`Dy-sI04s4qkN z@kVvt(9RpSc{4h1p5{%{yfL0Pck||I-k8oC;CWL#Z*u32;hT_`H_P*ebKbnp8{v5) zJ#TvFgA4f20=S>Kk8#$6z&Q#6=Ku(}B~AxUFdfVUXMn|^3ABS1U=_F!TmsgC>%c8w zBX|Hj3Z4Ypzzbjp*aQW}@GHIADlgH>%5TEHKG(LkzXnf|5(D>9Y;zOkb@##uJ@>xC_AMztK zJ|T+uXedE^Qj$=IUx_3>GfHTDR1on|K|Y ziu{{uTylQ=8KY6Z;LkE67%x3|zuN#k+@oUbpM){N_sfTnPDBU$^k#ubzB3{)G#Np0 zEWpVF!UyI7Nkg^1$=Q~>5El2pdgTk39{c3TYlcpK=a<_GM?8o)`grrpS2j*9E6DwJ zn*H0ezAoE}NX`0(aNhpc>(c-G-WezF-`_L)c|=M&>z*NFYlauSdHJG>MaQ1;nJk@UIV}GCe~L(mz@DvYwhe z)G{r(=d@%wEjih;CRw^FEGg40y`JfHQL?x&Ig2&=Lc?O2j!$S<g0Y; zA$ppgum4sKuhwmBO{O)lqS??MM?421x3j%1x3LB9FS!lPwT+!PDq(47QEXd!1_Z2KayH9@5i^f{1Eik5DEN)!1Y%HYyY(~HzeESK8`4t0U z{5=iO^`mcw#F!p^B_!tD=sHM@=+UDfF;_e}7U5Sf;JT(U8kFWN q^-AumZ|Q8TD{gFQ4+y`pZZcl#{ZTLS{)o5y#*U`ee8Hc2;{O3H8^ZSh literal 0 HcmV?d00001 diff --git a/client/src/render.c b/client/src/render.c new file mode 100644 index 0000000..33312ff --- /dev/null +++ b/client/src/render.c @@ -0,0 +1,1050 @@ +#include "render.h" +#include "vk_mem_alloc.h" + +const uint32_t MAX_FRAMES_IN_FLIGHT = 2; + +const char * validation_layers[] = { + "VK_LAYER_KHRONOS_validation", + //"VK_LAYER_LUNARG_api_dump", + "VK_LAYER_KHRONOS_synchronization2", + "VK_LAYER_KHRONOS_shader_object", +}; +uint32_t validation_layer_count = sizeof(validation_layers) / sizeof(const char *); + +const char * instance_extensions[] = { + VK_KHR_PORTABILITY_ENUMERATION_EXTENSION_NAME, + VK_EXT_DEBUG_UTILS_EXTENSION_NAME, + "VK_EXT_metal_surface", + VK_KHR_SURFACE_EXTENSION_NAME, +}; +uint32_t instance_extension_count = sizeof(instance_extensions) / sizeof(const char *); + +const char * device_extensions[] = { + VK_KHR_SWAPCHAIN_EXTENSION_NAME, + VK_EXT_DESCRIPTOR_INDEXING_EXTENSION_NAME, + "VK_KHR_portability_subset", +}; +uint32_t device_extension_count = sizeof(device_extensions) / sizeof(const char *); + +VkFormat depth_formats[] = { + VK_FORMAT_D32_SFLOAT, + VK_FORMAT_D32_SFLOAT_S8_UINT, + VK_FORMAT_D24_UNORM_S8_UINT +}; +uint32_t depth_format_count = sizeof(depth_formats) / sizeof(VkFormat); + + + +void glfw_error(int error, const char* description) { + fprintf(stderr, "GLFW_ERR: 0x%02x - %s\n", error, description); +} + +GLFWwindow* init_window() { + glfwInit(); + glfwSetErrorCallback(glfw_error); + + glfwWindowHint(GLFW_CLIENT_API, GLFW_NO_API); + glfwWindowHint(GLFW_RESIZABLE, GLFW_TRUE); + GLFWwindow* window = glfwCreateWindow(800, 500, "roleplay", 0, 0); + + return window; +} + +bool check_validation_layers(const char ** layers, uint32_t num_layers) { + uint32_t layer_count; + VkResult result; + + result = 
vkEnumerateInstanceLayerProperties(&layer_count, 0); + if(result != VK_SUCCESS) { + return false; + } + + VkLayerProperties* available_layers = malloc(sizeof(VkLayerProperties)*layer_count); + result = vkEnumerateInstanceLayerProperties(&layer_count, available_layers); + + for(uint32_t i = 0; i < num_layers; i++) { + bool found = false; + for(uint32_t j = 0; j < layer_count; j++) { + if(strcmp(layers[i], available_layers[j].layerName) == 0) { + found = true; + } + } + if(found == false) { + free(available_layers); + return false; + } + } + + free(available_layers); + return true; +} + +VkResult create_instance(VkInstance* instance) { + if(check_validation_layers(validation_layers, validation_layer_count) == false) { + fprintf(stderr, "requested validation layers not supported\n"); + return VK_ERROR_VALIDATION_FAILED_EXT; + } + + VkApplicationInfo app_info = { + .sType = VK_STRUCTURE_TYPE_APPLICATION_INFO, + .pApplicationName = "roleplay", + .applicationVersion = VK_MAKE_VERSION(0, 0, 1), + .pEngineName = "roleplay", + .engineVersion = VK_MAKE_VERSION(0, 0, 1), + .apiVersion = VK_API_VERSION_1_2, + }; + + uint32_t glfwExtensionCount = 0; + const char** glfwExtensions; + + glfwExtensions = glfwGetRequiredInstanceExtensions(&glfwExtensionCount); + + const char** requested_extensions = malloc(sizeof(char*)*(glfwExtensionCount + instance_extension_count)); + for (uint32_t i = 0; i < glfwExtensionCount; i++) { + requested_extensions[i] = glfwExtensions[i]; + } + + for (uint32_t i = 0; i < instance_extension_count; i++) { + requested_extensions[glfwExtensionCount + i] = instance_extensions[i]; + } + + VkInstanceCreateInfo instance_info = { + .sType = VK_STRUCTURE_TYPE_INSTANCE_CREATE_INFO, + .pApplicationInfo = &app_info, + .enabledLayerCount = validation_layer_count, + .ppEnabledLayerNames = validation_layers, + .enabledExtensionCount = glfwExtensionCount + instance_extension_count, + .ppEnabledExtensionNames = requested_extensions, + .flags = VK_INSTANCE_CREATE_ENUMERATE_PORTABILITY_BIT_KHR, + }; + + + + VkResult result = vkCreateInstance(&instance_info, 0, instance); + if(result != VK_SUCCESS) { + fprintf(stderr, "vkCreateInstance: %s\n", string_VkResult(result)); + return result; + } + + free(requested_extensions); + return VK_SUCCESS; +} + +static VKAPI_ATTR VkBool32 VKAPI_CALL debug_callback( + VkDebugUtilsMessageSeverityFlagBitsEXT severity, + VkDebugUtilsMessageTypeFlagsEXT type, + const VkDebugUtilsMessengerCallbackDataEXT* callback_data, + void* user_data) { + + (void)severity; + (void)type; + (void)user_data; + + fprintf(stderr, "Validation layer: %s\n", callback_data->pMessage); + + return VK_FALSE; +} + +VkResult create_debug_messenger(VkInstance instance, VkDebugUtilsMessengerEXT* debug_messenger) { + VkDebugUtilsMessengerCreateInfoEXT messenger_info = { + .sType = VK_STRUCTURE_TYPE_DEBUG_UTILS_MESSENGER_CREATE_INFO_EXT, + .messageSeverity = VK_DEBUG_UTILS_MESSAGE_SEVERITY_VERBOSE_BIT_EXT, + .messageType = VK_DEBUG_UTILS_MESSAGE_TYPE_GENERAL_BIT_EXT | VK_DEBUG_UTILS_MESSAGE_TYPE_VALIDATION_BIT_EXT | VK_DEBUG_UTILS_MESSAGE_TYPE_PERFORMANCE_BIT_EXT | VK_DEBUG_UTILS_MESSAGE_TYPE_DEVICE_ADDRESS_BINDING_BIT_EXT, + .pfnUserCallback = debug_callback, + .pUserData = 0, + }; + + PFN_vkCreateDebugUtilsMessengerEXT func = (PFN_vkCreateDebugUtilsMessengerEXT)vkGetInstanceProcAddr(instance, "vkCreateDebugUtilsMessengerEXT"); + + VkResult result; + result = func(instance, &messenger_info, 0, debug_messenger); + if(result != VK_SUCCESS) { + fprintf(stderr, "failed to create debug messenger\n"); + 
return result;
+    }
+
+    return VK_SUCCESS;
+}
+
+VkResult get_best_physical_device(VkInstance instance, VkPhysicalDevice* device) {
+    uint32_t device_count = 0;
+    VkResult result;
+    result = vkEnumeratePhysicalDevices(instance, &device_count, 0);
+    if(result != VK_SUCCESS) {
+        return result;
+    }
+
+    VkPhysicalDevice* devices = malloc(sizeof(VkPhysicalDevice)*device_count);
+    result = vkEnumeratePhysicalDevices(instance, &device_count, devices);
+    if(result != VK_SUCCESS) {
+        free(devices);
+        return result;
+    }
+
+    int top_score = -1;
+    for(uint32_t i = 0; i < device_count; i++) {
+        int score = 0;
+
+        VkPhysicalDeviceProperties properties;
+        vkGetPhysicalDeviceProperties(devices[i], &properties);
+
+        VkPhysicalDeviceFeatures features;
+        vkGetPhysicalDeviceFeatures(devices[i], &features);
+
+        switch(properties.deviceType) {
+            case VK_PHYSICAL_DEVICE_TYPE_DISCRETE_GPU:
+                score += 100;
+                break;
+            case VK_PHYSICAL_DEVICE_TYPE_VIRTUAL_GPU:
+                score += 50;
+                break;
+            case VK_PHYSICAL_DEVICE_TYPE_INTEGRATED_GPU:
+                score += 25;
+                break;
+            case VK_PHYSICAL_DEVICE_TYPE_CPU:
+                score += 0;
+                break;
+            default:
+                continue;
+        }
+
+        if(score > top_score) {
+            top_score = score;
+            *device = devices[i];
+        }
+    }
+
+    free(devices);
+
+    return VK_SUCCESS;
+}
+
+VkResult create_logical_device(VkPhysicalDevice physical_device, VkSurfaceKHR surface, Queue* graphics_queue, Queue* present_queue, Queue* transfer_queue, VkDevice* device) {
+    if(graphics_queue == NULL || present_queue == NULL || transfer_queue == NULL || device == NULL) {
+        return VK_ERROR_VALIDATION_FAILED_EXT;
+    }
+
+    uint32_t queue_family_count = 0;
+    vkGetPhysicalDeviceQueueFamilyProperties(physical_device, &queue_family_count, NULL);
+
+    VkQueueFamilyProperties* queue_families = malloc(sizeof(VkQueueFamilyProperties)*queue_family_count);
+    vkGetPhysicalDeviceQueueFamilyProperties(physical_device, &queue_family_count, queue_families);
+
+    graphics_queue->family = 0xFFFFFFFF;
+    present_queue->family = 0xFFFFFFFF;
+    for(uint32_t idx = 0; idx < queue_family_count; idx++) {
+        VkBool32 present_support = VK_FALSE;
+        vkGetPhysicalDeviceSurfaceSupportKHR(physical_device, idx, surface, &present_support);
+        VkBool32 graphics_support = (queue_families[idx].queueFlags & VK_QUEUE_GRAPHICS_BIT);
+
+        if(graphics_support && present_support) {
+            graphics_queue->family = idx;
+            graphics_queue->index = 0;
+
+            present_queue->family = idx;
+            present_queue->index = 0;
+            break;
+        } else if (graphics_support && (graphics_queue->family == 0xFFFFFFFF)) {
+            graphics_queue->family = idx;
+            graphics_queue->index = 0;
+        } else if (present_support && (present_queue->family == 0xFFFFFFFF)) {
+            // Remember this family as the present-capable one.
+            present_queue->family = idx;
+            present_queue->index = 0;
+        }
+    }
+
+    transfer_queue->family = 0xFFFFFFFF;
+    for(uint32_t idx = 0; idx < queue_family_count; idx++) {
+        VkBool32 graphics_support = (queue_families[idx].queueFlags & VK_QUEUE_GRAPHICS_BIT);
+        VkBool32 compute_support = (queue_families[idx].queueFlags & VK_QUEUE_COMPUTE_BIT);
+        VkBool32 is_graphics_family = (graphics_queue->family == idx);
+        VkBool32 is_present_family = (present_queue->family == idx);
+        uint32_t queue_count = queue_families[idx].queueCount;
+
+        if(is_graphics_family && (queue_count == 1)) {
+            continue;
+        } else if (is_present_family && (queue_count == 1)) {
+            continue;
+        }
+
+        if(graphics_support && compute_support) {
+            transfer_queue->family = idx;
+            if(is_graphics_family || is_present_family) {
+                transfer_queue->index = 1;
+            } else {
+                transfer_queue->index = 0;
+            }
+        }
+    }
+
+    if(graphics_queue->family ==
0xFFFFFFFF || present_queue->family == 0xFFFFFFFF || transfer_queue->family == 0xFFFFFFFF) {
+        return VK_ERROR_INITIALIZATION_FAILED;
+    }
+
+    VkDeviceQueueCreateInfo queue_create_info[3] = {0};
+    uint32_t queue_count = 0;
+    // pQueuePriorities must point to one priority per requested queue, so provide
+    // two entries for the branches that request two queues from the same family.
+    float default_queue_priorities[2] = {1.0f, 1.0f};
+    if(graphics_queue->family == present_queue->family && graphics_queue->family == transfer_queue->family) {
+        queue_count = 1;
+
+        queue_create_info[0].sType = VK_STRUCTURE_TYPE_DEVICE_QUEUE_CREATE_INFO;
+        queue_create_info[0].queueFamilyIndex = graphics_queue->family;
+        queue_create_info[0].queueCount = 2;
+        queue_create_info[0].pQueuePriorities = default_queue_priorities;
+    } else if (graphics_queue->family == present_queue->family) {
+        queue_count = 2;
+
+        queue_create_info[0].sType = VK_STRUCTURE_TYPE_DEVICE_QUEUE_CREATE_INFO;
+        queue_create_info[0].queueFamilyIndex = graphics_queue->family;
+        queue_create_info[0].queueCount = 1;
+        queue_create_info[0].pQueuePriorities = default_queue_priorities;
+
+        queue_create_info[1].sType = VK_STRUCTURE_TYPE_DEVICE_QUEUE_CREATE_INFO;
+        queue_create_info[1].queueFamilyIndex = transfer_queue->family;
+        queue_create_info[1].queueCount = 1;
+        queue_create_info[1].pQueuePriorities = default_queue_priorities;
+    } else if (graphics_queue->family == transfer_queue->family) {
+        queue_count = 2;
+
+        queue_create_info[0].sType = VK_STRUCTURE_TYPE_DEVICE_QUEUE_CREATE_INFO;
+        queue_create_info[0].queueFamilyIndex = graphics_queue->family;
+        queue_create_info[0].queueCount = 2;
+        queue_create_info[0].pQueuePriorities = default_queue_priorities;
+
+        queue_create_info[1].sType = VK_STRUCTURE_TYPE_DEVICE_QUEUE_CREATE_INFO;
+        queue_create_info[1].queueFamilyIndex = present_queue->family;
+        queue_create_info[1].queueCount = 1;
+        queue_create_info[1].pQueuePriorities = default_queue_priorities;
+    } else {
+        queue_count = 3;
+
+        queue_create_info[0].sType = VK_STRUCTURE_TYPE_DEVICE_QUEUE_CREATE_INFO;
+        queue_create_info[0].queueFamilyIndex = graphics_queue->family;
+        queue_create_info[0].queueCount = 1;
+        queue_create_info[0].pQueuePriorities = default_queue_priorities;
+
+        queue_create_info[1].sType = VK_STRUCTURE_TYPE_DEVICE_QUEUE_CREATE_INFO;
+        queue_create_info[1].queueFamilyIndex = present_queue->family;
+        queue_create_info[1].queueCount = 1;
+        queue_create_info[1].pQueuePriorities = default_queue_priorities;
+
+        // Third create info describes the dedicated transfer family.
+        queue_create_info[2].sType = VK_STRUCTURE_TYPE_DEVICE_QUEUE_CREATE_INFO;
+        queue_create_info[2].queueFamilyIndex = transfer_queue->family;
+        queue_create_info[2].queueCount = 1;
+        queue_create_info[2].pQueuePriorities = default_queue_priorities;
+    }
+
+    VkPhysicalDeviceVulkan12Features features_12 = {
+        .sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_VULKAN_1_2_FEATURES,
+        .bufferDeviceAddress = VK_TRUE,
+        .descriptorIndexing = VK_TRUE,
+        .descriptorBindingPartiallyBound = VK_TRUE,
+        .descriptorBindingVariableDescriptorCount = VK_TRUE,
+        .descriptorBindingUniformBufferUpdateAfterBind = VK_TRUE,
+        .descriptorBindingStorageBufferUpdateAfterBind = VK_TRUE,
+        .descriptorBindingSampledImageUpdateAfterBind = VK_TRUE,
+    };
+
+    VkPhysicalDeviceFeatures device_features = {
+        .samplerAnisotropy = VK_TRUE,
+    };
+
+    VkDeviceCreateInfo device_create_info = {
+        .sType = VK_STRUCTURE_TYPE_DEVICE_CREATE_INFO,
+        .pQueueCreateInfos = queue_create_info,
+        .queueCreateInfoCount = queue_count,
+        .pEnabledFeatures = &device_features,
+        .enabledExtensionCount = device_extension_count,
+        .ppEnabledExtensionNames = device_extensions,
+        .enabledLayerCount = validation_layer_count,
+        .ppEnabledLayerNames = validation_layers,
+        .pNext = 
&features_12, + }; + + VkResult result = vkCreateDevice(physical_device, &device_create_info, 0, device); + if(result != VK_SUCCESS) { + return result; + } + + vkGetDeviceQueue(*device, graphics_queue->family, graphics_queue->index, &graphics_queue->handle); + vkGetDeviceQueue(*device, present_queue->family, present_queue->index, &present_queue->handle); + vkGetDeviceQueue(*device, transfer_queue->family, transfer_queue->index, &transfer_queue->handle); + + return VK_SUCCESS; +} + +VkResult create_memory_allocator(VkInstance instance, VkPhysicalDevice physical_device, VkDevice device, VmaAllocator* allocator) { + VmaAllocatorCreateInfo allocator_create_info = { + .vulkanApiVersion = VK_API_VERSION_1_2, + .instance = instance, + .physicalDevice = physical_device, + .device = device, + }; + + VkResult result = vmaCreateAllocator(&allocator_create_info, allocator); + if(result != VK_SUCCESS) { + return result; + } + + return VK_SUCCESS; +} + +VkResult get_swapchain_details(VkPhysicalDevice physical_device, VkSurfaceKHR surface, SwapchainDetails* details) { + details->formats = 0; + details->present_modes = 0; + + VkResult result; + + result = vkGetPhysicalDeviceSurfaceCapabilitiesKHR(physical_device, surface, &details->capabilities); + if(result != VK_SUCCESS) { + return result; + } + + result = vkGetPhysicalDeviceSurfaceFormatsKHR(physical_device, surface, &details->formats_count, 0); + if(result != VK_SUCCESS) { + return result; + } + details->formats = malloc(sizeof(VkSurfaceFormatKHR)*details->formats_count); + result = vkGetPhysicalDeviceSurfaceFormatsKHR(physical_device, surface, &details->formats_count, details->formats); + if(result != VK_SUCCESS) { + free(details->formats); + return result; + } + + result = vkGetPhysicalDeviceSurfacePresentModesKHR(physical_device, surface, &details->present_modes_count, 0); + if(result != VK_SUCCESS) { + free(details->formats); + return result; + } + details->present_modes = malloc(sizeof(VkPresentModeKHR)*details->present_modes_count); + result = vkGetPhysicalDeviceSurfacePresentModesKHR(physical_device, surface, &details->present_modes_count, details->present_modes); + if(result != VK_SUCCESS) { + free(details->formats); + free(details->present_modes); + return result; + } + + return VK_SUCCESS; +} + +VkSurfaceFormatKHR choose_swapchain_format(SwapchainDetails swapchain_details) { + for(uint32_t i = 0; i < swapchain_details.formats_count; i++) { + VkSurfaceFormatKHR format = swapchain_details.formats[i]; + if(format.format == VK_FORMAT_B8G8R8A8_SRGB && format.colorSpace == VK_COLOR_SPACE_SRGB_NONLINEAR_KHR) { + return format; + } + } + return swapchain_details.formats[0]; +} + +VkPresentModeKHR choose_present_mode(SwapchainDetails swapchain_details) { + for(uint32_t i = 0; i < swapchain_details.present_modes_count; i++) { + if(swapchain_details.present_modes[i] == VK_PRESENT_MODE_MAILBOX_KHR) { + return VK_PRESENT_MODE_MAILBOX_KHR; + } + } + + return VK_PRESENT_MODE_FIFO_KHR; +} + +VkExtent2D choose_swapchain_extent(SwapchainDetails swapchain_details) { + return swapchain_details.capabilities.currentExtent; +} + +VkResult create_swapchain(VkDevice device, VkSurfaceFormatKHR format, VkPresentModeKHR present_mode, VkExtent2D extent, VkSurfaceKHR surface, VkSurfaceCapabilitiesKHR capabilities, uint32_t graphics_family_index, uint32_t present_family_index, VkSwapchainKHR* swapchain) { + uint32_t image_count = capabilities.minImageCount + 1; + uint32_t max_images = capabilities.maxImageCount; + if((max_images > 0) && (image_count > max_images)) { + 
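+    // VkSurfaceCapabilitiesKHR::maxImageCount of 0 means the surface imposes no upper
+    // limit; otherwise clamp the requested image count to the reported maximum.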
image_count = max_images; + } + + VkSwapchainCreateInfoKHR swapchain_info = { + .sType = VK_STRUCTURE_TYPE_SWAPCHAIN_CREATE_INFO_KHR, + .surface = surface, + .minImageCount = image_count, + .imageFormat = format.format, + .imageColorSpace = format.colorSpace, + .imageExtent = extent, + .imageArrayLayers = 1, + .imageUsage = VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT, + .preTransform = capabilities.currentTransform, + .compositeAlpha = VK_COMPOSITE_ALPHA_INHERIT_BIT_KHR, + .presentMode = present_mode, + .clipped = VK_TRUE, + .oldSwapchain = *swapchain, + }; + + uint32_t queue_families[2] = {graphics_family_index, present_family_index}; + if(graphics_family_index != present_family_index) { + swapchain_info.imageSharingMode = VK_SHARING_MODE_CONCURRENT; + swapchain_info.queueFamilyIndexCount = 2; + swapchain_info.pQueueFamilyIndices = queue_families; + } else { + swapchain_info.imageSharingMode = VK_SHARING_MODE_EXCLUSIVE; + swapchain_info.queueFamilyIndexCount = 0; + swapchain_info.pQueueFamilyIndices = 0; + } + + + VkResult result; + result = vkCreateSwapchainKHR(device, &swapchain_info, 0, swapchain); + if(result != VK_SUCCESS) { + return result; + } + + return VK_SUCCESS; +} + +VkResult get_swapchain_images(VkDevice device, VkSwapchainKHR swapchain, VkImage** images, uint32_t* image_count) { + VkResult result; + result = vkGetSwapchainImagesKHR(device, swapchain, image_count, 0); + if(result != VK_SUCCESS) { + return result; + } + + *images = malloc(sizeof(VkImage)*(*image_count)); + if(*images == 0) { + return VK_ERROR_OUT_OF_HOST_MEMORY; + } + + result = vkGetSwapchainImagesKHR(device, swapchain, image_count, *images); + if(result != VK_SUCCESS) { + free(*images); + return result; + } + + return VK_SUCCESS; +} + +VkResult create_image_views(VkDevice device, uint32_t image_count, VkImage* images, VkSurfaceFormatKHR format, VkImageView** image_views) { + *image_views = malloc(sizeof(VkImageView)*image_count); + if(*image_views == 0) { + return VK_ERROR_OUT_OF_HOST_MEMORY; + } + + for(uint32_t i = 0; i < image_count; i++) { + VkImageViewCreateInfo view_info = { + .sType = VK_STRUCTURE_TYPE_IMAGE_VIEW_CREATE_INFO, + .image = images[i], + .viewType = VK_IMAGE_VIEW_TYPE_2D, + .format = format.format, + .components = { + .r = VK_COMPONENT_SWIZZLE_IDENTITY, + .g = VK_COMPONENT_SWIZZLE_IDENTITY, + .b = VK_COMPONENT_SWIZZLE_IDENTITY, + .a = VK_COMPONENT_SWIZZLE_IDENTITY, + }, + .subresourceRange = { + .aspectMask = VK_IMAGE_ASPECT_COLOR_BIT, + .baseMipLevel = 0, + .levelCount = 1, + .baseArrayLayer = 0, + .layerCount = 1, + }, + }; + + VkResult result = vkCreateImageView(device, &view_info, 0, &(*image_views)[i]); + if(result != VK_SUCCESS) { + free(*image_views); + return result; + } + } + + return VK_SUCCESS; +} + +VkResult find_depth_format(VkPhysicalDevice physical_device, VkImageTiling tiling, VkFormatFeatureFlags features, VkFormat* format) { + for(uint32_t i = 0; i < depth_format_count; i++) { + VkFormatProperties properties; + vkGetPhysicalDeviceFormatProperties(physical_device, depth_formats[i], &properties); + + if(tiling == VK_IMAGE_TILING_LINEAR && (properties.linearTilingFeatures & features) == features) { + *format = depth_formats[i]; + return VK_SUCCESS; + } else if (tiling == VK_IMAGE_TILING_OPTIMAL && (properties.optimalTilingFeatures & features) == features) { + *format = depth_formats[i]; + return VK_SUCCESS; + } + } + return VK_ERROR_UNKNOWN; +} + +VkResult create_render_pass(VkDevice device, VkSurfaceFormatKHR format, VkFormat depth_format, VkRenderPass* render_pass) { + 
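+  // Single pass over two attachments: attachment 0 is the swapchain color image
+  // (cleared, stored, and left in PRESENT_SRC_KHR for presentation) and attachment 1
+  // is the depth buffer (cleared, contents discarded after the pass). One graphics
+  // subpass uses both, and the external dependency below keeps attachment writes
+  // ordered after any earlier use of the same images.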
VkAttachmentDescription attachments[] = { + { + .format = format.format, + .samples = VK_SAMPLE_COUNT_1_BIT, + .loadOp = VK_ATTACHMENT_LOAD_OP_CLEAR, + .storeOp = VK_ATTACHMENT_STORE_OP_STORE, + .stencilLoadOp = VK_ATTACHMENT_LOAD_OP_CLEAR, + .stencilStoreOp = VK_ATTACHMENT_STORE_OP_STORE, + .initialLayout = VK_IMAGE_LAYOUT_UNDEFINED, + .finalLayout = VK_IMAGE_LAYOUT_PRESENT_SRC_KHR, + }, + { + .format = depth_format, + .samples = VK_SAMPLE_COUNT_1_BIT, + .loadOp = VK_ATTACHMENT_LOAD_OP_CLEAR, + .storeOp = VK_ATTACHMENT_STORE_OP_DONT_CARE, + .stencilLoadOp = VK_ATTACHMENT_LOAD_OP_DONT_CARE, + .stencilStoreOp = VK_ATTACHMENT_STORE_OP_DONT_CARE, + .initialLayout = VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL, + .finalLayout = VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL, + }, + }; + + VkAttachmentReference color_attachment_refs[] = { + { + .attachment = 0, + .layout = VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL, + }, + }; + + VkAttachmentReference depth_attachment_ref = { + .attachment = 1, + .layout = VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL, + }; + + // Create a subpass with the color and depth attachments + VkSubpassDescription subpasses[] = { + { + .pipelineBindPoint = VK_PIPELINE_BIND_POINT_GRAPHICS, + .colorAttachmentCount = sizeof(color_attachment_refs)/sizeof(VkAttachmentReference), + .pColorAttachments = color_attachment_refs, + .pDepthStencilAttachment = &depth_attachment_ref, + }, + }; + + // This basically says "make sure nothing else is writing to the depth_stencil or the color attachment during the pipeline + VkSubpassDependency dependencies[] = { + { + .srcSubpass = VK_SUBPASS_EXTERNAL, + .dstSubpass = 0, + .srcStageMask = VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT | VK_PIPELINE_STAGE_EARLY_FRAGMENT_TESTS_BIT, + .srcAccessMask = 0, + .dstStageMask = VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT | VK_PIPELINE_STAGE_EARLY_FRAGMENT_TESTS_BIT, + .dstAccessMask = VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT | VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT, + .dependencyFlags = VK_DEPENDENCY_BY_REGION_BIT, + } + }; + + VkRenderPassCreateInfo render_info = { + .sType = VK_STRUCTURE_TYPE_RENDER_PASS_CREATE_INFO, + .attachmentCount = sizeof(attachments)/sizeof(VkAttachmentDescription), + .pAttachments = attachments, + .subpassCount = sizeof(subpasses)/sizeof(VkSubpassDescription), + .pSubpasses = subpasses, + .dependencyCount = sizeof(dependencies)/sizeof(VkSubpassDependency), + .pDependencies = dependencies, + }; + + VkResult result = vkCreateRenderPass(device, &render_info, 0, render_pass); + if(result != VK_SUCCESS) { + return result; + } + + return VK_SUCCESS; +} + +VkCommandBuffer command_begin_single(VkDevice device, VkCommandPool transfer_pool) { + VkCommandBufferAllocateInfo command_info = { + .sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_ALLOCATE_INFO, + .level = VK_COMMAND_BUFFER_LEVEL_PRIMARY, + .commandPool = transfer_pool, + .commandBufferCount = 1, + }; + + VkCommandBuffer command_buffer; + VkResult result = vkAllocateCommandBuffers(device, &command_info, &command_buffer); + if(result != VK_SUCCESS) { + return VK_NULL_HANDLE; + } + + VkCommandBufferBeginInfo begin_info = { + .sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_BEGIN_INFO, + .flags = VK_COMMAND_BUFFER_USAGE_ONE_TIME_SUBMIT_BIT, + }; + + result = vkBeginCommandBuffer(command_buffer, &begin_info); + if(result != VK_SUCCESS) { + vkFreeCommandBuffers(device, transfer_pool, 1, &command_buffer); + return VK_NULL_HANDLE; + } + + return command_buffer; +} + +VkResult command_end_single(VkDevice device, VkCommandBuffer 
command_buffer, VkCommandPool transfer_pool, Queue transfer_queue) {
+  VkResult result = vkEndCommandBuffer(command_buffer);
+  if(result != VK_SUCCESS) {
+    vkFreeCommandBuffers(device, transfer_pool, 1, &command_buffer);
+    return result;
+  }
+
+  VkSubmitInfo submit_info = {
+    .sType = VK_STRUCTURE_TYPE_SUBMIT_INFO,
+    .commandBufferCount = 1,
+    .pCommandBuffers = &command_buffer,
+  };
+
+  result = vkQueueSubmit(transfer_queue.handle, 1, &submit_info, 0);
+  if(result != VK_SUCCESS) {
+    vkFreeCommandBuffers(device, transfer_pool, 1, &command_buffer);
+    return result;
+  }
+
+  result = vkQueueWaitIdle(transfer_queue.handle);
+  vkFreeCommandBuffers(device, transfer_pool, 1, &command_buffer);
+  return result;
+}
+
+VkResult command_transition_image_layout(VkDevice device, VkCommandPool transfer_pool, Queue transfer_queue, VkImageLayout old_layout, VkImageLayout new_layout, VkImage image, VkAccessFlags src_mask, VkAccessFlags dst_mask, VkPipelineStageFlags source, VkPipelineStageFlags dest, uint32_t source_family, uint32_t dest_family, VkImageAspectFlags aspect_flags) {
+  VkCommandBuffer command_buffer = command_begin_single(device, transfer_pool);
+
+  VkImageMemoryBarrier barrier = {
+    .sType = VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER,
+    .oldLayout = old_layout,
+    .newLayout = new_layout,
+    .srcQueueFamilyIndex = source_family,
+    .dstQueueFamilyIndex = dest_family,
+    .image = image,
+    .subresourceRange = {
+      .aspectMask = aspect_flags,
+      .levelCount = 1,
+      .layerCount = 1,
+      .baseMipLevel = 0,
+      .baseArrayLayer = 0,
+    },
+    .srcAccessMask = src_mask,
+    .dstAccessMask = dst_mask,
+  };
+  vkCmdPipelineBarrier(command_buffer, source, dest, 0, 0, 0, 0, 0, 1, &barrier);
+
+  return command_end_single(device, command_buffer, transfer_pool, transfer_queue);
+}
+
+VkResult command_copy_buffer_to_image(VkDevice device, VkCommandPool transfer_pool, Queue transfer_queue, VkExtent3D image_size, VkBuffer source, VkImage dest) {
+  VkCommandBuffer command_buffer = command_begin_single(device, transfer_pool);
+
+  VkBufferImageCopy region = {
+    .bufferOffset = 0,
+    .bufferRowLength = 0,
+    .bufferImageHeight = 0,
+    .imageSubresource = {
+      .baseArrayLayer = 0,
+      .layerCount = 1,
+      .mipLevel = 0,
+      .aspectMask = VK_IMAGE_ASPECT_COLOR_BIT,
+    },
+    .imageOffset = {
+      .x = 0,
+      .y = 0,
+      .z = 0,
+    },
+    .imageExtent = image_size,
+  };
+
+  vkCmdCopyBufferToImage(command_buffer, source, dest, VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, 1, &region);
+
+  return command_end_single(device, command_buffer, transfer_pool, transfer_queue);
+}
+
+VkResult create_swapchain_framebuffers(VkDevice device, uint32_t image_count, VkImageView* image_views, VkImageView depth_image_view, VkRenderPass render_pass, VkExtent2D extent, VkFramebuffer** framebuffers) {
+  *framebuffers = malloc(sizeof(VkFramebuffer)*image_count);
+  if(*framebuffers == 0) {
+    return VK_ERROR_OUT_OF_HOST_MEMORY;
+  }
+
+  for(uint32_t i = 0; i < image_count; i++) {
+    VkImageView attachments[] = {
+      image_views[i],
+      depth_image_view,
+    };
+
+    VkFramebufferCreateInfo framebuffer_info = {
+      .sType = VK_STRUCTURE_TYPE_FRAMEBUFFER_CREATE_INFO,
+      .renderPass = render_pass,
+      .attachmentCount = 2,
+      .pAttachments = attachments,
+      .width = extent.width,
+      .height = extent.height,
+      .layers = 1,
+    };
+
+    // Pointer arithmetic on VkFramebuffer* is already scaled by the element size.
+    VkResult result = vkCreateFramebuffer(device, &framebuffer_info, 0, &(*framebuffers)[i]);
+    if(result != VK_SUCCESS) {
+      free(*framebuffers);
+      return result;
+    }
+  }
+
+  return VK_SUCCESS;
+}
+
+VkSemaphore* create_semaphores(VkDevice device, VkSemaphoreCreateFlags flags, uint32_t count) {
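+  // Allocates and creates `count` semaphores with the given creation flags; returns
+  // NULL if the array allocation or any vkCreateSemaphore call fails (semaphores
+  // created before the failure are not destroyed here).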
VkSemaphore* semaphores = malloc(sizeof(VkSemaphore)*count); + if(semaphores == 0) { + return 0; + } + + VkSemaphoreCreateInfo semaphore_info = { + .sType = VK_STRUCTURE_TYPE_SEMAPHORE_CREATE_INFO, + .flags = flags, + }; + + for(uint32_t i = 0; i < count; i++) { + VkResult result = vkCreateSemaphore(device, &semaphore_info, 0, &semaphores[i]); + if(result != VK_SUCCESS) { + free(semaphores); + return 0; + } + } + return semaphores; +} + +VkFence* create_fences(VkDevice device, VkFenceCreateFlags flags, uint32_t count) { + VkFence* fences = malloc(sizeof(VkFence)*count); + if(fences == 0) { + return 0; + } + + VkFenceCreateInfo fence_info = { + .sType = VK_STRUCTURE_TYPE_FENCE_CREATE_INFO, + .flags = flags, + }; + + for(uint32_t i = 0; i < count; i++) { + VkResult result = vkCreateFence(device, &fence_info, 0, &fences[i]); + if(result != VK_SUCCESS) { + free(fences); + return 0; + } + } + return fences; +} + +VkCommandBuffer* create_command_buffers(VkDevice device, VkCommandPool command_pool, uint32_t image_count) { + VkCommandBufferAllocateInfo alloc_info = { + .sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_ALLOCATE_INFO, + .commandPool = command_pool, + .level = VK_COMMAND_BUFFER_LEVEL_PRIMARY, + .commandBufferCount = image_count, + }; + + VkCommandBuffer* command_buffers = malloc(sizeof(VkCommandBuffer)*image_count); + if(command_buffers == 0) { + return 0; + } + + VkResult result = vkAllocateCommandBuffers(device, &alloc_info, command_buffers); + if(result != VK_SUCCESS) { + return VK_NULL_HANDLE; + } + + return command_buffers; +} + +VkResult create_depth_image(VkDevice device, VkFormat depth_format, VkExtent2D swapchain_extent, VmaAllocator allocator, VkCommandPool extra_graphics_pool, Queue graphics_queue, VkImage* depth_image, VmaAllocation* depth_image_memory, VkImageView* depth_image_view) { + + VkExtent3D depth_extent = { + .width = swapchain_extent.width, + .height = swapchain_extent.height, + .depth = 1, + }; + + VkImageCreateInfo depth_image_info = { + .sType = VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO, + .imageType = VK_IMAGE_TYPE_2D, + .extent = depth_extent, + .mipLevels = 1, + .arrayLayers = 1, + .format = depth_format, + .tiling = VK_IMAGE_TILING_OPTIMAL, + .initialLayout = VK_IMAGE_LAYOUT_UNDEFINED, + .usage = VK_IMAGE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT | VK_IMAGE_USAGE_TRANSIENT_ATTACHMENT_BIT, + .sharingMode = VK_SHARING_MODE_EXCLUSIVE, + .samples = VK_SAMPLE_COUNT_1_BIT, + .flags = 0, + }; + + VmaAllocationCreateInfo allocation_info = { + .usage = VMA_MEMORY_USAGE_GPU_ONLY, + }; + + VkResult result = vmaCreateImage(allocator, &depth_image_info, &allocation_info, depth_image, depth_image_memory, NULL); + if(result != VK_SUCCESS) { + return result; + } + + VkImageViewCreateInfo depth_view_info = { + .sType = VK_STRUCTURE_TYPE_IMAGE_VIEW_CREATE_INFO, + .image = *depth_image, + .viewType = VK_IMAGE_VIEW_TYPE_2D, + .format = depth_format, + .components = { + .r = VK_COMPONENT_SWIZZLE_IDENTITY, + .g = VK_COMPONENT_SWIZZLE_IDENTITY, + .b = VK_COMPONENT_SWIZZLE_IDENTITY, + .a = VK_COMPONENT_SWIZZLE_IDENTITY, + }, + .subresourceRange = { + .aspectMask = VK_IMAGE_ASPECT_DEPTH_BIT, + .baseMipLevel = 0, + .levelCount = 1, + .baseArrayLayer = 0, + .layerCount = 1, + }, + }; + + result = vkCreateImageView(device, &depth_view_info, 0, depth_image_view); + if(result != VK_SUCCESS) { + return result; + } + + result = command_transition_image_layout(device, extra_graphics_pool, graphics_queue, VK_IMAGE_LAYOUT_UNDEFINED, VK_IMAGE_LAYOUT_DEPTH_ATTACHMENT_OPTIMAL, *depth_image, 0, 
VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_READ_BIT | VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT, VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT, VK_PIPELINE_STAGE_EARLY_FRAGMENT_TESTS_BIT, VK_QUEUE_FAMILY_IGNORED, VK_QUEUE_FAMILY_IGNORED, VK_IMAGE_ASPECT_DEPTH_BIT); + if(result != VK_SUCCESS) { + return result; + } + + return VK_SUCCESS; +} + +VkResult init_vulkan(GLFWwindow* window, RenderContext* context) { + VkResult result; + if(context == NULL) { + return VK_ERROR_VALIDATION_FAILED_EXT; + } + + result = create_instance(&context->instance); + if(result != VK_SUCCESS) { + return result; + } + + result = create_debug_messenger(context->instance, &context->debug_messenger); + if(result != VK_SUCCESS) { + return result; + } + + result = get_best_physical_device(context->instance, &context->physical_device); + if(result != VK_SUCCESS) { + return result; + } + + vkGetPhysicalDeviceMemoryProperties(context->physical_device, &context->memories); + + result = glfwCreateWindowSurface(context->instance, window, 0, &context->surface); + if(result != VK_SUCCESS) { + return result; + } + + result = create_logical_device(context->physical_device, context->surface, &context->graphics_queue, &context->present_queue, &context->transfer_queue, &context->device); + if(result != VK_SUCCESS) { + return result; + } + + result = create_memory_allocator(context->instance, context->physical_device, context->device, &context->allocator); + if(result != VK_SUCCESS) { + return result; + } + + VkCommandPoolCreateInfo extra_pool_info = { + .sType = VK_STRUCTURE_TYPE_COMMAND_POOL_CREATE_INFO, + .queueFamilyIndex = context->graphics_queue.family, + .flags = VK_COMMAND_POOL_CREATE_TRANSIENT_BIT, + }; + result = vkCreateCommandPool(context->device, &extra_pool_info, 0, &context->extra_graphics_pool); + if(result != VK_SUCCESS) { + return result; + } + + VkCommandPoolCreateInfo graphics_pool_info = { + .sType = VK_STRUCTURE_TYPE_COMMAND_POOL_CREATE_INFO, + .flags = VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT, + .queueFamilyIndex = context->graphics_queue.family, + }; + result = vkCreateCommandPool(context->device, &graphics_pool_info, 0, &context->graphics_pool); + if(result != VK_SUCCESS) { + return result; + } + + VkCommandPoolCreateInfo transfer_pool_info = { + .sType = VK_STRUCTURE_TYPE_COMMAND_POOL_CREATE_INFO, + .flags = VK_COMMAND_POOL_CREATE_TRANSIENT_BIT, + .queueFamilyIndex = context->transfer_queue.family, + }; + result = vkCreateCommandPool(context->device, &transfer_pool_info, 0, &context->transfer_pool); + if(result != VK_SUCCESS) { + return result; + } + + context->swapchain_command_buffers = create_command_buffers(context->device, context->graphics_pool, MAX_FRAMES_IN_FLIGHT); + if(context->swapchain_command_buffers == NULL) { + return VK_ERROR_UNKNOWN; + } + + context->image_available_semaphores = create_semaphores(context->device, 0, MAX_FRAMES_IN_FLIGHT); + if(context->image_available_semaphores == NULL) { + return VK_ERROR_UNKNOWN; + } + + context->render_finished_semaphores = create_semaphores(context->device, 0, MAX_FRAMES_IN_FLIGHT); + if(context->render_finished_semaphores == NULL) { + return VK_ERROR_UNKNOWN; + } + + context->in_flight_fences = create_fences(context->device, VK_FENCE_CREATE_SIGNALED_BIT, MAX_FRAMES_IN_FLIGHT); + if(context->in_flight_fences == NULL) { + return VK_ERROR_UNKNOWN; + } + + result = get_swapchain_details(context->physical_device, context->surface, &context->swapchain_details); + if(result != VK_SUCCESS) { + return result; + } + + context->swapchain_format = 
choose_swapchain_format(context->swapchain_details);
+  context->swapchain_present_mode = choose_present_mode(context->swapchain_details);
+  context->swapchain_extent = choose_swapchain_extent(context->swapchain_details);
+  context->swapchain = VK_NULL_HANDLE;
+  result = create_swapchain(context->device, context->swapchain_format, context->swapchain_present_mode, context->swapchain_extent, context->surface, context->swapchain_details.capabilities, context->graphics_queue.family, context->present_queue.family, &context->swapchain);
+  if(result != VK_SUCCESS) {
+    return result;
+  }
+
+  result = get_swapchain_images(context->device, context->swapchain, &context->swapchain_images, &context->swapchain_image_count);
+  if(result != VK_SUCCESS) {
+    return result;
+  }
+
+  result = create_image_views(context->device, context->swapchain_image_count, context->swapchain_images, context->swapchain_format, &context->swapchain_image_views);
+  if(result != VK_SUCCESS) {
+    return result;
+  }
+
+  result = find_depth_format(context->physical_device, VK_IMAGE_TILING_OPTIMAL, VK_FORMAT_FEATURE_DEPTH_STENCIL_ATTACHMENT_BIT, &context->depth_format);
+  if(result != VK_SUCCESS) {
+    return result;
+  }
+
+  result = create_render_pass(context->device, context->swapchain_format, context->depth_format, &context->render_pass);
+  if(result != VK_SUCCESS) {
+    return result;
+  }
+
+  result = create_depth_image(context->device, context->depth_format, context->swapchain_extent, context->allocator, context->extra_graphics_pool, context->graphics_queue, &context->depth_image, &context->depth_image_memory, &context->depth_image_view);
+  if(result != VK_SUCCESS) {
+    return result;
+  }
+
+  result = create_swapchain_framebuffers(context->device, context->swapchain_image_count, context->swapchain_image_views, context->depth_image_view, context->render_pass, context->swapchain_extent, &context->swapchain_framebuffers);
+  if(result != VK_SUCCESS) {
+    return result;
+  }
+
+  // TODO: create pipelines
+
+  return VK_SUCCESS;
+}
+
diff --git a/client/src/render.o b/client/src/render.o
new file mode 100644
index 0000000000000000000000000000000000000000..d924f7cee91deddd0b0f24cfb4464b37609358a6
GIT binary patch
literal 188296
zGEb6f5nhF2sm2@&Mb{n~VsWdG9eukacNCUog35DM z!h(xzr8xzeiQ-jG09oiuX#8a%z_2XCtQG^j#%L^;_q@2J?%0SRr4Yp8uca)_O16|% zUs4k0$ZQ;~{@F5HecgmkS(@YXdyt)5lwkp<7)H^g{TE3 zMVL~eyGk(raPh6efHNv{zca!qu55(#u;9^z5Z7csFOIn^AxN6~7LAx=mCMdlne0Ov zUXR9LFut@sm6ly^xmi_PMg>9H-+E*xCO4tyR1)JP6=X-Jc7$0P$W9faNMt)v*Mk=L3m7QZZ-OrhPBr4~ab*nr{~ zioARz<1b&o5K0|$-N#xcLe{?wgM(F8<97A%YgmPf=k@{|^)V`Fvk>NfPqNCwqD zu;9tUOjDA^xEYK2RL`J$YT!!iiVKivr?dDpY_Md$i&s&x7H@2fwNq%+q2y}W1V%J9 z&_hW=2|=$SCA_s2qBok^3r7<=sFhS`sfNS)iB1KzAvTYc==50og9xR1HX`k^%*(zJ zWwUH9#0b18(xgcS*6y?Nk`q3tpCYW4!cx3ld@;M)v@n`(j<=^J3m2M&uqUt_*+jNR zRFp;@4~TvOl`9s_w7bY>VfIY&sX5$d z=5U`pJ%M<8FYl}G!epS;7 zU?S%i5L!eRl010+$l^q);h4%qG?qq1kjajZA`wew!d>=#%{CwX<0v}6)U`$bj%smT zl5Qf1B(06AnuSFk3)`thn=&a%wI`_an^BZ$(2;nimI(lG}|RH z`f~9QM!~H7Aef0vk~7(uAf&NS#<;0T2|mHri9up>DaHy?rcQ`Co!ukOHpFOc+0h}c ztoL%_N<^QVGRquyMVhh5IVs(=m6JG!!gA8*(3uN;CO6qw+ca38!0dFSa2c*fByUUJ zGqqKTT@zad4&5b|#iL8=Yz1{0BwI&a29X5yZJ4cZTG$+oMQYH^6um~XPOx5T!kBx( zg5hd0Y|qx)0&Z0@^L?_~Coy?|AydX#luXs#D~raE6KG;FRKoOr7+potQOC#?-4@v& zj-m5cmKk+o8QU_utwsVIfGdCd(&aS0qIys?%a3+7f`%2DFUN;+>?(t$wB~p!4Ktfu zjb;}Ui6k+Nm&|oAZK;@Kmv)!uu-)8>!wKrM&W|Kw;ni*gqS2LXG`ZBrQyl0<$Ff}{ zrM#gui@Z>-q~UJB^y&qe0a_}aifIW~^%mvylbamh zfSJD_je05~;e;G{(#i^EvD6NTV#H0mUyxv#$Fdk?oFS^aM`jxa?~yRYMaibjlAO#< z6RMLWoy|Uze1#Ik_M6r)?TT;>pP)3*;6*HMX$jc}Q0&pKY7IM`R^mh{(LxQW zZ2WEM&2nDQBB_u|@&T1ph8CT$tiV=RzA6Jjg{ugOqHm3MKK3PGUZ3XDmEfIGs7$a0 zgC^8$yqVi#SR`vc4UD9iwoqnGv`B0tP&*CYsu{ zCD87sN+XeJsbnLsXf&nL{3P;TGRV&S0-7r@3qYq-Dxz#AWNr$$SA|q(6BKZ@b(AV< zbKJ|MU98TCSjpLON}-}6lVvrvrp^+9v!V+xB-UXTgiUP~)yh;X(O86YFm?J!o)%ZZ zvKy_Og>P1SDHEP)5m|bBqnbs;`H=}`ZP=}PvgMdrPGm!$i%w2IbxxxtEHzH3K+;>Z z(^=Fx$gDBvAki=aXp6|}Ol6{_au!D0rWDAXxx__w>I*X)%M#T1)4+wU?w3I%wH(CA zxg|G0(L_qYas`*IsM_6GtM+@A@ODq91JBqsh7(~iw2-rCJokr@j_FFVstGafe(-)J z`PSHu4slyr49CO*uF#o!Wm10SZhoXm&a7b@6LnxXQq#>$EVDK}FqRSXYY~_;>rHjw zVRtY&m;o&IQ|_$9bideZnc@jWHO8R*uIRV(h(o+Fg}KYs+(K)m%FOx6Q3`g_VYB}-ik5p;(bB0vYOA3cEK?yI{46aI`3T0mav+}NvMgQ7 zDg8Ldgr6AIi_s)DIg&&|oAR;N@+kr6Ba>G&w98y3s1!jfN?IdUL(z%T;;|-EaLJAN z^DbdF%QV7;rYb3BdZvyKp{U#jYPx6AZl$&)Tm%#SZy}uAA;e{eUa0d&Z7#MaM!DlCMpqEYnnUgx)yj>r(g)jd zMd9RevaH(5(PD3D8Y7&n6FmiUOwB8{4%ek(j>9ApbJuF`wRO`qZBa+bP1ev3Z0ifO z9yag$BCpH>Ke`*{Zs8p6G6`6-p=-iu&X=y)!UkH}W5Ab(@f%Hakteo@o1L6S+65iT zL?6O9+%Cd=8kp0bY>SYjQJQFyX|;!@yroU%V(kF=Nze9VWIM_RrYarU$cB@z{!p&CNQkmGO|3Dru!Lq86t4>iKT{ra z6pU0#3E73qCD-#zxwKVur?ZqPY!+44NJeWgpEAGMhZBmDkdcVJOo2#cXFM@ih|)Cc-qRppsU~p}5I9tjY5btQ8yUB04D{ zI)tV`l8>eNL%|eFf%Mf!H&Um!%miT$SZ~&CLP)uF`quUaJ=oXh z9106Y!`LjS4o#>+V|T@wu2_>gZ_qYQk%p`1+@@~V5)ixyCNpsB&BkfTDJE3B60!&C>hR7n2Ychh1rzXDyO_M zljI`1&FN*lp}NYz%-zCC%9EK3DFoJ_~B5nHk$sV40|S&*zZ9^;6llLgOon`PhM;$Z8_XXs?XsqPI@Id5JCI)9FOIDe4RA#}WLr2sx3!!5aO*P}_k;Mz=M<1)r!#%meh z&iE|Ipa-?A7T6k<4+k6O@Ib!g%V*@_o=yiDQhfWn6O#yEDF)@!C_R``F?e0!~;f2ypi#}3XhiVUo#$ZI@2+Jhw%}{O=F~g-?0+UK2zdD(1U?T&IaLl zIexzZkcr3x1cM8z@@M4d<5AF2miXuee?0+2(+kiWK-oen9|)koK>$)UshLzp>SEPs z!|^;4KuMxxkfG z0T%$40Kx$E*NBG*zcd413}^);0G9w#fYpG@0P6uAfK7la0oMSo2iyp_6+nOA18fD{ z0oVqhzq7#bo;wgzx??_gCCi=Gk!(O)4%)a z>sy|#v=5<^z#x?=AJm?z^UU>(?9ae)jExAHVa5a}R&Gp)u}R za^&0}cRX|Il!jC4-v32p!WZve)AP>Q!xQ!WXP$QCqQ#TPy!~Fu+H>!yoHypcn_u*w z_i%a9$erhJJ6#{Ne_vBl_%kR1H znvw6kQ9bUHZ~9*UyQU$J^?7mBIp5q`QThGtpFFgp?{7~Jwtuts!2UCDeR)fKarHM3 zZoJ65{{GwlbV>igE%((%-@R|>zSw=ee>FXSaTnfBWAnCyxl6ws!fpQ`euiDZMf8j~CB){;YN9eh{p^?dh)y_WL(a{^*$7 zwgn$fj){lbFL~_mYby`uufMX--l5wzoYL#dpFT3~@ekg<^vE;6dphv5y9V7<^y%e0 zFB`Y>hTzb7VD_f6w6kpS^S5tUun>C(`MR&QV|g{(_6IAJumI z3r~EY&)(bfjsu4(@2tA+)1S?{`MRsGF8tT4zaD<$$YG}rou=#W@2aUk`OwGjy*}*D z#bbtVp8n2rAAIyc;h|UWz50nki*I^u$AdR*d*ByeUi#?2Hheif7#uQt;El(Yd9)F6 
zk2bs6qy7C3kG5&6NApbcXg`2WSa-2UyD93?KEKnW6>alqJwR6&^k}tsC!05;#iMw2N=|XzL;#?a+FUmOj&?{pBi;Hsf0!?L*MLxZI=t`8tpG(On*G#b%H8 z8fXq&>(L&$&7=JaesMhSo&q|&AHn+r=tKLg7c6VQN0@L2HKG2Nq$#{1>aPYw7q zU+vLOfZPk-;8jZgpuGb4TEIQGc(n0I^9e}DX2`kdugwq=lSNj35ZVPofbwE1uwjJ!@<1KmNMt;m;mk$3svxg73ag2y$` z%R1=uuB#ve_>P8r$Kd(F#UAZ`xSs(Y^C8<>xHZm2ndyMuz~_&U@wZ6t1tRAp2k77hmJi`l5U+nuN9=e!oGu3h4AEq@^CbFKzK?pMcM~ zpnnc&db7Zzy#RUQaDN49`3mV;jOVc^U#G+U0mSiL&}j%;0zK_Q9GmBOw0|INA;>WK zQjhjUl}CFU;T}brevdQ+5#|)6Z3lGlD$3MQ@&nJ0%RJgQNYjn*I~8~Y!73k zE0K1{eIefGB#<8Hv=r{`tI?h~db(g6@(l4` zHx6lr`@4vzV}?ijKFYu=;L$hk!SoQ`;de8_(_bF|_1%U7sIP`3n*Fk~@H_`VeX#QY z3js?3&44yQ8n6bi9&iQVI{@k{eIKwL@IwIgbEtg&3_yLE*8%SUs9$mzK>ZFKqVxg` z0-OXG4L}mK$pDD2`2od%Nd-C$Ky@05o(6SjlvPlz@GpI0fzxc0D2F^4HyJC z2{0URI$%6t2EY#}22=v70rh|eKnq|6U@f2na2?=A!1n-m0v-bF1UwD+Dc~i*Yk*$^ zeh+vL@HfCAz~_Lk0X+>(I|eWWa2jADU@G8TKq+7@;5@)$z;Zw_h;0u7)hqwU)0jB~+0nP;E1I`8%0?GmN0ZRal zfHpuHa5>;=z-GWLfUSTB06zdc1$Y+l0^nDG-vHhQ{0VRna2Rj|@C~3xPmH$!0{|xj zMgYbDCIF@Y&HEOxazHB}1-J}w72pQI&4AkhcL6*-^npE2L^_X`SRW{H zk5LjE6D9Udm00uqatf#yFGF4RZpR2re~gAbn(=-)xh!5x&zok`^L!EYP7L7m zT{MdG9+*kLL+bJC*)$Q4znDc1Pn}P{BjK!%CqM7T0R5gL1TDFUp1-8gpLbmtB;Upl zWUIsDe*D073Ay4&^Li<+z7$uFauND4iQ_3p0=*XqnN|ux7Q#~x6NTrOB)yLem}bl< zBbW3HdOk7-zxvg2{CZPM@wg5@nz!N-`aMWNjXO&4>+yks_d7xz>*&|Bn__DX5#5`D z?n{}R*CV6#5aqUaWekts!jIJ=f zZJ^(8iR6AY7th9)Ipi{x^y*nbS^jUzPS1VQ$@$xj^gA3`p`VE#&3nE`?`=Z9iv^c^ zNi<{Zx#YW5WMV7$>MM!4_hORGsG>6GeNKqgMnR1yC@wkSPY3wQ_k`Ia8`yo`Qe0z6 zPMSsbwN51J10<_2>7nOc6y*yPrRTep`wgTuqNcVEkgbl&-y;$ zFr9uqJE#_TrxBqKy%#*cL0RDq5MiAlxt~f!xlqy1DL;NK@?(nN_Zex?yC3rC{}4hw zNp;_Ni1-QXes6V55 zZX=sHTNH#BDK76d4fK12ve}EJK~3LC`RUauWyes;d^_-?c^^HGIN?Y09!KV4D47e- zWhlwsrL)NOAA-dj6!$w6w|X~(!h5LOE*Q$nxLigMi>CQ2WKAIhA@W|q`zuOWkNHGVL64FF5dLKs)>CcEFFrRYL`zB=7-xUSF zAM#BfA~Np}q$qE-VEwR2LK#%7*N~n)hoN%Ky9r$N8wBeH5#~biTuT|WL?~lD>C5=a zPt*?xMgNTy{V6Hh`$M73HNyFU^NH{!A>i+*I2oT76U{X!llsk~U~UrWyIjkNtdeY)(bNq=SLNgBO2LF^?Rd?El(>U$1=gyALhC zKi$6Kd)Lv3TCe`jMdAC}4n^}0AG-UVz;REt!?DL3=WO&H==ay}Z8*HC*Gq+8(KMCj zS^4!v--#47+e{}m_9)+X+vGIA+kr@}Z9ta$J-92|a z@;Gv@muW%t(H^5u?>xUT(YMQ()?w%Y!{h6d*KS~Jr}ut(?(R>FA)grieR(^K-oBur z`}zb7&G(Gaw|Cwgxa=}~zPY}DUe;k4KK<-~PcJ=e;P;#kqte%5oY7$%zsvCZ<{Eu{ z0i$=o7yuqU@pwYOI5|L(_4Yk&95>hK1-BCe#wh{A2pGr0Z3rBW4H!cM#svI!7`;0J zkYa<;bAw^bHO7@8x?M(|uNq{1$=Mgcul}hqR&NJi;in%%Pvq|T#Mh?-V(&I4_eRou zzK&hSF>nVDL^b#`qp#j8FKrB}GL8d3KV+N_T+?C94j5;57-Kq&0VLI2L>MrJ2&o|Y zFoc~5hhakKlZ5m-h$dGE0gm;}v(k4M6`-FAr5L*m5Kj^*DJ zkp_yqmB!f1jZ?ocrXY&R9dIu-hSwl{YXZhBy>Gx640j-*f03GVeTRuF*!T4zLk;-n z`8Ej^%r!h4kaInBC_%sea-_aDrM^RN%bTqq7x@1Y?Gl3hZ~b<3gWFN+0!q(2=hLJ8 zGd!PCPQYCGbd>AUd*}VYSpJo99&+pS!^pWR6f{vfkhi@%EZIv1zAuUp<*SZ-6*)>= zmhPe&{FpHT1Sqh20Ak5%st-Ys-rn&8jWI!lwJ@YQ14jBAmvtED`t(OV9mY8*n!`Gb z!6@%2k#j&z5~u>!TLu7^xk7@TkYIqQcbU4}3(}odWsF16(@+jOj1fr98F(8ZYRCz` zjiSswd)Y4ic~3xhV5oDq(er8J7+46~Z_voAg5f}E4j2Ow1@r?UPJ{yt1l;kW{ry%`zA^pSk_uxKVzRVIY zHS(oaydWaO2PWGtqx{j|_v|vpBRV+OAw{)N+k_5dC>$XK*$rgCM&l$nQ_UtZ2LO}N zgMovs&;$C-m+dn8`SgR(`k8Z$v*9&IXlNvqJQ;5zJ0Olx7%=)#6%Gm}C@XmZeKX1H z(?5p5#PL)@b15aTM$)pvgp$)xI1wH&7r7A7uR^%d`WFZ`4f4?YF5+_}n-8gyls!D4 zUx7fQ^shiOMFhG+2KuKx5XFQ*K7GSwq;mZmq-wg}O3%BHQZxqZy#eTh^dJk2sfT~B z+hvUO=^gMF$=;Po_6UfPk1!*I#83}ZF$!)|;dTzJG8{UL)8J452bB6*!eJ;JsQ97T z$nQ|vI~fir*eEGIQF@?|k#IO&*mOAf^v~9jmi6l)1?84h^x<_v(Ko=672O-WNzud6 z7zOl0@bBf*zXShaaO_D1UHlu~KA*k?-p3=zxPX2D>7OVZ z`~hRWe6JVp5s;EvpEgc@hU)l!M0c{j6+x-y_L_?VDVU&?!{npv{t2vR>34#lGGNpO zj7DN0T#rS`h4b*;#)P?O+bCjVH0cRt7()L6QuWjC2L%e#*nlyER1Mc%sAu{EUT7g` z3%J1;x4{^`(dH(Zp$Pm^Fni2oHd8SBrDXPaHZufN%w}68LrV^3&x09ccrIIbiwxFa zbKT_f>5oBoqkQ`7-owUFy${;?ao$nK8>f6`j4L-z`l&HaU)@3N?6`F(?0xfQ8&!~G 
zFchtfudhy3)xpuOr2)O4 zrmC&Kz82k4{SicftbuWUeeEvvx%8cIAj=E`MEy>(PiLcH5mnvLgNDCU7&r<&)TciI z>fRvHJJ5ZeD_h@!sf^xZ~r8FBU17@nsUlQSTv z?&}c!?oGy+-rhw6jcEu4U7wCLQnOBm{siEYcNu30!@ zf}@s}*b7S^_*0%)3aGt=OrUBw9u4_%f;Uoa<2~RRDnDlTNB`u~UFbUL*Fy}_Gi3;Z zjPX?%L!LFplo^AojgyZUXF$WVjbeBfVTd7Gc?^a`Lq8XeG%OKW3O6K#BG6G0&d`_9 z!!Gn0F+iifJoK|01q{7Mix3!b!LaMFqr9^mj=;TkFYL-MgGVEjj~OuV0^ho6pU{p zv#4N96ejK&V`BMN(juyoA$n>D*T81@^joP5npY_FX?A2!f`X|7fKlqxM(<5X42^hA zjgF-}w+*c_0l-LfzxCgN{{Wx91N_l8zljmq0KA|DCH?2Ekr671vcsc`vPlUw1er{A z2?d^p!VYO>_4Y0vD0DLleP478k)Oxw{SKp^CaGQ`_eScM1dQW?#>fiexGH00xiNH} zF|y1UT5XIhHTu=SAL0$3Z=6_Z3_-)NLs*=A5#cF5)Gn$%Xpo>Es_!(kN2Eb(q^Ba% z6F)Zw9eK_;=`-U@(9@_D4Itbtp%3dfuF2cqNB_IXqGybJ(aD~KVm44zA9U93)n}8n zqUUjQnpKgHfgOD~uDqF!D={AvGv{NY^+WmLvdb zbb8Plv~?H@JE%f-U7cEx1gcIT_MjJZc(o{A*Mk++tC>{1Wl*fL=g`cdtp8cih`RTTNGZ!}ILsSz&?PMDCtuR4qZuHS-UCTPJ!KbBYYQxXg=-(Aq{*$HCI%#QK)7U;4nkuNx!(A_e`wV%^#1hUgSrH6=vC>5jWOAD z_er{UyQ13$I%w&A&`k$Fng&4m%I0@F=#Y;GKhQJIKjeA-jg5b1}>3 z9W_w&%}4JtbU2)b=wN@SAfpFQRB-p9p`&r=DeyTRcp|23X%uQ)++m=Xw+V&hdcdt1 z&Tj+W19%It5AQUOupXnit1w=HIq2=Jaa1h*ItYd?-{%mF8w`xt%Z1t-14fRbbR_}} z^XdOYAnF#QkwR^e83l9wZA7{mKzeW%{|yLvDkd7xJs*RRs<4x6PSS5yibytrlp{ek ztFxAHXNWIlh-)$-AeKza(trWURs;-8+};G)ehBz6;Aene0^SE41dz;VdTm`B{S)v# z)2D9+-zg9p3PK0I6;@1 zR9iAPHK{7xSY5kl^0bMmrj|+VspO<+8*VZO$52|_XDQt1>b02ZBX5(zZ3BYve<7;W z>XaEBE{&MfjQi(taChaTV)|T8a#Ad|qIJ^bSraEuoIEKOU5Yf~5)3#fi8L1>>C<*} zfz%{(eMxs5*_0nJ#4FAV zitlvNBk`ioUD7kcHQ}0Laei62&^OwqC@Or`3y&-XZHmC(7A#dS$JVr&z}OHl;i-J= zO9V~>V~u&jOyCCKgOjj77kE1`*3Kua0^R_;4=dmcfZqb9_wB&gqcLF{zo&t*uV6xq z-^T*C;eFo(O~Z=&gl)js5HO*E@o-=)txq^GUem4yzLs%5<2T1)&mP=211|-h4ZIF` zDDdsT2hPMEI^Z7xW8cDr^}u0Z4H(P!6Rtc1dklel0N)S1Z>(6QoABlsu@82_4&V;B zj|ILIcmVLVz&o(+NbVKDi7~%I;g(eJwDGcyN`({z1T$4@ZW|{4W9S!27|IrTbpsyYYS# zFu6DI`%qv?Umh^!OAm>c>^%wlbm88N^`=k!BU=^fGOV15-(W=d@KCx80RxSG)SC>oA4HJ z3-s}(#Qs--FUI@J68rZ6ufY3miTytUZpZshiT!C{DklxV8-N!Bp9B9z68qO5C;hkg z7w76GECT*M=;i=Zy2tbTz5S%z+az9+0A5M_*nJVZ&jG#^?#;)DvwIWjfVbkkFEF`( z+86s`f%gFu|Ly#~5t#I{Ug9N-f!Dx)5wIWf)=BK24ZIq3GbLW~X&*^<2>5K!9hBI= zADHO&NxWny@D}jf0elwdwoB~a226U{3Ooz$TO{^30MEqxVu}4Vz%%e3l-NH9cskx^ zOYF}Fo`(1F68nb(PsRICiTwkBu~RD8S7N^gj161CuX_vZKLiX@9y}#G z{d<8Y;r&&K{d<5X;(fQo{++-R@V-N0|2E+9c;70qe>3nnyk9G^e?9P-cwZ~AKLLCO z-eVH`8-QVAgNr5h*8sz$27?m&=K#Z$24_p`&j%ij_wf?@hXcbz28T-Q9{>zf7wjvs zUjrVA_pf^i>^}qy6Baxuv40;hOjPh4iT!(lVQPY}O6=bQ4AT)q=Yv_U{Bn6$|c=*uM=JRV27oV*h4f6uIEF68qN!BMX9SCH5zP zAyP0VvA+Qr|G~u)`)h!E;yoy_e{2s~pXuBJ)zbmMKKRpJ2*7?i+hD-`05H|V{Sq(P z3w#6Ip8`&Sp3X=B`?mv=9lKXz|5jkKV_PKlmjI{nK1X8zSYQf2N@D-ZUTIHu0+ag= ziT&GvFGGAc0dD}Nb0QEic&)_#1Te`@=SsjT*etQX0hr`oEb)>W;5yI+fl1B^iTx$O zB?YGx(Rp)a0l>PntZ<(crD&xn>4Tpf`+jFO5kt7eHG(o z;PrTK0#1OgL1O=6;LG7Y2bj_^8yHapXG-iJ0!--`B=M3(z||-^HNf+LD}bwji9QGn zn?q;5`;7$-01pSQ1V&k;GvWR6fXje^=&ZQlgE$2pz~G-pP17<86$$0Hdn; zHZ#7G@p{IqfGPe2<7UPUz!Yu~;~I9aU_6KMOvd@ZL_e1CaCRTUcmU%(U}U{-JtBn) zd<~4}FdoiWW4sTe3 z?P0u)@p{G$jOQ>O&KO99!;w~j51~=*hiCOS7L6{QEmU{#{E__euI}RbF2AcgdA@>j z`=Q}{>nzFPeIVke_IIDm^y==Ov(RW0KXqr%M^HSytGj!C#P8}3pVj=X?(#X68yML%(MB&vPK!4%*!{{v2-$VSa?gWbS zySf`_5X-0T2>Ln4ukH$(&+qEapq~7$?he|`@vA$8F5!1|m(c0_uI?0io8wn^3sFBx z=og)R`itW4+aaL<&_+8yubRoa1yNjsPN8!~SMo+Lj>Mo;m_+8y;^a;nO?l#)a@9K`DCH$`L zIy#Qu)tyH#as2A;BkFHb{OS&*^Y~reh4eYcFOGB|{SWiIx*KUKzpFcvKH&J&T}gNF zySg)}mfzLgNw08y%ERE0{`RteV5)rI#P90vtLsqrsJ^N55P9sc?qFI*^1$Zo!(fg6 z+VI>D9p0JDpU%6|`y7715@{#;ofz!V-znse_r3gnAM;aphfU=4syoC!q3{6p{#W)_ zcZ+T1cXh|u`y?;Y+`#r@H2bT&#{R(RQFo5r!0+nru@Zh)caVL_`K9h2dzRnT9b_B$ zUEM{tg~D6xR~5Zm^y8&>3{>Xa0wv(@Vhk$!<58ubwzJcY``yA9=h+dK-qrbzSKvO99_rdQ`f==?DGj~y-<%;R@;zTpghKg9l&O+>HGN4&uN)OiFtcTN84{7j6)_Z=(w 
zzl^*ge|5g0ki)CQ+iaD9WISLORs@Q0K6 zR}KAI-+v6=*7wI9^xKg>t3Sr{w)g1{ep4ZzmHtx){r4UGcRBo*I`|PUE5Bp#Vts$! zA^$rLf4T#}O26Kbo+%FhWe)jHcgXW!j__eecq;bR_}+KWQ$O44Pm=-G_p@Oytnb%4 z_?MzQS^af%pR{wuE(zDIMkM4u8(sREPsgQrKJN%>0_#FPPI`nnF<9&c5ed`_m zUpU^sff!c#{0@B$bfjmC!(R(weW?*&I>FKsE)HIao4n&IwGgcz$%`!U9S7mEqJ=NK zR9DYySi6!pJBuqV@gWx6n<}sDq0~fGTh-QZxluJDSYf2 zpI{SLuF`jml~0rWG+UNC$tb0(z)~2*X^zKJkx=G7Q4s~zU)|`d z#D%|>j|I39S+0ccf>M5vz)n<()!PVtbx|opTE>RCYDJBM zD=U=myhQ>{iR_=QNoldrMAQf~gt!uTwOnEj^VX1R5m9L4?YwgO15rET;*lT1-1H>gc16$+Ez z*h4wOk~ic@i0a*lZ?_i0Qzm)qZWg^N{S)!&i!2g0bxY}1c`UQHEssSe%9kj8 zdPAk&tf6dAd82}<1Wp{?AB`2NRb=E@|%fL>$Q<|src%!>ADOT zLARxsWvAF$o22c{-2G4AiC}HYF9V74PA)Qg%uBqH7MH|kTu~BP0Tftpp^M`y>1)?! z&_;YbD@v0f~h1ankcv;e(NW>>f)7TNa3yuA-^w!y#fbm<84<|n@JFtiJP&g z?93!ll_FV%mg3s^P{Q%)Za;L9-dRUnRzi;iMLuF|?poHk26O*NmUm`?@f(`56>e3&-V3~D}2F`o*| zr^)7%`7mvU8Pt55Vm=j^Pm|3jJfvF_(^atP=F>FuX{z}&#e6C-pYVxoPBI?AYMS|K zK20;9rkYPv%%=kL33FabdxF(e^*Y^rnr1#>0*{HOm`?@f(`54r50Gk#BA#wOO*5aS znom>Arvme7viXDuuqrTFnNQQqr>W-C6!WQoo*eDIy?#wx775emgj4h_c1~612~LQ1 wvhtX2K20;9rkYPv%%=kLX>#UCu*x@CnNQQqr>W-C6!WRTe40%2*Yx-Q05_k(-2eap literal 0 HcmV?d00001 diff --git a/client/src/vma.cpp b/client/src/vma.cpp new file mode 100644 index 0000000..a2023d3 --- /dev/null +++ b/client/src/vma.cpp @@ -0,0 +1,2 @@ +#define VMA_IMPLEMENTATION +#include "vk_mem_alloc.h" diff --git a/client/src/vma.o b/client/src/vma.o new file mode 100644 index 0000000000000000000000000000000000000000..41ba09ae445420a6b71a0e995d809c910f1b5c9c GIT binary patch literal 915456 zcmdSC50qS2edqbAOKnw27NTt#TehXvpD?PnlL%vo)nmNsR>Qbu!T`p>25h%2L6C5; zk!3daaU5hdBm$kxSR^xKAS~T21J&XrC+v{3U3(5AS-pQuHaR<1vLQ1hOLYrym6UAa zIGJ=$P_&=#@80|BzN%L($(S<}pCfm@`|kbS-~Ii+zx(I?-~aV5|MBu9DJ1--|1IO+ zJO7|(BqG}-|qRYx7~AB z>pA>-{#SbJr{L+=%&wKmX#rW59IzMue`90sdF;W*_U_*K@Lq#DHkO4K;JnG#ANp54 z2Jb)iU2bFSg!kdyyT?BC`0fvV?6I-O9^BOq?}r|*w|ZJU=HbOS#lOeKc0cmbk3RBn zC>tAl>+oH};i*NZQmZH_M!$_A(KGP&KlbR}hd=Dg+VLCuHAlDKc)zPK^Be9tfXF_H zAG;rUcz=r_?fCuai>}3tZ}_gi>M{KO8v5n14R{}X@UaI&Z-Sp4?=xo{pf~s`rN43T zo>_Bwk|$ncW4re6Z-MK8H|pgqz*E%=(64R0S$R3(@%uwh_-!7)2!+hgv9XUn^1-pa zNZ;7cfBfOy9r!)-7jC?Pzoox*<2}K6)kQf2P4xQUhjxAZ!(+ej$nIa*yZ^EM(C_Zy zJO4#m>o+~TeqZuh;I;jiDG2Z$+_me`5B-nEj|{y3#lw5IQ{QCkMZ(+r@XtS@e%tXY zEx7LA(dYgiIG;gBO)>=mzegYbaPD}2uH36ONnHks|Nii`?%6%r|8wD?Bx8^K{LgpL z@6E4q?fZ0LUgZA+*e#o`|o?;63=k4?Ze@?eL!ZV<#yS+Z=v> z_Z|2AJo0j-3~3koJ^bKDKO{YAhqqysldv~@*4ie&xu%?DKnFbR)yE#({n*&YAO6L? z4}WlM>}TKVTDt$g>q`#a1Ky%N|CO@o+XL^oJSjUKZ^wqpZgF0)14b1`>JFPc*$Y|n2jX)hTb=e z{N}k-9$Ksvl10R9OM@#KX%IDEb z)`$Hr4Aq*ehiaul`Ju@qxhvFZfIm`heOf&azY%a2eW@$jTAzd9l_ZPjN4ocMNqm6^lklbry&Cj20dEW! 
z#?uf@3&~4+dmIgfgWf^6V}=j(6fL3EVxdqz?0Dwum`s!oPXceA_j)J#WO(AoC!Qwo zG#%5pP54cNUz7eStA>!9^$U`r0e-`yv!C|(8DA;en;bY*daC?|o%&n; z*5@bSeYLoLVeV*hq*OR?s&RE<_(0cV6W}1+N{NFj9Gl=?9lt+?Kh*2SuQKT2nwnGtOmTk}f#8%~Z)Mtw<^MBhpTuYM(TluS|Hb8hI50?jlo z^`&W9 z={p0?Z|_ZBI!oQrm-@Fgs0%NyZ2&X0E&b#9H|w`#DcBJ4ImSi&eZcFW#(wU}w@#l& zE=<3c!XNs|Mv0cv^Xe~p{ND}!j3@91{7-@Z=fVFA;D0v7ADj$->KguxN3xWTUC;k` z{`U-j^mdN=!tck0|2+LoFb3I!$({qJ)K~qL_B-2Y%jbbByJ0qou{4Xx5oB`_|MJX^ zBst<}-^I3j8!`{gO;3U4WqVQjU&mI};elYxfwSrRMh8>8@1IBpGj<^dzYE&5SE)^) zgYXqC<~?6U>x^xV`jcOgj!LeY#nQ29=uw&+8az~RevNTI!@ncQ$}=1J+jHvRow(v> zc6+)?hBhP{{#bvrwDG#Ke5eMjTlBsi)~1VuCHlqqix=mfJdW-=KIj>{VDcp#Kb{T#$R`D+I)zN1do8$e`rPPh%1j!P1#{3a<6&**r(D7u>*z3~E z^b37SLynHSt zmnoi-ZD0R^vg1<&c?o49j={Fp8a;{GJfm^%Q0)%zESG=(+JUE5U_Z+bH45&16EZIk?ki5z23&#R}#mlAW@;WbrC2*tOzE2lg@lShQ@m#bHG$yoM z(_W^2^iO`UBAC5NU!~W{bu5p_O)6WBVuAPq{p-h8_a}prGbgW+e9d;+FtZWf&S3M8 z2^Z<4XkF-aa&9psd>`l?T0BGfVeJ3gpIZ5a70_Xzu=4N?1!S&F99d3k8T{J%yBPUA z_{W`ml6+PongzL>^m1v>UY0~#;)L3emxJs($DaoJsXE3_WV)NoZ2EsXaf#t&#@bCE zp=FTi1HE3(#hZZdhM~ddmqq2!RD2WtL!G*hdyuV{2E1%Z)}Lou;>SR2p1h>^NYCf7 z0agy(6lW?PI2dG7aNFcJ)CGpcDStz3uCk0Ck2pK$WpZ%4G+GWv5L$^9pU+#mM#4w>xF2t6Y2BVp+SOT=ypXE>^C0 z{AnjHabFp^ZqwmzbJzkgyyIsS@8q-%bEYMI)PL&4PxdTh1H2p$FLQh}xkUFXlgXgQ zf=;J%j#A;oAi84qTW~_$>}33s@XhrtJ*PYIy%Zcw*U`x^7CrYfjtn27xLvt~bpF}W zja=MzeWw$*FkhWuPI#U=x z?c|SWAO49iW|!g9=djC}xm=rmO^)=>wCPu_Oq{HD_8DYwa%61g73gtpKL_W!_Y-t5 zBjau3k*&Fy@$8uY(fY}uoB37fkeP$*`d(*0vL~|5=))DF+mA@c+Vh%oS z@pOad_W0)9lTi%Z*01WApt9bkJ-kdUUAETx8cz zjpxo~O?EsS`|V>y;TY?QaQ$yK@64fJ9li-C%^k9|&gLZBX(gYn9QSejPzC>@oa08u z9CEucJ(MpHU1hhGi?rWY4=wKTIcUo>62B_XsdshWIh=f5a8tXNv7yS-X}+Yqn)+0| zYrV}CoN4@?V&>pGc%QKYo3`fjPyOm&fT?&TK?mZzqVkpMFXrD;@+BMiO7h>q4lNav z3y&&n~qSRFo!zd;W)XE7cz{}?U0SWJ0-^(kJ8#_-(DZ3{_Fxlb!&j!91O zaHSX9PaVxgLp$Z3g_B?yj{`jV`hoezXzgMo(%-dF{&u!C2bK)jx(vCQIr>qaQn|Qz z&TV+9tnBeBJD-$1?dHbHVJZ)9?fU$?N%t--ze>5~D9$YQFmGSZ8c$oU9=S9<*xjZF zk{NFo>OuA?Ya-**BkO0rHgf;WF=(|DdNLnhxf6P(W#kX6jQl|gBjOD@;P}+fIF@-n z@jLLzcqpB_E?GQtT_=yyvWPy$n*am48Vu;9zQm`A@pY_a6^>O$*3C>XR`sPkNces3 z$=g|j>al$F#Q1%-RzO}Jn&qr-FqS-;DaMWcvf&_k33PDZze_>h-XwU_<|ziwlK z=E}_m`3rHh-i7x@w|LCJKBFneCcqRuVmie6O~r_p>W^$)<;Ts799o)Q zLd!st2`|^-S^h8lPVgN@8~HZz7h0Pw^mazHRGDZK+H}B+=%0ZnS0#WSH@kO8f;ghKk-L*s|R6jd6KvtvKXtrif2Z;}gEe7=!&5U%?}_-@?ygA8-}U z7WXg?{cf+T=dhlh_ij$HLzi}QlC3W9V&9p`Im9$TuUj;@h_QOPkNs*M&S5;kM;s`+ zJa=B*Fg{y5_5LWYp7N85@xpk+*cHF2zRD#F?C`SvD0zN9LxwO`IN!R`Ghwp)T|0sRujtdxKvC7EUn^ zqRS=uj|k_OpEg!#D0t)!-FgeO(wGB}L_600kCj>f5y zEBpL(RnJ$E*Y9J^GyykZqf37(rb1;i_Hqd7(Cy_6A_(${A92!aXf{x|MN;g>; z(ArvI;4>|Gh~`d3;#7Rl6!VeO%q72vFZ#anAlN|pJ&ohQ3Mc30hscMd_-m}Jkx3Ss z*D-f5Oa6wQC>4@@za7)mzaNmS@7=p?j(to9*V}Q)kJcJu+9>aWueH5d)698}@q7og zV{gIXGt6`OdsaD_dCv#g4Enu7|Ee$dZ~IBfjc{VEoVjXE^o{YK0;X_a{#%um;*iEw1!YSsi@SN}oIUPF2NxboI@rcOPb z$?>-MePO#Oy*HmJne#SYb|uUI2u{20V=qv!eX;+ld;P0pUqk$Z&2@V|w7ynCuURK6 zpx0C6^Cf4YuJ+Q|GxQ?Xsvp0Dn$6mDd5XlC5Thi}wlr9T?*xO4U+SwxM4SJgD9rdhV z%DeWfHZA*`?bp8x{dVI)yf01TMHUt{Cg}bRwQ0c9{J15G86w$B=Y`@azE1GO+bTZi zIQbFn+a_mZYlV^*$0}O0LAGSS6Z2<#e>^WR`4h}|UNG8aUa)C@C#{apuD5kK*@f!t{WI)WG98=pG!owS z&i8$cI;AH*@;LPt^?Lw)XTRGM(#xSPJetbsS8&yz>`)aP>cp+m0j+IpAfD0Rjf_?D z9O7n;zrWYTzPWVF`gc1=@~X$rIejQ@7LA&+li)7hk)6<7XJFmXXG^dD*5~WUd`E05 zy+=v9V{gGCE)4lka z_Wd7YuKXDP-a;Qk{Clf%Wv$#j_8mI7bw=jamgiPkld>i;xB3^(Hg@3?e*2tFUY<9M zJy7dlv0aos}T1NOto()jfzOO6-U69ZkhU@`=MpT5H38Tyb-)!a;FfwRoR+a<~e=l`F1}zn|De`%XOH zM0cad`xb~FOirhZ_qsOl-(bN*YXh%;h4?^al5N2nKo-UGZeu+7oB8-1FJ5D?4Neih zq9=kA;vLE6B;}%U{dS*cyPZ17O**I2Gcq&k{dR{A{k#0WtH5>Rxn{xQ9mZ0o|K>=t zSmH1KeNkg1{zp!X7H4<$d6@>UIm%<1ZrqCByw%}`oVtC@#|wj227Zm3oz0$zWV&&D 
zcu+Fkyji)T3(IuR7xKqqc&hJ^Jhw1j@t8LQFF z<9i|f*z{ZZW6r>l%r2oHJG#ISZ}Rk`F#soyvjH5FSg( z82H&(eIFN+v0W4Sc-CV$7m%@oU0{fwxiWU6mlyaK)1yiKJQ@2;WOEy|y^ziAnaUrN z`m&f2-`7~_cx<_>rQ(n-umq>H23c9|;Vjo&@xFy}QntKzSAUQLuQi8!_w)}Rc^Bsk zm$lCG6X#fN27F8V%=W*@XhuwJb07MbU$b#AKM_7QPVh-{YRngGZ;Wz0d2oEY6prRp zd2oEp-T=1r1pA#mI~m3FR;O2UPvr5c=&X^iqnN+Kd(GbmwEmG(M=+GnO6M(%BhD9T zJ|bB!k#}GY`DnTC(6!3vBu@?fdS(tZt9I^Qhp@icC_>MhhRJ&+HFP9hFH&BpguOXI zPFw2%4e&34r_PIW-^3c_#q#pUo9_KoCV#9s+*0#Ao8!R?;cxn?_z<3!-y*t}&z6VW zU-2QW!+15nhwg`^1DxSHo9UyCKD5is8+JUK)r<4vMd-4M`BgE!gTU}Y=2D!H8ST-{ zJV$e`5JOKWo{r-231aDVk0ZI2o!a9#&N>nE`kmrC=MZ=HCX2_5*QYV|b1PkpA$iZt zg~+8>iN%f+b4#|X#AhLX?OEa4s9f>)F~uudn_&G(`@lkcE8He|uW=~vE!nko3hJ0X zk^3dCvYcU+ddS3)D*c_s_UJi{gZx@b3~z#1(&0orh+P-|cKH~>;!W9l7gL_C5<^-p z)Xxjlx1Lom+T9Dz7-w}HTzy}STQuKdP4X6Gg>f70crRG` z{X5>SiryCg5vSRF2l<~1Yfzr2+8ZlbR1QdMfBG#L($NOK*kIFdbE_MF%ooLY$FPkl zUzSIFQ9SA6q6}ZCex5dVXdd`25z0M zwA#|5m1w7WfiIzr!Hf73;}!T4=n(j#I3?xFX5Sb7F7QR~W4?$dffpOSUIhIrEkkzu zTQm>nz)qH1y3p*Wz4V|r`8u{v=WA`AEf}otTE7{3Q^)9CZ0P~}zeI2A>n}ua_xyG8 z79FryXLhu<_(-&E=>Y4zSz43BF}cKthVdJ%sh85)x3PFDrgel(N>w&Y{U%rBShk9PQ9cE0TrP*P0K))qJ`F8le4m&)JT&%ci zXz@nurrBuxuG#2v)Fa=dr2Tlp$IYt3^SJ8g4FbT8P1V5<-Mb!U}Jhq~Ho@So$o z#*mTG99wNVDVsolW)o-^Z1q9EE-qgZ;MLJ{e{R9?kA3zHbY8Zg$=}%C#;~2;YuV1< zZ?~Q3bkNtiU^_h@vbOWStnG~S*mM@X$tmXKRmo|a9*1XQEYopfs|oy`{l=G1jNj|((PqNO zNg+3Re0*&hUj;uRd#v_pu37O_aV_~wmy>e2Oy$L};gL^K{_z;`gYiP~)z-+5gTo~6 zJ9L&h;w}Cy_2X6QNfwpswD^kWU?X;Flp?vma*mDxaL?%PZ=NWAB zti^j4%J>c!PZ2{|oJQZknC4xJkHl{gPOHE-ra{-fitnQjmq+~f!5%~|FBvNjcbpZGdCes?DysAKTi z+m*(-)N{1D+VkP=)NiJIh~P~WU+4Mo-V5`g=wU;HYVo}}e9*YW2VkZCU3_Tal=8vT zperAEAM?TD4Xyn^_ow~*An1tMo4bR(A!k4C?9HY~&YO2f_U5`ccDi?Aj&~h0P938y zd(;AL8-gKwLtkcZ-1FCmBKZ!m>hDR=zxOXR-_wdyyN|Uc-wo=w%eTtRP6<{-*BGA` zUESEb_a$5AtuJov-=4*ulD4b+qDH|Wp&AsZ))=F>9XC?WeUEY-rkBmRaf*<{T}M;ZxKG}?=oU8 zqZz-Ahy2a(Fv9stZ=(W@4#v1Zw`uw{{@fbjacc{Y26X~&zZ}^v#cow>muPJ^(Cc2z zgK2a$V}IBebwT?>9n%r^It3kxb=~=a$o>Q#ME0kCi|4^DU3fsbvnhV;H%DW?xi$8b z2h=wn@Yi_2-;@U)R+GOm4`Q4H4}y)3c~FIJDG!(*i{~Z_TU&UjPRxVYZVh`q$=Izl zrf%7-`6&OPm^zJ<$ZNIce8@5HI`o1%#uIX0fhRuZeM#}O*)73j4mlL~A)WBB6ib`k z+R}v|lxO&{Iikbn79CQ4P~Z5$U*iXVQ+{|@P5#FGi1BUl!`ZbgKd@OrCy2RIelVXD zT}1Av$-?wd#-xDZUIt!$|+o!?%ds~KT->077vRA#vHNa~wRL91= z9N!)FX}rnXe?6l7<>lP(NB)xiMq!`)(b;t~&yKH~`Lc47C zI`d2nZO_!zI)Ct8-S&8#e9I>@xnI#I%t5=+@&7P7Dvv3dD4$hqI^lih3BOJnp6l2# z%Q4BnMD>ji*tHN7ckAnG^d%a)vA=&o`8thH`7zTUWO5+vA>Yb8SUL|*^Ic@%bHGv^ z<*dxd0MFp_9CStNbIQG8J6`&hpUYW}6xmRl4ZbXlal+rVc7A(dFqGA?7bXwbiZXDT zJddX5y+4r7ee39RU426v<)~ysj?%Zrcp&Uom7bHcnVh|EM&%RH8Gr_TDh}`Q|GtOr zH@34j;OiJ{4^QI~PWm@N9AdEi8gY0I^ZoEFohmOgS{+vYNPVGKr}W##hu&z-QTE@{ z{%|wUHlm$q*-Ya{*SGdV8H}qvjd-pG8i8M!A8SslyNR?fP=5==9P{)M=ybxNfG-~G>W z2d>Y@6*XtRf;sb*+`+(JFy_mX%#Eke@1wKVqwCkB>zWg@KdfU8?R7bwL$^JncN%9S zeea*)#e|JkMR_@)*aNH&5VK1Opn{UV-pD!Ti?dmC)Sw?m%;7|FGXD zL{5LLkFzw7yaqV;utpK)^=eBTBc2Bt;5p=MhEo0UeS7^m{_@q^(*Buy8BK}Hr9aa= z?+LO2&XbfIkKc)Gywl4DehqLUeEKo^<^Ivn z>)u1z9%R$}quRH>+tcg!(#O0`b{lk+2%H@oC%(9*Yi zXY`J{9G1|x_1pd;(zki9Z($#ga@|hf?p@fEt8eSK{aG8mJ7k(Q(C5~M+??b4khcl& zA`_>ad_&gXiI3-*?@V~UrF!6>>&!uHFA#drytNg-1wDxO8)?m|PRt$jz+!%Ac0oM| zeFs1CCHzR}-`i~YE8`c>sUA>gj&jLc@P}q2@o#1$!KFhFc-Ku28uZ()2aPX9zC>g5 zGN7?#^nkfnV+*t*#&vro9Uo+miHFWU=;Ou)Fxurl%w5aje4Jpch9^Z2W3_{E(9ak0 zV8|b-e!5=j`=$9H?VFk#9Ge zp+mv9d-`Q`h<6v#A>9iopC#X8wue5Dt0TheJHdz1m&#(DI@Cp{^3Myd-zM1Am+$xI z1>eJ*;ZE+y@pFb*%^5CZt*3RDP}uLS@pjX>U4PR#HwgP58qiPsqiybvZ&WT>u)?0` zDtE&SDES=L+9H0LR=taR*=S#L;L$|Dvh(YX3QO z4tOhe(S4TMOJlr7UX3rvyXF^?yGJ?< zN@A`kABfOY4_fYqm2#$O3$PN$V2wtJ;$h&H}mp39PF!x6*kU_S^E0b3!)_ z)yT0~E*81D%lLac^E;i<_PlNAnKPg8^{)RN-I1P`mA_@Y1NbF;LT$pw2BLwU4bCmy 
z?}F}G=;Uo(a{%cDb2`m2CN~#e(6iRvMuA)TcvAaq`VaLK>nrwG9qadIaw>jqrtvEt z3GYKbKRh>)VeMaBLEInqJ~nRQ9&2D2?l*VGeb-_>?&?$PQX&5#`!3vUeQk+7Y*Tsg z7*`no$Ko;H$e85cfGhfI56uQYX7NRKMC-$MAgdZz6FaW@VJ%5=9DGLOR~awuZM^KS z8SP>>=KpK{cx`O-sXUY7ZhVGdxL8~{0%W5e=B;`r-@gHws%Si0+!(sqs+0fF*{9?{ ze3l&O&e;ft?F#~*OdPc9hn+CAC!xLG$+mj!?>C*x!co70H+g%_dj2t6UwA7YTe%AT zWL>UumG&3B9iP*=pCR0pVLk_&Vl>v=r-;15OYyIO?Q`YWBiU4Vt$XA*6i(b$x@yJY zq;TN24ZSCBOL`C7mf6SP?OBt$724$P?Q1*NQ!N%=kpE4(JR@FF)!-aY>nX z+u74$f9Xec%Zx>PJvDbLt)WbN^q^6Rb7boGIe2a|4vdg1s(X7Soe6Ws;SihI7#z;R zOKY~;w-w{{oVS%{JYI_LZUm2=(8u;=Krii$RR6(VeF;32p9_7C7X8_4_2+41{bB!X zFIhx~j{dNZ9sPM2p}%oIZX3I32Y$zhZ_eO1rE(Tq6P*_XgKY|PWa#)yud=8}~r@jw&*ulY?{t}&o9b4S@jdrPu7 z1wBrGsbJ%%uTB<~qgLCrzToTz&T`;vZG(dz&5ZyDn{05lx8MYtD^6-|Px!~*V%(l_ zx6!6S9nr@AiZ*ZcwBa12%JOKVoUQtTP8r*Gu$-L(o_n%w?=n1&@60rN1J6b8z-OH^ z&pa!J)qG% z3o(oCDp6dyvr7(ser+{b=}6=$c(7ix~w=x5%69r5rjPbrz0_wlI8 zMW=M1B)oh+++)#i7XO|;EoZAT+SkLFqPBJHgTZ1BCEOa=h6F##oC5#J-(?4G zGdv==0WZmh^1|PyAHfJYky!o>SI?&jpO3Tm=tLNY^$RUDcAXU(UGDNTmMRBc*RUMy7W&d1m+n+n4v*@c?f8acNaoLgT(c}fq{gPhfAK0RC z$GQWyMtw<#vP1!J%3r-ng(%B zh=s$RgT`rM4`jW*T{Ly;X@(2(H#f^W;+`qSbo#iDONjgWWIIFMaq6HaTE9%oLVUEF zcf>o^eyr7A@~r%e&L1i--Y|apzTsi}*i=Vj>lUZ%cwN??iyw-;&OP}q@^bHFjj46k zDn47Je2a8Z`WtLQ!nu)*T^P@vM}ps(7-@cGw(oZPqdwZ(oV@(f+f1!mx{b!svU808 zD*E;N)iu6^^Ij&a{8lU#$5u;?S8#*>&D6VqUIu!aeFR36e#OJ=Tv9gIeYaD(ZSfu78b1`*_|HS;=KQ(;Y^h2_(oPs~|XL|(1H82zv@jTI1|xU#4^A1^&U$=as=1SaQ65qWPXd*e3Ov@(-ZsuOpk*rVnQSX={7qZyDj(@}WyuQL< z|4oHj{}&V1o)y#g9_i_E-x=~cwAf!u7L&Q&Bk6tD;2GOri;XC=@%!%(1ewv^EB(&9 zBZ|A1YUbY$&`nVhqI7b@JxRr=!4 z;`9EFkuVO;Yeg6NPVu@x9nMpycckf^z2{x2-fMSs*_g0rDxZT+8egD~V2HMQw%^d- z>U+A0KJndwdY4N};5UR*xXVoZfUfGZL4Re<33%RfvG)|mx=K9I($#G27sptRZ_%BL zZMODDsatqs_-Gix4lFEK8Q4iD>pfm}l~Ae1TpnNAK)gJyVi>o{e5=sOhsxPnKCcLM# zhEDm!?E1&Go?i7i7;M}r@lRxD%qXpdDMURNAj$C!5%5+X|{fq zt&QCj*2bLu_x6}~D=u^2fR!x|cDz&=s>xngR*@gZuFIAyrtP6_7^m#Fat`{A;-3+F z2K`lDTRIC2TMrlC1y6YogU7SxagzHCJf5>TjC>#2-$%ZV^S398V}BC-gnUFY$#<>u z%Hd~PKHJI>$Y#ySKG0V8wFmzg;^flx$v)jp&3DV!9Otj-KEb}@t~MBcSuTis^UQ_{ zHuvR?`fpPQxQWkS8jav#nQ!oxxc|L+y8m(e_M~X1@=~i@vaR|};0>%x`UbeCS@$=V z3QwL=JHzet*yCAQ%G#t5(@9Qx=rjL5)$H28t@mH&Z96$j%g3*U*5u>SDLa>FxsRZ) z$cl1`JADj-9%SagJr88(rlwE8mF*bll}z?8l=xlZx8lg-$nbIY7aSj1H>0}Rdr}>_ zZ)QMx$~~03!W|L94ZWJ?U5W1`*}env4$qg}{yn3Q&qEA#iF2kN?8Gf@r?HQAxm)Yx z#kBQ%pJJE^Fl8&c;k0Sj(l~|o!Y%AE2i9Hcqn-9Ua$to#jM|H4e~ccht^SG5$YGzp z^ED;?7L8~4hhA%chxrTag77zg!F$QSJtM30k`HWzp0&qfa@^h5VL2!9cvW|L?%Lfs z?zA0#x9YzYB0MD%6~UuiFbDLTn6eukPUgTCoEW}xQC7AUTXl6yh8jF;{7wE!US%`G z-2&bE+x5ZDv51xdmT0NpT5prx5KOIwsVvMX>UVShInScy34iad@YVi{0esmsIwJY! 
ztYS^_uNrd;VkUL!Ee??=K97b{eDTI2rG?@$c)v6)uvSw6Ff*LGIVSv&kNh zM#xiBucW!g^6cJ^(z)ffreJ;14!z3VL)P=*P8?HO;RB6U*oj*`|NI<-{d()O^yv5W zAbPXo8<@jZJ05vBKHTGVxbunI@ULsBSmC z@7degr)XvL5?_$@L-9C#ndCo=r<**Uya?Em|KVs%59EyLLSs1ik?v#A*^h3vbiPYF z(Kq;2(a!npq^q9yJe1$R!O^^wxJvvZ_vihY$!b@4o4UX=pG2%QaFyD3w>vwQYA-zo zkEPo0%4uIw%xSrtXn&8Nqcn+YDt!)S9sNsVpo^Fvm~SUeQ|zsnn)zTYj=|@clbU^3 zObahEaYqzmg>{%PpS5_JSWteY!W~e?N8%CXtahRo1Kee`vo~1`F@S7G+3RJ4dWzrm zF4#rW2gqm&Y>ORi5IRNhi48{5FWvQ-FPr4?!I+L1-{Vrjg+ApNL z+->~5TAN(4veN6ue@1?iG5B}Lfct)`qm6r~{GL*0Of;eQH;3%bU0DuhRS> zyC9d)(ZNoEF@(+KAqQ_m|LHXKr-oPIYAMOlbQU{1VJVU1G6= zAM4cn4gNb*b=IInSMgW-6pf~X#4Rc(rVZuV3*EQ{p4=+CP5$)5L$%Yo|NKeLG4eOS zP<`PN_EU$uTC}G>;H$Mj)wB45*g>{6_;U96)a>0>TYs=U8p6x^Z}S971+3@_9PWq}^;dxOHcjJ*7f*$u`yx6;9u%wI?c!TBi71m+~3Mt2UGdP%zBIO zQ|&YJ)tr998lA&C&hKQ;zg1^he@3Q~GQ93h;777}^yTZf**Un8>u0_~++9Yenm3ap z5g*+?jF%b@CAH}|w&U4k@uBXy;FFJa>Q?pTd---eaUU@%IT?$6*)a8^J_?feHw@P#>(ZYukKYS@-Py}hqHkHP-}blc zJyTt^QUAI(Q246sGxSwncHmC#j(qfnzRz4+-f-Z~8&>`Pwawy+V*})?%%;*t`G`*k zzehXmDG-eEnq(ieJyqUFzJNWMX19UQ#}lki^=D^pHjvXPxNmNyJf=*z$lvL{j<|i$ ze~r7$+hphdmgr zGn^fL7okP*(rEFZ`lg+DZ+*iD_5IV7iyx&G2U32ReEt}5`5(IAlG0%b-RQ=v0br_q zyDT|h^y={MwZc1xe>wwcGTrw~N<-1v;-Axn7ZjuDd44mx500ECTdeQ)dysi9^W1!U z(uzHAcfOYME9kw;8PxeL80?vGws1RVs{D7nG*5GN(_F&YqhYO-c9x5MJN=RioTH!6 zcN$NSYqQ)3_FiYt>LUqj8+@IJ`8|De zl!f+nA3qq6uasS)&8_r9Tx2l^e?uJ2K7AV_dz5POyFo5;b)Wf!;l(@x-8cHnXOywS z1BKFxY3yj@k)*aU;15pF^oY*+o{DvEDVba8`_T81mgob!y+-~^{UeW={)JWAnfPgVmhm-0W|bnIOo?;p2)_!;4zW2;2N_@Csiicf%_MZ&w=+0FWh)9v#k+S^`YFCgdT$lKYy zBO{!bV?On4aoxg~XRn|60k*_)7p#+qeYNU~GXXlwmdd}L)hBb0t})Y&|1~ED8)t7e zx*7Haql@?{@?-b|%}sP}L@|-}3fw`x5F2#6(?fHtuzxz_er&HCxvVMR33d@U`n!xi zzC3&H%-m_WMit~ROUwRkV;*J`dn#S4a1Vsq+cP%I&d_0RWG^*8(OyM(arO-LM|*is zANIQs`@?wbH}Q6LgztU%^?(ijTyl%Xqhw95XJH z-3!lYTkM_Mig)3i=%qDIw=QwN$#P?Pt8HVsA0u@{hXG{%7-OkCaYK4$S2*nPIH(SJ zna;k8t-jU2XbatZJnPo2TK8y-vd7WiJ;{7N-Bah~b{_g`pIjY0!#W~;TCM@VGT8V8 z-)&_6TiQ@Kf^ORPM^$g0a?iIKwv=xyp179SbOrm>Ys`CVhnD%XM~t~N%vvDzZ$SU4 z``Lkwzt1{W`7`)Ctu@vT`E_00t$vG>nVP|-Z5%o|`f4JB%t30FSLZi-wk@|#U%C18lR15f$L;O2Yn*R|AO0Ni zY-?>Y(g&CxAW-aXFB_L>mZC18*0DFt?HXSvJu_r zzw2-g9O{b};hvP8U2^;fbJ{ks2kI}ZC!O$fW$k%UK1^~VeuOob&+4|9BUnT!fE0%=T8K` z1pmXjVBN=e!dLG?+-CZ%vfGoI&W$#CSD{_VLlUFbSpTnO+FWTd)kFA(y8;c`+MccS zl79h~a8jRb?+?fC6~Bt!s#96#`zCiH*mj1IcVX{|e4FgQ=&XFFt>FmgRs7~}lXof? zt?f7E;(`4JC*y(iL-p`GVJ$~E&QTWlDqO4&M++NIT4wwu-%5MoXfoQO^xVMM z=#qt}!6<~L5x)!%#%|?|UG?p^=XGfnxT+0fk?%dmd;1Ok_S?e>`W((T2p7c~ zW$1k|v=iL|t@I5L^`-U$^e?#!ag)gt%ZPCL8fC(J zCp6)V@7bN;ALOHspH>_BDCv>WPIiv^viZo{VSDH4WWOUi*)w#i(>~~@VlHd%#}S?@ z)SL75GzN`JZ6-o~PS3=JrUQKr#wL%;W^gI2bKmwY6UyxTNfKX^*fz%7WVuPb+Mx-muQoT|L9+1R+;=^fSbmD%N-vz z#_+Cd`(jReF9&yszAJ}pyczqfGOfjdhswiVIJ4b)$DX)J*6F5Z?|)o)HYg)c;9?hI z9L=L_KQML(TA0nIUM5!B^t;{Zr!^*xM=>gTShE;4qRF13i;2|lF>n~bHvHtq-@kSR zv6}YEon=4X&L<{c8u)I!_4{hN%7f~+$%FKrwZ9qW5b0cLq>CK% z{QK_Xl1(tbfgEu6q?^-dKICOV{8wLQ(?mn+G3Ru18ed0p1kb-AK5AS`>66aK3!Y?3 zvC-wo)D6gvWc5JU4x~(zU83^V@@=4k|8Rmp07x>JKTwH_?-SzoSp471!l80d18hVx; zrr!egSATVGNOeSO)wg>q$S3OF3YE9^YO)X7%Y*6#dmn!5JI$&u{%dUd8^&gQ)L1oF zKo%qeheF<+-)fimZ|3TqVp915#YEt4w#fSNb@kikl>XUpr$50t;q@uhQQ0+;5!wi+ zptEx!R;OG!vw**HKh~Fr7k-EL;aPCiul~h*`p@V1DDAm7-Y<8&*IL^djSqRXn2C6P zGG1r9i1>cf@8$4Yeipn;hM2of#I}WJ;o4vxJKDw9?Z|y6q%+NEH#oL|Bh;jrm zpYgBSA1WE^;PESv=lQn2P3FW?WKcW_GI*h};;T&0+s7&z3Wgg~6gOP#m|lV09(*=S z&v;Cu-lmG*I%FH1*pg;O6{a3s$nbUnc0sli@-c(<)n$?3xn$Lv29iqSF;%|{N z)oVW|VP*LKQs3}k;qQLg&Q4evK2Nx0bY!H9e^#GMoTWGv?xzHo_$)=;$ArvxO@0+i zggYE&!#z&Obtpf>`#Z`*wUwNgxZ|mnU(lRg{%%)T-;3(1yiC2)s&9SXa-g)ux7NI! 
zvYe>JE}X@PeUa9|7m zv<8j2c^{I{j#%is^`64|FuZh{QnXD`mhhj^JDe&zl-e2xJ@qYQ4JGf7%)cfgc zOU$oJ^q25AcaJCI6MaM**??r@Wu`C3XV+VQoROL-4JZ4iLhb?moTe-(mk;Z#&m6W{ z_$x`DcU&u04(u={VeHwwUG^wzMmnkzT>&lk*@)d7=FP$ zuSlNmu+s8=I64UuX;)p-~t!cklIO>R(_S9b;fzI!Cml+Z^==Id+F} zr?Li3VtSM$vnuoSz#i=z?E-s87ufAEj4l^JQ}K7w%TI>)T!CE*8G(wee_Q z25y^8Rz75Md~8AcBkQ}#*D;13))j+1xpUFcep>1{`D(>V7N2TQ3ieAd^j>-#d_&%z z&v|UE?fax(#RjscpM`-QnbF_Qcl zy7dh8?Y?C3TKRFyCu~fP=)9+7z{99f?~3XOd8v{6XOu4k=9{byIWqaQIy$O*xupcA1@nKr=B#8W$7%Gw!MmzaSuE61W- zmgu9s$<_8DD^?E~@>Vv2B2K(fx$gTF?PD84{$EDvh_I@G(* z)f2ys=kn#@orj^g$KDb9*jg#ER5ji&6YcRj)Ftn+>E$dB1#d$*51>9JQ`j1nTl_{@ zLH$#%vi6ul<#~5EsjbNaa-s2ye);#OTtZBe!>8c?MJw@Yo_?&~jjrDe&&H!QdavVC z#r1s4!>y}JrW&)z7O}VPKs?`{r1K-iXXkIEY%BkEGw-86@!6LY)oAKt&9 zIj!EIci+%Et%VV1e^zkyjIFsM8>2^SS2?4xY{la%Aa({SM~zTg;vjpWNTSP506S`(gZN&ZV5{#Af!R$QR^ls~gE$(GGUb*lTvSO;^qv0MH2egZv>mcztj^dmVF4uT>4 z^jrH7bXM8U9L-)k6W+-;gx_(S_+IVsovK{h0FPpv%r-~k)N`YV-C>WB(TZpKN$HTj z?}Zw=Z9yJ$H}^zHlk^Iv>0@?D6Q2l9vqhHMsOq!na=#?%E5$#V!Q6 zqKAHGd}^_CJvqp`bRBB%F`~cHJ$n>?+1B57sa~t7v~F9 zPyV|}tgHPz0hVMXte3aTg>dd_BQC|B{J84o*o&a!U1Nl(jm7@xJo%UOE}_y#y&uP~ zvXF`6HeH}?;H}^U-Z{VD%4JKw zer!CPm8`n&z*M1`&GqrucHbBAG2bs-_&)ym2V0Zci#KB9%@32)LFUL6E{=8?_sO5` zq?z)H@~4$v{0?z<1s!N?WlbMFkpI*AO@r?>Di?__e3R%*+^ZbbK5}n&vHqLxanL@| zFvnq@X!}HUce`v|8kZyIn*Ye>YOOBRm+Xh%))v}#w52Sx&5O@_Zpx0&)g?X%K410@ zJKz4jmX0|2V&n43wz&McS2-^4X0zaPLJVd8OLmVjK~EP`8*Tl&``kO@SzPYhh%e&n zL>xn3#CqiX8?$r{@V)Hi?uXrUbAB7G6E=|>zS&UYZ%yNpZYj?u{>^(iQoN^fmB~I! zRy5~I%aEto250V0V(zM3S$tofbf)y?mafz#r*9$_7#W+=x6U|?zA|! zbh+dZeW9LWW{)rLJibP2Vt!x>p5e&viP?2C<;y;KiaGz`>Dl|z{FBzIwZ>h3UGy&G z?|4`KrB9wpXd^h8d-2+AJ$hqtL(u;^`eHI7Iw2$SF<}lOo?Xa4+Wr?{`rP`UayA?B ziy{Bw>|^8yeOwmv;et7zwlT}zv5z$K5SJ@b6<^5H0c=2NauP_ zymw6V>51LR3z@ONyTto>?Ugc_W^YrYw>9rh-$(P%QU6UW@W}X>gKzGX_YKT5>&Ts* z%hG-za1j2=X;^>YAfH(AcSK2^6l(;#C>yDImM<4?(PMpYX9(TcDE;s;tm^1rfFG|D zNhZ~2oNG&cGqN1${$NgeCuh8AAI&?xtJBM5bF?(Jk;r^*5?-EYaw z?R-DV?b8kS9Ci3xWJuqMvpE(rt#!pcUUqH=rfi$Z4YF_c7~M?e#@j)yE6SeCMr)DB zD3eUFhW=2QF`0a=jO1&zrz__?;jx#mb;#F!GmRh7uG+cU*nWevWK8*GjVG2b?FHvu z#wSb-=ksmC!x4RB|9&yH`cN1Xa+hbPcSp8Z`*6##JvLihUhyN_V(^b+=wMgnOVB5q z*IknT&WU^NE?eZNou<2XWNF%MkHhJlwg=zZ-Zrv<ritpeKX$VSvDK} z7VqV+%F9^a-^}@BPwVhbwo&Vr4em46d(*GY-S_03ygiGL>W3IJz?8pLp97qQRO~)= ziF*lqMmzfmu!XO3r{#q60{z%D*>$g<_1P%yqn^dz_&0rn;wX0W*&=yV`K#q_oT~4h zwMHJ<3tLZO4G^0b>S&x{eJ11AZyFxFkniSRiop{3BYgB-g3~S^2d~Y356^hueS5|) z&$iY;(s)R5DDa8(EfziDX)Qlm;(LM*(5Lp0%IBBy!yEXg{rKklU+;Xe=I*iu+CP>z zXPv(<%a8UvL#|&&9>aO!u

    W}{&pQ}*-qoN4rXLS>sQZtH#5zS$7hsciAi#nEtf zaCdSZA_tu4#X zP2fwEQ-9v)%N6qpZsiYMJ)NCXZulteLw)hc?R#m-O@UG@zvJNO?Pzm z@!yqh=EaDwmMosTI9aS*iYx;2qGa)fOO(YnTQSET1?d=ZcbePO8d#Y#P3b-I8~MGH zdC*OTLG#0J=1%!R{@qp>Ouw8ua&kVk4`Un8V%mFFWO3aeT1RrH$QlxF?rr z+ipYUhb}Ew8|{T?vo+tte|;a{#T^c{7dd|(;#>KNQUA@Db|19kU7h2U-KKu-{AO29 zeY2U^RPG%gl-(geYkL)RcE#on$d=CIAM$HqVeL?{g8bJ+oZ|~IGJXska?cd+cy}kx zhJ(pulFFpY8pJdecJ9uTpX(fxV3yt@JCVGx^p?V4xYJ*8qWad} zYVkxmmElLE*Rlukx-)Tm>#j5SE1Z>|=()NT8`O)ToPIPeXyE#Y>{Cu3!REwkMn;oT zvTrWLSHP3}7QjRAE(UHY6W9!R{0+4UIw1TClna)A_ixO>bv!$V3At40>R^TO(*9C% zsPTCq;|KDjH9>e{a8o{kA9LNa?RO(HmR$Xux;tMM0{_)l=p*D0!`U0-S4x982P7Tz zXKsrwH)Zl0&Tj$JWC0x%O!>S5XLqd(eUvP!50&@$a`knoXALBaVPA`Us$f8eTw6JR zPbVE3@ajVIf}Z!~w4hGQk<6#5WW$xIpHp(3ib;%3t_+aN)0xrbh9k@yM+xJ^a z?z{1NstvxA$!I_5NNfGV#Y0Ip9*W*4jgZpY&MqWny~c;PYc`fwJepiTGWS3^8&`nME+AI^@$`a+J4zLLeT#zEN%=z^W(?#UZgmcdJP8by~!m+Y&r z2a0~bhhVsI~-&^+lq%WI+jC!*_%BfM(4N5FSc5I(HuiII>e$HOBT=8e(0}_3=JN`cT5x8RFNag zk)X%%o-5`5zcPOP!dHveFMRFg>$aVFJ2~vzlW!_lZaGjtFV&tF%>`{v*z4zu?5omR zVtEzM)d#-w^~783WhxIO zi^+p7?hCQK;unoiW3C@{F?o=&g!5q9mo3_;OfuBJq2SizpIJuC*xI`+dTEb>?OjB7 zLO<3Hx(G(RcOc9igC4~5x(uyzcz3}zBjS&{OY>6r^WtWGpCK7Mb1U?_1=?xNfiYQs z#KZRO3gVoj#7fV?lP|-MufU5JHxqB19~wM+>k#dRzN!18WGjAB@kcQ^!r#Fgdy^V> z724fSCTqycxhK2dqiC@jeTICQcwz0p$M^$$-Jzl1ilfTh$D!}I9rAg37q>4 z?vf?nZg?R-*k$_`r{-}J#ov0%hF<;s;#iKBhTm4IK4o0z#+BL`@hwoz6 zi%!=X*bT{)bVT-Ow98(gsZVsu#0KrNlg-yzQ*!lTADr%w3i{{x$lep3*}F~nyI9BG zgAe-I*8q2&D-Z4C{Arj+=k0s>@0nh^o_-bR#eDP?(+k~6bBxGW2d(fS8@tKYm2Pjr z1a(Yr*+UlZVN08t7{_A;c7W4g@Y*&KsZ@q8D zBOyMNY@~ZcX4lPx7%}V;bFcL zzV#|yC_o>_Q*4%9=e!!acGAz=M1wT$K@Xpuy^pXU>x#$WDPd;6yr3h2mQaHGT|0* zDR4KT#{XZWci`v7!Mej!H{;5xmvn-*$Y2>+-veBWw(-NF>U z?i7=rkq|D8E#zx&lrC_;1NYN!mb_42<=*>g?xGj{!=38-9?CT3D$_oRG?&D0 z8xydYr$UEoZTynG2L1*4GhgWL1jQ~D`X9CZM7%S4;(wsy71jM$ZwqxS$98j48!eWs zt?&%?S-;Cy!7Jdi&*yA|dObdl(RW~U$7qFjc30Wfq;@jo75Cr0jehRr+d{72twS}n zH5_gx-$on8wxYVbg-40AoYmshGu8bc*w*lJuKuyNc79~e!?rTvq}Vr<*?y7rGvDHS z9ZlM3Zzla8(Y>moVWZFK+rQFKkT=NmvZ{p}?_{L09^GhZ#FJ?;53F(H5DhBGS?TMqJu=V-z4=~rY46?{lLwujz4>+9 zsyDxPTXm(&-2_=E^}g;=ttnMEzi!*{@vHniEh3Ek8G0C zOmPZyKfB4_r61@yNj>51=(mRP-_@0VQ@`FxzX|HQ_AcjJ-M?mAbsW5u1I8v#Z+@@M zg^%sOYa4glkPBJyP?K_a|Io%@(~JjqT{;hnYi6p&yK;H(Q=SLnLzOYgrd6R&4SQr| z!ck?=@^>iv=hQp@VQ+WAr}<(2;fI3E$zF?o^C9M;j^52tFVL^C8=th>a#6WCoL&F6 zlYWxFIoe#R{MC?qi(#%`Ri4%_WcVtBmoh`D6S&#_>mfOvcaMcWh0gJV$W73`woU2={s%TfPKds zKdL)R7Mr(A4zOEc4n&!qDeov7Mih?bIGpgU#64yHIOB;>KIO%#GLHJ>t)4 zUI|^pTt_%E=EfH0Zj@U+;xXvt=0Y(IZU)Dj{dl3l?m#EX+Q)mzVR)+#o5L!_!(fD zpVB;WypW0ICy2Xb59%X6=8?RQ$p5aj^7LB;lq-H}!Vh9xWQW{cXnTumn?4hdi3X>M z!{%?nCL&XH@lgG|)62s<<(IF)7vD2uu@QOOYP?sCXOjj$!pCs zC-?@N<{Z`8Yi2ZF_4`H7BjZiLoA}cDAeO9zScq}z`%aqohx0zFqknbYd0M%4BaC7H zHTX!ykkI71h$cRUoa<-o*I{Qgr+v5h<>s{FUFb(J4G+N&w)|>u%gI5Ar^FX#s~Vq! zpSxY3;=RFL=it`2`0;GvTc!R!-|GH*wn103Io195Z9C-0S>1gPGU4ok#-%anTORrz zV8BN>ZQ`F`H2m7M+8qk()G@xRJ-(}jC$zv8^IZy)&kT;iwqe&o|G^gP8|h*Ef#%_F zpuL_0Ph|`CguDv4=+6I;&k4FQNm&?=%F5ujg7cob@3FZ&G5o@uf_Z2y$nf0qw(rdd zK5`So*En@9UEhLLA06LKEjn7Z=jL_pHO#TABV#iI18=P9G;_wwGK$+h?4Om1l3n=F?9S=8RYZ}MI_+N7K; zuK3wcp3;5c`hLn8a40?Tkp-REyrWmL%eOmz!O4f-4e(ArJlusLIIpC=##UM7=9t1m zdl5x5@o~kyj<%9X^|@gcG=6T)5sk0V=gt_{i@tu*>6gLlb7i8tVvs6)5bV;V;^Y2> z-@?ag-N^DU{m3>vlbqr}&>{Dm+_9}asa!cw%SXU3&RWkra{m11groM1aQBe2dF|8~QmM49@lF2t-iT*=3a#5`u7I7*0fha%a-brpM;G^99 z0CVch`pc|xSSwgl=s8kdDLdo#DujEWo*f~FHGciVSICvH&WX=dzQpaft&YHZawImN zBuB!$);;mtF|L^Hc#_;i3hx_5o7UaMbux z-?>)5xBrP?DG#Z-VZUZ>J%9Is&XARTjz(?U`!IOuYz%MCp8*yQI=5&x6THw})7L*p zWh`26DrR%DVNdOGXdu{vr5vl-67VQ1kH>Q(+(iw~R4?o^4P#Qfxe?A6LAwd$SI?4B zy%&8&uaMW-?ejm1+g}7njz$^)qvf1s-MNg5S-? 
z1xuZ$fFE}LJm^4X4~Ms3$fwK0W2c(p33fNlvyW%jcU4#ZBJ8`)z@x5YGVF0xoMiko z-2^srS}C}`Y!4E@^Y*DKpKm;%uOLSa@CtfrGVJwHzccaAZ1(J+?mO1L8sfUespU@I zGxa?U#7|XuigA^GU%SK zbI;%L&*TV|_ilbW8O+3;-(^iWe5(Wh6W(dA!dZ69f!A+l-b((Lxc^P26Z(x^#fY$XJ`<`y|RbjkIZm#J^y z)m9u(Y`@}g!gr}&G2CKwKVqb_rk|Uk?NP>sz8c+-3)aHzzMt@&&XB{XQ%-KI)+m;4 zaNoXUez=x+RWf&rkC&;_yoH#VxUJM2s_}io8Li9kO;zR1SBzffbyR&2^XvT`#Ha&% zKY}h@*Sdd*@(>5>{zLJqTKs_Nn_^1Mml|84`&ML|cuc+~IGb z&1>~sy2;K`eZ%Bk&u{R!!#mjp*)ok~Gh>PGn^OHiEB$`$5@S+77o-Px$}R`K#PHp| zqqlRXhuZ_9byv|M&&L;D-(?@?Y|95`_5@<@1gqg=f)HyF8(2KqEF#z5S8EP+MtaXT zkF)+V@_XJkDW{=5P8H~fT^*F|iT4dCZY&`~1#h!7-s8n}e*Md>D>tQgA+ALiRY$hD zDLx_7JN){XVClYSjYD;`wq|nyWJT@M?`)HkGZ^IH6ib>s`8?U&7WNb(PXR_d%shNu zoz}gdnZB{PnZEH&);F@6_PyHK1>*5q*ykHyWZJ?bYYQ)+rL~uRH$ObAXO~A5J^_D& zQ)Ern<`e4{zGkuddY=nb9@*)+=KOKHP#&+}hdfxY*|NXMRrz;1B_EB-i;|1wd?T`8 z`{UG)_WcOQiE#IUw@2?1Z=rv@cS5)XpLTr2eg8srbiPybnp8Hh4S}{z?15xsDy%Py zr;5R86Jqdq50vPs^?u3KXqP=qJ%7;Yt2ZdOJigA`%@#eCBQ;#;SKq2t%%bsT>=OP! zd(j#pCgXRi`^0Oqp~gROhJSrJL!_L8Vgs`e)Y1FEEBig0z4s@4^OWy(!UO-^=QjUl z-#bQM73XQc4Ka+ZZ=#o0mCK3QRpqexZL!zDRs3F+Eb^U|BbGPE-il_#2FH{y*Ke(3B;>N>i*@gb%7n}I zjfKJOZ%!7s5AyFe{ym-4wttF$Z{zn){{1_Cf02LhcJVCy*1Fggm3I)cY24VfJ$^5* z`j9^^zuV);yYf#Zi)Hrnly5@behS%Yvss20<-$kfd?WuF!W-Tzhwl99x*5HHn0H~n zP@VWOr6+M@g+8~_mtxG)bJrZH&>!~Da;X|8_j=3>yc^t=PxW(U^)3I!9vu6&V-uBR`ru@6L;csZe1h_>@Y^`uMKc$?9 zpI2)P9Wa->x|**VO#TL#fgak29`CtNb*Y;V1?H=}uinSf{hTuZKWx)r2|rmmvTU$D z&NW-inn7AVoN$_LK7>QP=f*P%fsH<80n&2Q`+0&T?8dD=@(T-#_(CC}; z_AEVTUM1Z=5%za6zmjebp-X+zEsX)2U7)S>dY<(k(M5e}-|kZH^X^(Z`R#n4YI>}` zu=RrPbg`8KRayHQnsQ6xo9NPrXpS9rb^*SZWfyqf z3P0I8KOtJ#z0#3?3u{OS1@U(a2kpl+p592`z$|#*p*m&oY;Jb=h4`IyYpXxMg}L2k za$14~?t&5ig}xr6EcKDpi+$ulzvifZVjo$#yhW!_PX569P+iU`JAK0^hH`&D_29(m zRy-QR4fQNG0VlYMOc-5@HVZ<@H|kWyMk0P+OlWDQ@E&=FZ^T zqq??!)l2Lox)CE={7HN6wIyF4pWTV)&~7h4W==z#m-^8W=Yt!vMTIB_Jf8Kh?)@^e zRTpV0!KY<-0e_CT*LhFnBPWUmgPGmMX3GtPlxyLT3$HF;z%>F6JPo}P7YAkU0@&At4qAXCU)y0*mnSMGV+ z9hdz|=6eUBZ`ohv;eh)M(~B_1u-169!@C&v?~XGE!k1~imQ|kCnk{l@>$TWN<($vI zM=|z&3v;yBzN+u-2Y2P=-{#h8Kb1__{eBz#{n%DlHbHe>@91lIK{N8(cL}%eLnFn@ zx?f%NJuSGuI8^%qu$QBkbKoW#4uv~SMN`UExA35Qr&wJ$8Lhjl&moL!)L;8Q;iz1V z;fSpYGH?LS4~a4lmxoyF0-BYt*~Ec;M7SPhI|n zI|?fg-!REs`-?W;Uflj~l10TNRUeC~U4!<=XV=VFT>NVZF)L%FJo!5O(f+)VYi8Ci zb7iGTctsy3L+1;Q9@K3DTl`JMt`F>uG>b^|fC!%+M<=z=z{tCK%y!Bjpg1$elahK0-@ODA96)ftZD`Z#zTV?1T*R7`$WtgPU_gWnHu_)*#;6>r8vskOrGm#xIPPbq*PeU(c z*=Qg5_Fez}yYvZftl#ha|LnaDlw8$y?|G|B4OJ2f2#|%)w>m`C$V7I4QELH1OKlqo z%wRw^U@%}52iZ7_Mf7HQyc8aw&OD1~X4#5g;@BjQRtrqG@JBo*ldjkv3AM8O$_$U+ z1XlQ7W9Xgn7VxQ$g72ec%x^TtOyT~zD;xU;aDUyyy?+|G|HAk27w8<(>EpmaeZk=T zp=Bf2c)pBfEx@?}o;O${ISlQ8$-m3+;GZ>MK8`$|_%m|p-zh zW(#pQ%_(pGnf|`g=x^dH;Qd{Wrzakn&t5n@{dV~F?H=EwQQcAM-Wb%R2Yej_eYI3DervVm5u!o zyz_a@1)lFer#;Vqw$aDLXCs~m7dFPT{{6E&2cCb%l^LF&VUFT?#%SSr-$x72?{AEm z!u_Nx8`}@uPkOkwO#}B65e!cNgLt0$0fzGLlSlb+?vEkoz5LtI_2Icslz}rH?_)VI zeAL76F<|(phoP$#2BVvo=h}yTKOc_pvnPUW53pScZ1fXg6AXDWk;3+&9N79jY##!) 
zeh*u*6}Iw~=ztF-Q)T8pu``MKENK{&fcjLk6_1<*-H&K;HWTnPXd4V6+uXaQdGEU1yM=l0{J1~IJI2qfK{~#ZZ`)12;e&2) zA-UfY~8kJz=~|d#&BH z4%-)ec9o$UDkbORn|-E?Sf2Ay7J%DJ67##IXIS%njgMJPVI({`jdzpT7x8XP3 zeuQ8@{teC<2^WRl-zw8bg|T#gq{3K&S?g=n(>g+HY?a0T=U0Q&~j4z+i{BrWZ>$euB>bC+HI=-|R|8qgMonkv_eZ`G$DooXGf-eN;oo?*( z`8M~g^ME=7Xm+D-ZpLn$zAkEitE*3Lh56*&=pD&ey|3V85bss^l@ zgT^S+W|iWf;(O6xy@zEzuw3O}vAZMh|=~+ZS zhdOk#+<)IssBd})-ZFm%KD}H1%(XXc81ifOpyO5lK`-A!_+#~J;e%^E9WVFuyd1n= z6_u~^ zzb5c&Im53e$M8UXjURsv`eg+?6Z-7(Fs^_HyBz=S^7L1oG3pzgmiT&0s3-VDd-&a_UjXeFO-FmTkEYxR(>shIzSNwQA71No z@9iCU6fH%Er+F9XtiMC7pXy!Uv&+xvTtBCC!Ox1Qe4a0#NBI(uw-wN5iKE-SK?bDX zRKBRd|BbfREez9!-yP5|%{?W)X|NHutgq?d|E4Pb@)arr4;eeY>%ZsQ@i31%MzJI4_cLhSc2?9`-NAYxOpHUG*Y* z5;{wlsqbFhHT=Qht229G{n=QW_NT%=!K~tEIqhr3#K{2TXZk2+bA^;U9rjcelhj;F z+>xX^i$puYtk`j|J&%W6FzSXgc=B&%iYo`7EsZM^(=!=G-VJ|@6UUW*k+DNgip76Kf53 zDz%HP9`w4x<-$1p4pUZYf=@Va#9O29zKrS|GiR80o(~Uljv4x-ce=d1v`76acT@h$ z<~H|{-giNEjwYMvMzyi~W#D(qReJfZG*)E!Dex|M{@j>fuh(d!y@5gUK17qyPw+Db z?5q?yG5AsBsn~fh?ohLR2h}(J;(iykQy$q6XZ8bsl`nQS%kW6K>JJnzhDUX-Uv(Ex z9^|aC&heUGeHna#Up>%&k@d&f=E=?8e;nYq&eqnC796kP2dTfh<{a%|JOdmx|IT=g z_o~M}V}sXLH1V3Aqdkn@p^q%DE${hb%JO(^)1oXL1W&-7=@Fjm_%?&gU-vQ^|Ajf^ z^562FCmBmP*~gp>FW5p`Z>+}_NAu>K!qfzQkoG0hF{Ph4PqWeckoC(Me_q%}@pxYB z`qOz{;qIu{19rzx(d9nh7xvTewT6A!;Sjf^o$`O=gUYU#o6E&xl$XLyj*K< z$bGAN{D-GM=JqC+_sHi}IemG3y1YlQ550W1KihQ+zQE0#Ih^D7h_oj;pZ%N0o@CgM z%(XT6NwKXdzb@#4hON;)y}JWRb=k`d`&g+SSnBlv`FR%v9=^-=yXyPB9=OiYrFk!N zH*=}^ccuq`$LMzkdzl}gkE|Y8(bG>^o*r1ex=9acyp(3{Oidkm&DfzZhg_ao(X&08 zgXX7w(TU6Ddqn(zZ4*DdJM4w5<2me`27Cs2hZiLqnSE1ut7YF59&6b*rLQsCkE3sn z?VLd_RTo~&$fe+l<&yc>zG*-E(%|8!`h5hsq;HeU{wBGkzR4wZQ@Nx}FtwM<6+Oy% zY?etqJKsgJ0$iyqE}eW&kVR~;$s)G-#u;t!lpcpjRl|AN<@ zHRu}dS_m@we`ybb^6C75!Ah3zG`G+u^#jOpJ-B0O&%tf{zZL(Y-|YE@`aMY%-g&6B z3>|>|dv92u-2;8O2WREa`!%PY$*H(^dGx%H91I@=s_Q-RltY(B@A;ncyZv*sp4rX3 z3YeDIJIS@_aK6Z`jt5Q8(67OB zQ{->l1b$-QR?m^Y@kaX3>Y3#|*D~KcJ+tewEWL%dL2N|oTX>l64R?(2UiTJ^xdgS`+1duW@wm4gQgyp}U?Hw-R2rdwyTwpLhG`@P5cY z=kdbo%UgLNrkC^U-!=Ko$nFm_j^+6n-?%&ebUt$*<2?24(4C`1CEf z!=H4xV~puw`VSFIoBX&(JZ?3X#x)!Q4{IiiTfaM;(-FUWA4B(Boo0QYy7D=7en03& z^No=9PTI}J`7|76G?(?*q1j&!BMRI(T=e@2LPV3R!1Z#=s zQd=MNW_-}MVOzZqI@*Za)EURtSNK2rUXAjZP4-#;=!~!FvTpFB^VZZ!7Vv%nf3;@R zbD`-O{g!#Z!1hI|p{(Y8G>a)MM2-u~f2p$2$Lg$q*LO?3&ex^&W{MNFjq~(Hd6f(C z&4^zpMi=5-HlHh_?+QnF(zz|2-LHce@P%(^HupKezea`7-Xri3Z zrJ>H7iF+|eof}Tqd|_`;eXQa;GwfYeI9Fu8#KzS=IQrvBUl{R2PyyeE`*x-XiOo&`Iyr93CobMRIsK0Dr#)z1MR4SrvdvkqI~*I_w# zY(#WDlenemEcz=BFFMX62krWY=RJ`SztkS~%EF`9h%YMTg!p2F|NiI>EY53$yBUsR zBadBxA6wM-@|@05K5@7UeUrayM(fe=T^@3w?auSWa0*_-n#63Z@Jt-S)8!>#Ra@oh z>g*!%ar-MfrRO=wpZ=b;!2Q*IO{|gaKH9UP7j*c~Z2FmQ2epR>D zX8k1_9CBGiJ3D8F&eJ{DU!HMI5Pug-c$`B%FK0T@RpA@#w(bHi>K8lZ{&u3rJx+tn zhO?%U8F=SJhu?Eic{qz9eJ7o$c(ds~={o5?@JO3IiYKP;&>{8?{MtKUNZ)z7D3`n< zTG6%wYSm9kua@Xc9$!yajM-{{>Vn&+eb4oLH#;jeH$Iko%H=PB@W zG|b7%g!ErL_GOGMd|2OJ)(!>V4j)UfLdy;3`a1I0G=5qKA0X7xddqA!SjLkb$Aq5Pw1{?!94VGvo4Kto{RZ+Sm+z{;wkZu{0i~1 zyDyA*0Q+>r)JL?A*BTR_*6e6_t^=R$?lK%ld{gg|uA~h*w!X&yp3m>K{&%O&1DDKS zP(Dq6XI&|8JRk|H;l&|T`qK2dM>wGDl9*rF@Tf(pH;nI(zk-tkDd#_FIwOS z98kWq-MOK1??e7WS8w$MYm|MgzxEgJ8adX*_s8JvZ>{Z}`ZoGg-wzr@e&(WU3fC7KjETWUJ)O%9_iG4d zwGZcTGxudlpZ2X7vHd&vdSBl?#_#urdvefCG5-9a#p?mG%xnpewseUYu8eTcE9XCw>c zMIbN3ealCnznwid|GrcE&F}-fue0xl@4mZ7cDM6QuFvWU)n7?|x8VTz3^w4l=M+}2 z3wbX;Sl@IKZxv3t`l>5f0?l=q4 zs(eXYw#?P*j?0$1vdiK!cQ;n~jd7X#E=2jtxNNa&yCE*S$dz3amo0K-8+}v~N;C@z{T(5L9H7}vEENwxC)QpQ*`8SbbZE?&fVZa-0bhy#2VLG?wK;o|MQ zd&gvHRdH{b{VLTP`{C)Wj%M$i)E%6<&vl^KXrr;qzwXMl?pI$bTk+2e-26M4|I~u1 z#~(@SoFD3xKjQo2p8WuW_4l+ZSB_bzyJF)dRpk8q@~U58U8A+l+aBuk{;`b*Z>)e9 
zv@fnX4ENtg>~h)Uan6Mt7+pHjT_~NX-Rq?w>nGWV+MVK+28_bl0N*gs-ANA$u zg>vKqU%ljGpW<=DvC6=61vt`Y{MQQbrO(huxl@|+fNxh=1`hy-%G6io)3;2moVs~x z6?4&gAT9r#FK14n9KXbPz4kFryN~e=oHn*)(>?h(RR3u|ANBoww0%GKP1BF)kzC?) zvp($mVqdo%OuJ_4t8fTkIyz$XHQLc ztjUMijUUtzeRs&(;>YoA1e4W+{Y%X*Ii9&|g!^!-*5~~mwr%7&-|ylpA-^;9L442M zsbcFg?ZYIWTj}?dG#~ENACd1rm%H046Y68`r4@9x@>05WcN_1+-EGPrSnYL}@qJUB z%6l)h^_}pj|HOJn7;gc7|++D zH&*>h^#Jv|fi>JS$2S(PP#fW8AMXq&Ya=?Y1&4l38P4My4mIw2&5`{L{CaEG)o7RY zwaAs}d!71D9_L8$!{0fn_2a}MXo#P!ST?!iReS~eF5`-_)(MZkcozHp$$76|uD)$v zv=Pp_(LtJr?jBRQ{txkNcmg+n44&k_o!a>h;;e6HufNmciUY-WWbKo`b584Q>;D4u zh-?YIt;RH4g0E+`M0LC^QQd*!ZE0WHb5;FUBX2PE`SJOJ&+mNcIDXZdi`Uk7Z|7su zzhIUx8Du*4B{fGqt4{cv@gw_)-4ZKQoN$8w-RuLxZ|}?dMq8bJNLR@ z))N{Rf9avkURHR{*t6AFe<*Xt+UUm^@-2p0H?hyTq2#{JI#?dTZ5~=(m&s9v2&v}>xIG`XP;+UCl=b>E7*SqeEC|{ZQ>{Gp9EZn zb&~iXLzkR3;eKK6s;-v!9>S2vWeq%ucMNCnecVs>EcpBvhPJWaSV-^>;XCN0e&FZa znEK(jwDNPUuhZ_~F7!FN969%QZrQ%$nP20)N0l?(d$lestak6ln3L`eEKU7yw;WNO zK5rBJT2*sLPNzzDu`hye6yl`K-*2cLP14+X_T8M^(2jjEY_0F}Y;R|Z$iB%CdlEnA zb0nzC_o(+?y0K&L3EEu$!3QSK;kybX=Nx#TaL%58pzM7geEnqi#$@kte(U~`d3@*K zIPZ0KTy3^|aBX_O=>*TZ-*g|cXEICw`VNC^zGD39VG0sL=^A!yDO zWJUF4gG=0>syqI#H+lHePv{#9p&!{3yE6lur5KIkO~Qx9)wd4Rm+GhAh_JEVXEr|c zulrdghmVInnw;^Q#}qH=zsV`M%A1G1W8U#Qbl1t@XdL0C88&^BBA#D5mSC5@g2uZQ zcLdJxosY2o3v)lsIp#QrzJ}cvPBPz94KW>Xr}$#oul-bSU>0$k!R_sR>o6Yvhx2dN zIKMCNTinRkb0(Y4w>Y5t^AwK{->THRf6{z;AH!(RejRIkruTlG9G;EmScgxbQ^I#F zB-ep<<~w-1sy#UASo0lJ$NLT%M{9h;i^T{V`zb1m_dc~25{)zVvPo|ee}`ACo}0JV zPttK?k*><**EjW{x^4Z3zQ!J{YzOgqzGKb2o<6_vdpP?2>FD?8qThcQ{jNs8zZCs` zKKlJ9(eJNDzrPm!ej)n(SJCgkj(-1L^m{D&{ZjOMBKrNE^QXf4x)|*RXD$6uR)L?=Itz2^P*#D@(mJdKTl%4_0{^9Th$FPrp{xR5 zrgc{3)S;{bzovCA$*DtG1wKyebm!EetO9?hbuP=PLsoH~?MkhiqXn{(<=RzVKaI$Lt;P*y=c(>gci z)S*l=n%22Frw(NmWH+tz_MAGDRgmel&K)^*D60fnmt4Fnrw(P6Am^&{?wmT5Rgm#? 
ztoP*9p{(L%KlVAB*PKE2OHbdTwGjI$eBZ&_MDzKF8|6b@&b6)yYZ6Wt>i7q9 zbOt)2M7)hLoWI3$g|SyIU_F7(S(V^FgN34Y-+le~p&|o7%O(KDTRgU%NKg=XPy&wQGZYZr5h8T^sCkyEcd0wZT5OYct-i4feTR zn~vRW=!1Q3*JgFQHrVHOZ8o)QgMDt-=Dv1qu+Qz<>}uBr``oV0V7oTh=XPxlw`+ra zZr5hKT^sCkyEYwTZjF+zb=`IZHAt)8SVNVY}YULx?P*YIc-coX{j^4qycJG>eid-Ix74}^?GSs(qgf-h0ny399T|Pp*h&|%1BfHD~TuzZQ zFrQTnmNO}z*WWuv>JJv!r{#`;FYrvi^_@J^hMY?KK5YGw@Vvx*FSdBzh`r}qgZh4K zeQkKZ$UU#q^E)Q%PblB^wl5C!o%coC2YyoHaUOj?e~Zd5y?{Kw+b8SKsGPirAG8v7Ag@Hb0qq{Q~X; zTbN8;x;U}#(-|JY*}n*X%-`~P))cDsLdp1$d&noXH$KrtnO_?RcuPrg3-{(!jeol~ zR=Ib_{a&-Nsy4*s=ML@Q4%Z#r;d-v8g=p~M#+>UPc6UvbOJDtj_6Rt$wpIPv`Ws#| zx-zfQHJhsCWaND6Jg_M}+jMi9pB45(hJ4-kYW#=GOCHzX@>ehB%)tYX`@Ro`eqX+` zr!qI0e9=V^t#v|uD9(h3J@W_J=e4YG!Z}oeB)LZ@TJ@Cjjc;tR~b2ome!6UaAk4)@Ire@6}UeOqa z9PforqL2QIC-#9m-7WGIxO*BN|0J;QN0wt*joaTrJMqTuKm$jQl<#yV)acQtzu=$T zAEU<&Jc}OWS~>iBdXQ`8o;^MG`{y9DH-z_|9yjS3daU*IcqVc5Sf}xz$L0JLJ&K+l z>s1atu8Qb!1N68FdaQ*W>!8Qw&|^LHxC(k)3qAUv$A_WE4bX#puy2tE_wD_B&+M7r z20eED!Sbsp@2Uaj0i?GX>8@d=*3_B6t?o& zerzOmau_=~RMda&{#lZ6&P{i<= z{92v=8YuqG)k&xRe(Y-H29Y~EBKvAFo!&PQ zuJPOI!B^3<_odRjRqXig^c?pc@&TR`9iU%k&t=!stuX8S{aCTM*Xcc{KlF_pbb#<- z{ph#VK}QbsyE^JW{$B^KGUI{Qy(^i2ZSAAibeGwSzcbM}lMN+*_rGjS{gI9h8h3lh zz1x<&KwmCa3(r}db?klnIqTbU8;yr(G(u+<;g zw1Iji17rOidpB!*Y(?`tTHx>U@T31qS6VKQ^)WE|&M%68N-I|`N0(LS`Ta7L7tZ^| z<;lub%Pj|ncLwL$_B?dofmV8`pRr=c-r7fXzNCR`-Sd^$Zb@s2z>` zc(otvcYD2p<@9Yy^)&j{Xgr@5lFfAHL7ijQb1FCVXGH#<=y{_@<2a z9n*Uu=?A{ptMfg7t37e*z{gh_tT}M*3b@AZNnR&tAMdNf6KQ|{NPlneJgV|CxnC2q zHU3*up}pPjxQ6o$=o;g%F6uCEbVW6kjp4J~JqY0ED0<22iQn0umz|eRR1RNChZRv< ze0^(+ep5T~nSI~dkN4NWDSK{uiJXu^ltWqrAE$QSP7dL1d{g4A*0agoZO$yukIMVA zIZ>Qhu9}`FZ$|LM>y@zgZ+rS@3zy2J(cVyEd;Hpu)$ergx6kd(r}Yc2zV_#3A2N9Z zZR%3*cnzJK_E~J$%LCYl;d8MA;4c1N%l^@2BTx174Rh@0C%M<{nWIa44t#xS&jk8i z=eTr!Q@Kgw{d^NcbV2VL&7ceSV{j&H(%D%39$hhVon&JkbbtA-o|2EL+FXhAXn$vn z@jJ)|Es_s92cCbaFR?u(-F>G1HWyrfW@G!`HKMzSjs|-BF?3g-$sITp{rCNay9+{j zQ<5npLvQodZRfAs2=oE5LH;T`(3e!tnahvbXs zTvP?!S|>joy1TM|&Vy2R4m4H272;~@PiNr7JL*^dj`Et7L$1G~e|#SBjAi}b3+{Plx$vJvt=k0 z#2)IKcX~e%{Lbjx$aen~&+uSX-_(VdEH^p$&B~YK`~|t$j`zv!6YoB-+MfmB-l3}2 zHR%4iyE|PxNxHA3+-KUQ>z%7uGmhRh!u=<-@19$l<9q|1Nl;lrnPB8Dj%uCf;l8^@ zI9Fz6vdiS7R*6N-Q;wnXNA=BL#(ZGV`w@asbFq2w9{ctPsCAZcMMv-Bzpy)ptGu`0}9|u&+~bfj^SCkQ^>w6XU)JmvkG2} zck0-G=J3#?js2=>i93`Ayf0|)xvT6@{H(jm8vDJ~5_gp)fu`z@yUJF^&)ij}KD#OV z8gu_k{txZAr{_qN7hVEaVO{Isrk_IcOM+kc`U?Nu`~>d`D<`JiC@Z+KGwt_tIej$7 zUuNSBsSY@-0Aqk@f2Q41tM^|h8|cceVS2rdhCefFu)`-G%bXifJLBPNrK?>Gpj7a2 z&#U6Hg)a8FJ}$e^m0j-3pmB+BLFlY9KDFylWtaG}wb+sMy{x_5dZ=Z-(1!hK7Zbmh zdVUV|%Pnu*`AlmYYtcq~M=$yHDXlzRM8g~Tk9=;}eYPJr#i_^hD;>AfjeAMXxX5K= zuie3H@Hf-bU^aPa&{#0*o`w24o2zTrRL9z-^Jws^+NSMa!MLG4Yw4Et``{0Y)b|AO|cf=9bGOx4$Z6G4;OCj zKB;>%$`4>;q-Uz7hdXX9y_B5KnAL9n@@}5q>Dk^d*fOv5HP-8K*(%C@zo~2^W!Jbe z>F-n@j<)8Rjw3TikrDczk{&5{zRq-Rci{~WX&h|VR^d}~*0X*Kmd$PQ%Lc#Jx*iXG zR|@w!{`X^*7^_Ub_=Kz{oKCfIc(!rShwaAc`13a7NLJCAj|Tgsu~ddk*Zf=z9+e@} z&j;P@VX72;?64WeViSx!8;r&FFdoiApij&v>}Oo8g*wS>5?xvv^aVp%44roqWmQ^ZguY>DS!_(%kR(3$6Vc-l0X! 
z?|-CthZfvHXMB~GL5qQyX0jLXN2?qzOyy8;Ch$)tf19-!Y(X zos3;ae3!Mh*0QV7Md6#U1AVV!kMSMR9wWa;mydkL#<_iRw?9ieM4zmySgR?Hy3pdO ziXjD9++G53F&})tDUHKEQ&s0--9E>ilR7Kx_8S)BZ@V+@-&+f8>sT{iPMl{wZ9mXk zeTjXAZ}s&~eOLY@uzzOso#6BK$;-HZV^f$)Ru*uQ)aHh?*Mc9AahcG2D5zl(F;e89qrg#0)On2R`-pvuTUgr zP5kueovh25v$bu5qxaCC@jiX(%*n(T;6Z)6gnbU;YL<&Tag*tR=Ty&h70=e@ruKT^ zzQ1ixbDg_2_=V>#enZo4>BJjs9JjY5-1Dq)6gwLywy$~DZ|JQ)4Lv^D&poKjbK=%y z>bt-!{7!)DZqB(J=Xu^@cc29M%BA_Q=QA8i9M#6kAnnEvi4=V|L3tiGc49A@Z8 zdL#V}Eu|l}d!7)!OUTUd=-nf8ks0mP>5dh}goFe6tE#U%ghH8M3H>SFFhpDPn|vG2 zY`WMreINC>)HvzcGuEJ?y~PiCr?G1IifK9iqQ$@Gxp%sIFuYS=77O*`WpJGnZ!%m< z&!~Rk4SO|~_Qs9JfxYxt^3Z+W4y%rEkt{CnQN6T|?lPc_&fp#F_ig)WyD@oaSJbvt zSgCV++4icZ_OyFh?fZPZ1-+&ijA8&9%VGlYxo+OuIY8>z*-+v<7Rw2I((mFfiis2= zcnxN?(U^>-bKQ0Nn#(ysodsNlPQ>9Wtlx@#V+`@g#I3}kpqY(Hoy?eOqcIsv zv^(wXt<4LYi{5qfTI9w&9Pyv{I~{|%nSOb;e(CQ!^lSAx-)eb46Bi`XEoO_XeBoQ{ zZrAPwZ()tZ`TX_72B_m{(pC?3{6nigwEhi%Cp|Bd@R{p;J^AUaYvukTaegad%utO{Lj2!7`=a<_x#ViUmU$3<30Z~@0aP` z&W*$g{hh?fM&7rz!kq${bwp0xad4;e`5MFFkbS`TxwGkgx#@X+(=+qWv}eAVXXZB> zm~=OS<}mK*7W#nJxxUA}$OumKg`r*oJ<>UWGtF(q#-zG{I{HmK)X|G~MmPMs3h(oL zfboCH@_ZJ4#q8&DXI%&PqiN4;`K0bml}sqMuKRdvU4^|JXS@3lEH<+keoFH)i0$#L zI0bTJac1DotZ#QsX2+N8nEYIh?)2-c5_^zZ-&pxN=Egh8^|1HRHrx7Z#mM`FBkZfe z!I;guCfG^E@Ki@J>k7CjafeVhacKIS%a{gd-S_!+;q)qv$9GD-4zBSm{I^>(j{o=k zISDrM8TZza#WB_bMnkQ!tUR$k-4j_sA*-R^at%^iQI zfUZzGwILQhr8yumeL7t9u|_MnHChgw!{{m2rBfCBE!}%6zLEdc5&SOjD!)4R zy$tu%HQf96`W@eIqkH6yFR+#BSMRMIFza{Aovh$2tU1Em2ZA5T_%>GLm#6yC?T2AI zlPRqQLR<5}DbtvttfjvQ&zb(9b8dgoEAEdwO&WB94tlOQ{GSy5V|+B9C)WKrX1g>N z_>kV${9N}Me~cN#ATe?<7mz}GCZ z*gMuqX>JaBud4nsGTPh*eyqV)t@RrF%Vw)a**k_e@NuV1m-jCpN!Kn*B&)1l)_Gra zo%2OkhkZZZ=dE3Khul5ZE*gKJZyB+>ca13Dfq3;B2SUtXt;_pTZm9T9YcBm(UFqR> z@LRaif7{3QYn{Kruhp8RSw9aPjn+E6kL9fvuQga>SfBWhn|<6c_>#)o(Yp!O6X+Fe z-d8WyT2VF+dvt}~rFB^s=owh5#0>fGY^mmWulLu9omA0PSFrKSz2sh=uVW1itisJI z%F#VGci}*NXzm(YYewa9XpGFb$efJ}Kk0juf3Ecdb*+x?*X*6&doY~#-)%UxI-|P# ztFg~qTjTg7&#u(qBf%+Nnu*`!Q%z8>jLtB-$Q%OiXWEXo`m2)Tqwsm`^BpCobd3KR zKO-M~4S!#Ddpdu=J^WEjt=VUjj_k*_Yu?bf6vw%=22`Aj{T2BMp`QFy+3}e8Ej4D&^~^WW$5q~%6q_)z?yJx7h9HX#pdfbu+Fb#PfWPUQTE zC{~qY!-72zdfsAj@w~MTW!{g%*McRjhkXverEle1g?THsrn2xp57yNyv#@6LeVe$a za=MNwCe@GL0*|{3#KqS%;z|MbB*X$d?2kp?a1d=RE(Ls+Q-_as%*U~Gms5As94)p5 zJ^h~EbE1vOC_HO)f=@TM*&ErlIy*dN+0BUYG+i~T#n z*We}DdA?G7ZTf|5KP~twHu6d1Z-iLR;3zgr{Dm&ionr_4myOhrZQ_@9F2ZyJ@~Hf% zkXyGO8d`ZcXEBP+!*@I4^)Ni7*o?{0LUJ53TqF^=W*<-;l*gjx2i;6aGg8`?vU4*u48?aA>+;X%#){GZ_bV{ z{f``w&*Hv&BLA(_RAzX2Ieb@YGr9V1EuT66+js5@ah~V?BDtRZoMY-+I->b$k6iGX zy&5gk_r0xu=Aigl_bUFbNi0^Vyn+Bn{EZ^{>1lbF zZ@-3l!`GZWtk%}{;tyZ@-}qjj^A)aQodCS@F@!4{H|)h|T-Gk#m+%)}R*X`6rD^}j zzQKpDAls6b!^2h%e~-^&+;bThIk0h~{3ea7GhB?B(Yg2iVwShn*Gm6wlKK3{Ix;h{r(7q51!n@8Rn{gvEtJ7b$-n|Klapzvur^lZbC?L8NW=SVg~ zOvc3uoqbn*`PSy+apsZp%1tiG5kAiLnj%3wbCnwBBaX`f~SX1tmuPeF;_RM;yKO5uI`?l+zKH;tPyJ#VI*zjH? zu7Y0FxtHi$a#i^++oNy1DW5a!BkTJ*!d0z^FM!U9e~(k|DDQPX#>(Man+N@6a}CcB zHhkyMpJ1zd*bD~V$*&db|BPO!6VzDo3(_{msn5_boVcVQt!jHhLnj1Mp=H zd3A90l4HMw*hk?7+3bVtrK;R`6Sztn7$3PWox@*zBfZmkBJHOuzF~Xg*lP0y*>4>p z|7ED?;_g~!slLT7-{5>gME(?YPAJvUJRAD2I=1Oz3Op-l4aR4ejK& zrF-(|EX{p@GjH*}TV9+%dmHkT4JP{7U2wjdVz1jNudH%=X8IOGWxnEi(f*Z}8J$1V z__T3({_L&lY%{st_O0xeyi?|ZO*EKGowNh~N+-|!)wued%3qU=OncUiy7D6|E=V3J z^L4S`(Ht2DhVb2)eSZD}eZRCFj;N#9A|3TlQu>6hteH`z8Y#)2 X#&O!B^`%J$s? 
[GIT binary patch literal data omitted: base85-encoded contents of the compiled object files added by this commit; not human-readable]
zp0WII=Mp??`6uKORIvO;K53-Ntw5GlwEREj@>a?6FUTdRZ28l2394BB&vFS8Ex(ml zb5+Y<F1Z9RSpMxojf7h1i>fdh&6TmBojN!_aQ&l7bm z|F)15_PlQScjpqMSpE?qfg0!WR?qSe36tvf@`mMK5t8EVP0N2am*6eSUqMJ?-P+>% zmcL3a!P}O;$KD7M3LIQU=`fj{6 zv;4Edq&l~nTmF|rQoMa+`BQTVKDPXGa|u4N{IBH_d}{fd{*+vT7MB05T!NOC z|D9Zdbjv>}m*6wYpD$mqTmu>Px#j;km$xr0|H52?R+hhIF2R?U|BGCL)|TI!SMyhv z|H0d&fk?l${Dp6GBBQ>s{P*RRwz1gbFOS*ZTK>UK@1e8(JIlW;^x8%~% z)$+UZCbOI6FO*l>-SS@x1rB6i56l1GZPGwz(bMw(6>=gQdRhLxxdgo}|EQ24&{_1c z{KLYex@GjW{HsDzv}IWS^I?McO*eQ*(W;vezny~tR^b=o_i@nQDqQ;~5!t?Q&E$U8 z4FrXsHe}JWf(=|Gy4zx!%ur7-oD9gXe(t@y~u1UE2Vw%57s^;DIJXDL6)>4 zrDIg`VCSe)baIv@*9}rSTggM5qb}a$q1Is;qHBEeNXK8dlF6gg}Qy7uu4%ca)f22yq`SGNfg)g!yT_& z)0+kG>0A!Vsjp$)lk40an)I6Gq>EJSTqvp|RYLKvA)H{Pq{P?fV7yf?MXjWavkE_I zekcP;Urr!3byb1P_PA7=Vil1Vs$z&RLdWvjYmSw2ScRO1Ps5BNd$Yw{E9F>7SZbb? z5+mHwgR+#m@uQVe$Z3*6shSH1u3~<&QqBhzsHybNQt!`J%GEsLd1AhmGRx7XG9ozT zC(JzF)=A;Z6*6tgQKn7Ff3Z>uJ93rWpnRc~k`QK^a_n}Cw8i)1_gCx9GVwz=SZoz; zIA2JWi>!HKiItLl$3XMMQY&TdZSlC3zIoy|D`k%3l-urr`!Xw~M3{TXlx(rwN;w|P z336tIm2y%kQ9t1>E3K3Z>O3TDu*zC(rCd}3rBsz&wpe4O+zk1e$9r@s`JMQ~rR%Je zhi;c$Fw>Oqh=|)>*6mr7*!c>%3L}dPv~8U|qE8i!ggV zm#oWH{qPicu2@$sw$%v>^{;i!;z7MI$#v_7RsX(l$xZ7&tA6ot$t}yU>z4_W#D5i$ z&&X#tNQjr3gk890d=Dfp`#nbzVZU1{ei)Jh_WKF(<2lH0zgs$9E;mNng^M+avJ2V` z>nbZ+;Ak{(+a9~&TVaA|JH~GGRG6TUZQ2dr3lmtjZ8!WVOyISBcEgrof>=AwZun)G zps;<9-SFEmLA-sh-LONL;6D3)yJ5F5K@s}_yJ4R&!GrcgcEbT-f&}|vyJ2cj;O;LI zlr0{y8!nWGx>*#pi`fl_$i%vx6hCSgw;PTM39>~AyU{nnb9Z}3O-Z}axAM?YQ!lc- zU9ri_$)#)=Uz1migVP*-bWi^GOo(jGfv{rBJnCx%5Jd$rjJrsm<>ou3)Er zp~TBrvr(g!I~b&*o!UBg5VssMP*%xK{rV1}0?w7~)K(+pf+5p+o?^JPq~H~=nRwW)DJ?6wT>Eg>W9IQ&XQHO zc+O7!C_JK?cIsz&)ID#feyN1vK5N;j{nRD1O08T5!MXTd5iYD_r_NTh`EcP2 zcIp7ttp|kxlU}q_=cz0HJT5QUsXwT@33*&zwo`vncN6lsyke)e59OS6#>`|pwOejs zV8-&Qo%&sF7reh_r*_FL40!k3so#e(T5GRsr?$%@eBDm%np+sqkz%LzRYR(ZLM1?+ z8V%~%Z`e(Tgd$dp>kSP_B(dd1tF>9_g(uvyXlIM zRMi|6WwvL*IEnsBM3J<5;c{HdM# zX~=@x{A7zXJM~-TtfqI$b`?5Xw6IfKh62icL!Dm9Q7#o_isl$Rog2itm z@^9=99#XePzqSjvsu%g4Z8dv1xs6@*55s6y)JXnTb<@p?MH#<{w)XdSleF~&$`JN4ygoYuH0^ zf7%=Io6j3riO~MaI&|I2!O*W)j`G#@$nG;DrLsjdRq?nXPJ8@4F6eP%T$C@-JDG2p zL~M$a9$R=5BK!e+>W1uj9eYl=<(^CtDcEz!E%(-ns23Ygm@iQNVj#>*DvX@-N4!j! z&%AOonuu2j^SN`FOd6hx?B%sYMAd-l)s*QfP<2f2;l0cj{1Hi&EniRPutw#6Mv>_x zTwQWnh9#etRA0Sg?v&3k3-c3wKR@;{9#=S8D57E+^D!xt&!L%e(;IUO1H5^jqi9*i zp=AV5*m0a;o@QmiTvmafOESRcJh))?+sMx|+xS^d#WCf^_~w&5FKf#0Hkr?}1C5zb zkfUS|KXZ6r!uiUdpG! z&2*`yE|034J$Y!`eD!61-jk6(JdmF$^5l^BHf%yRmiwimH+~qF!ZtG_K2HBmO9AKZ&fW zH)3;KbZum2Z5er$UHr&krVL?9FVXUe3-j@-{M?j3dpWoIU*YG{4NePoAsxTVpP4*P zX3Fo?n{!CC{XLczkBhL2+VWEky}S|6RW^(9IV$@R-cfxvE}~^|TYluUg*W1jSW_OE zmx;)pyw@}l75D~t9n!*`#j11S_;Tv5LJJs(MFPnU)LgfXx4 z6Wxtx4$Vj&Ju=<=M9-ylnbNKz?t`J$A$?lX-6|_RB}tRdmM05#V^n5bMElZqC91)| zxQLFW%`9H>HQ$yQF8AZK<&K#{;>3gq^9IFXu9gm`WsQ>iP4PyiFh;X$ScKV9R;wy; zI1H8*wnk@8&&VIS^@}CzW! zJY4b=4)Rq>^{SmI+lX0BHi|~FJ)Gt!TE6;WCJx1mhliJbc)&`MhVed|)tIKBH&KiCwgO`pB#!!%8j2ZWq^SMwCI5H|W!h(_r9_f;8mAw$H)~ z&;UM#@1Y-zg?X?Fc)!s&4h-z_RRbddD#7c}6k0=f7z)$jSJ({u;X1@b7{)_T4r;<1 z&Di0${o<4_&yLMnU--CzhzgT)|LyIdD%mfh=G$MT;nH4%$ppc{H)1|$k`X+>b^gs_kKq*|9C|9i3r!46A?E~ zIFju_$w@6adNLyCRD|p5sfg*P9Y5Vq1ti0@r1!aqe&-`x!_P-#pVxZ2xO#VWx%zc= zP4DVbvGad?cb6->yX$H%M{>5SE6b70@2(|%UD^Fzt`+@VYx+Bq$r-NhNXGY<4oc7F zDXv9RU9R0zUA@T?nH%k=1|*%eBxkOx{f{nJ?;l+Ue{}o|pXa(bPf50EN$*vz_5|*? z+I3Xs=xW#1)sEMBYg`A{x?E@1x;n3SxhAi7O<(UgEc(ke?{AlD!{4sqCmhMqprp5! 
zT-@dAz6X2vy0Z5=ex~nn_1^1pE#2$dpe0B5xo+%txo#YGbwA?x8GXRj{-Dd1b;LDZ zOU55`4L{^^%{$^+swGPgxegw7xi%kh?bechr(N66xm*X&xkjJI(DSb8=N)x9;SMT` zmR@jeyX11^TykB!K zo|d#)-k-nw zLHr!dzx|*iIh#ka;z0fz2b7=r+Ryf@`R8BD?^?09AsZd$89_fZ$Ud42OHoV+)v+B9h$>Hkp0#rC`&)|2~2=pP#D8rgPyP)u0qND z=!7mX4gP_c0)~+Ysn7%F!XAioF5&QtZ!eNLhh~4lmbcbKzDA;bpcnR9TG}sP> zJUsIQpTk(#0cJFF0cZu2VHd>3uuTSh4x?ZrT!e%|JYWbNVLqIIk|zB=OoTt-0gJvF zy24_(29MZ=@jeWPW$-VQ@)|~c$bhX-*hl_AZ&(7Cp=d1kwV*pJg5!`7XBe-*S1=KB zz$$DQFT=O67%o8Rd-xI~41u+95qMhGs0E)w7A%Gwh`$$|&N29D&#bY=KW;0Bi&CF!>JO z!q0FD9({zog9-3AJX+K+-h&KS0`6kyhPR+SjD=-z9PW9PTWc^Hc7rHRcu0W`Fab8e zN$`}Qy?`G+habTxX&5!211yJI@LVbCFieBLA?h*G0Ucm9n5EGV-@qI=1rL|O4rmYK zU>C$ZZWzzQ=P(v_f%^&SJ~V?tumUba$+FZH=m&p;+*_|2q`@Fq0_UJqIr0kn!dh@W zWf)Jv`_LDDh4YZ`H2R<`tbwzTP~I?Jg`Th)@;^fzfZnhL{(~~lQn%rIm;*=Pi3+3v zCcz$vt4JGxUa%4(E0KrL5mtd&nRG)h*aWUBhEW;Xz(P0+k0lz$$1o9&Kqr8jOT(U{Ie*gU*IsrKSx9P5K9D3v=KQ z*w0fY@Bxg4T@YIf{qQwRf~^o+o4yt%z;4J_hk5~@!!TG5Cn4bl+8PXl-SE(hEJnjj zxCT{T!fu!h=b_BYv||_qYv2Oh{|fybjDqbDolJdz<}ebrg7GRZu)*ul3Fg9HuwSFU zhL$iEw!sZ}*3UcVFcA(wiMpg2Ccz(Y&+D{t=m5(hGKIPYZD0ks>ya+#3!A`tgSrBP zU>DeL(q}+NSPrhY=%1lKtcQa24I>!_z&eP0+c0WFJ6H~`29ynSfK^c79pZy-upBN! zg?C9StbhXV(dM8hEQ1_~dY`%k&0z|hhQ}Jxf5Q)u1CKZ2c5#{gm)94K6~3G_FnHM>qx#wjiDGBb4C6)U3BSWNsQxv50!)Ct@aQ-6VbC8|fYHV<%ER~YBV2~Z zzQsn!ge`F2cg!nb3Y>w&w)89T6YK{2d-@R=02?5x9c2rhVJ$?r#|~%q~0d3(o$k&;14nBq!Z275*Wrn7 z^a(Hs)`Uz+Wzrrneya)XqWWqX#>WO~n3=5z@FX}N2g+uUgZ_)ruA*K)dVFp}* z=lU8(AJ__>4Dtjz!3Oa5qx_%`?1AE$^#9NUwnJ=x$`ZbSVXzAd4WR5G6E=VtNL_)B zumU_kpc8(8O;BJEeJQku`EVYd988;nA+Q|^W--n{3z!DSp!5*(1ZKk_C^?ii1EXOt zL=3|Y=m&qmy~F8~pf@asm=UA}vfy_pFp@lkuCN;1qo{Mx2ztOGI0I!z8^&ia95z7g z7~+M=a0x1nC9SX#JmV+_kXyL@4MoONM`17=ga;-N7RJC~C^eBZ!dTc0k&_4yqu>A( zpUj*JX2J!iIfXU>3*Z*KJe4s5{((oPF*ks5a2$$GCoKF3*Wrm7#04{8AH>bX7MKL* zAYm4D2D0H4JeCXY4FLvV!Sw4Ql+tToaZ<%tG2I zbc8ifcoFRr2E!6K3HL50|KNL=1^XfPSLz7#gpKgv5^R8ltex6E=f)1J@=n0QSOTztgT@K16IJkKucm z4o9HKCfXJBhuPyR9HGs^P&f+3j?%BfVAunX9%FtALt!U8d>or#DqMtTPcZbtG{^z>No4Q=Ooj{c z{1jym8^Ato7!6?v{0Y7@)CFh*+3+9KJj=KY^WZW(dyag8MPQt#UO;=;0!1z`pMz{T z36ET)J;DUo0N0?zCDII^!3fw0@;*UB7zJzKEIe|BdJo^hBv=op!M;jefX2`pegen5*EQuxCZzA$2AuuLnHVSdcts+2}|Km zI1b_#{RNbPB&Y|UKt~t`^I#q1z-5Rr*!mW#!kh3Zbc8IJ4eQ_#7=k;(@FctdjiC+v z0NJnxa^Nb&=i_T1&;Y)JVekv=fE(}-i~Y~TN6-sqz-G7r@h)tEhR^|ifGMy9{)D4& z9W3rTlz?jR7NkR07!LDbBOC=+eqkg)C3qdupeIa%Rd5JI0rpCUD)1^ahSty<#=hK<REGNSH4KKILEa;}3~`n)o`pBzD;Ny(VLMy`uZ=sr z4Q*jGtbl#s;>J!nXaao6+u$o`?5B^6yF?$rQ1}^kLILi3$uG)$1S4S!6yTPZyzRxO z3AvTVrw?EU+=AlV`FjPvfI+Yh{)1xoP#2*obcXS;0uF*J9(Q;innD+t2&>^3c}AsBW>^qlmZ*dLM5mLsqhVCzyw$bTi_TN zkFxP9|BLd!BKV;hWWZxE8GeDk;4DNHr*1$K=no4a2O>*QZqN{h!g4qPg-ep>&;+`` z1Xu@WAhr~35SqgT*a#OO{xSL^_#9@#S%@!9THrGn4IAMi++T(|i(NzM*UNMK1~h^W z&>LpMFK`mX@(G@T zS}+8bLVI`z4#6c@i2v9p$s>3kUWLz~4UB~>+-LBAKCFP>;ZMkc<8V(o$`}0bCUk;X zFc0>@5%4`lK0z(`0!G8{uoZSgfv2f=P#J1~AKF4k$cEhzQ=Yh>4)~!9^n>N_JKTiG zXQ+=*8Ja;FjDy**ACAF2&$1^V)PdKb1?hVezT>zr%!Y-q8xBHL1?-2i@HEtgFQ6@q zf|alto~npWcpoZ2D`*ELVLqIPvXzK~|6lNb5#+B-{|kd52TE2U?_nXh5~=?%5dMLZ zRjGII3lykEKLZot3e>DFj6U!eluV+ph2J254ca-jy~6)Q()bSlzl6Rp0+zsfI1U#f z?m1y3KsBfXpTJiz3?{)w*aP`$GB&`QkOqAr3x0uBa1;tWPkV1l!>vJXo9l1D=H%@G86qpFlU504rbzTm-WYV;j5-&7mWVh6S(= z4nu(#&AT=PXakur1}4K2*bX1T zK@hJopMkRQ3e<&G@I8!zNw5~Sz%B6l(GB&WE%b&N@FT?3C12qos1MblA&i2#a0xuG zlQ)n8nJ^9x!F6~ph4u#Bpg)X-8So7(hK-N|=OMBlaqvH$|F6M&Fc>C7!8fQ2@Ei<+ zF7O+ihvIKy5C0qUzdIa(wQvgZy+vNa4S2Ucc?GjLF7-C`7or+a-{BC{dxyChlzW%F z1?xT9Gn|C?-e;Z!i4EDC1|DccIYLxp{J=SA+=PAsUPvWv@OV?=0F(G!A5hNlCJchf za01Rkl@IZUtuOIE1>S|G&=Q8hD%b+~nvvH~89szhVK5AbwQva@ZBDtv`_K%A!A7_V z`9ES#0*TNOGGH6zKJZ5 
zrBTMP2v&o;1?h+SFbw{Nds;F^!a(=~taM~h58j5MFcucTRrn98eMY|x%fRev8#X{yWN*N`@CAGg17JA( z2#eqdT!Mx8kNOsSp)Ayd7vO#P0KS6#nO8!A zcGPEB0Y%$0pMd>PqXTn$xCQTbs0b6X*-G;cqwzk$o6T;3fD7 zGGGR*hm#Q5mpp>X@Ft`~226&(p+E-xC^Ugy@FQ%2Q&6xUal`Y_5Wa(`#4wnve>epg)X(pI|-ghkxPT0rZ>j9J~dspc{;b zKQdkQ+;5b}|`+uNa!*fs?5|o1nAre|b zCzt|%LJmX^q29vV@GW$NKJWvKftj!f*1}&PhEmUnPKz;&;q`OX|Mp^ zC)^qSmmE&HK@NO8g1m{JgJ2l!hr{src=Qlw zIsR9H5u_uD|BK*vNSr|WA$}tIaO=YV^AItKGUC6F|25z}+&c4r7#xLilW8w-{}k#n zOoIzhWh(Uvw!#C`upef_O?Y8C^&OH4znlN>%)kbyK9lhjj*#CE%_9Gy96Sg6s3))S zzbW*B*{};Dvhf2|p(V6~S+D{Bi}}AEPC)Ez?1xWb1Z;uGIl@SQQcwq8htHrb41sa5 z3^u}P_zj)c`QLjk;bA8v%)?Ij48DVv&Y_@Zdtm6yk`6iGzus^Vi@@$b|WD0G?h%zYRaZYB&dv zFQ&hPFQGk*gvehxhMw>ajDbnuh6V6D6kJ04hPp5arogYT6^_D9@GfPXlUaU-%0CfG3tS=EGRH26b0ZpTM<}HUWKL75oPkR#D!tAN$+# ze-&(o_|@oyPhc+Gfc$I7e;5R#;4qY3%Nh)HfNa9790cOEo zI0pA@z+R{e?co3Lb|&C)Rb>MnQUOKz{Y6|*R0ISSuuZzNPi7|R(8rdbxu*a?Z4ZncR zZX=FRg-^jvu+{CfrEop$cn5g~_rmPo^1g+KVdp!EJA4x!gty&AnZchfu3v$NVCTC@ zJGcT;_fQ|w~f^WfH@cQ3Vx47=X_0w=Gyzvjj8BTy3VY5Hd#=s}wD%kvC@*7Tp+hK=4 zQCH!o(2A}P!mIq=;t|pwYLJ6*I3CV{pTa}X@+fr`>M#jq*aMD(Q{X(f8ZLob;dk&f z?D1#v3Elwh&HQ)d^0=^3W2bUpx zJRAgf^Y^yo!!ukDIGntFiuWK?;ZQgNj)k+~GPn`$g~#FVFzad3lI!+ddtnuv20w$} z!mIG+XLz^35F7&+!By}KyzyD`5~}cNI2W#mdtmdw^PYnvVFNq^Z+s3r7EXh!;BI&Z zws@X80PS!Pd=gHFpTa|M32t8Gy4efVd3YBb&AlDC-U)w#ZT>;K0sU|yoCaTm!{L0m z7oLU3;6<4IPuc}I3oeAC;SzWac6^a`4fcY!Ko@)wz6005E${&R9li>i{R{W74}227 z43EOA@S&G@H^8Cr3HTx02+u(0%fuH>hi}94u<0xK2Oovc!587Da4o#)Rmw9Az$DxX z|Asfeh8@6lN3NfNZ^B>T-_ZVV>`6EUz6KA#zhP0TCA9+1giGO3XxXGCwG`IC+3*wi zE3~wV{+BRQMU(4X@v-CAAyu4y)lP_!@i%?t`ab>$jo< z+F>ai0u%66xCkDC?YC}8?F&es~17csFSSi=iFH;8-{bz6zJY z)$k2iQpBd<`UJdbTha^G!bNZyJOpogPfKcB=z=jg3oe3t;Lot_ds|Zb!{P7)_zS%I zeJ!k$Cwy=pybK3zNBM&}_;~}@zr$bnw4xWW=c4|p|5cY#^I1&n8{q3V03nnL`}lRQx&uuHg5t;Q@FJwrZu$z~A`Y$@OD!1hnJ!3tTUQ zyWlB!Z(B>M13nLz!u{|+bIDIQ1#X4^ok#hFi{LewH=nq{gRu1i+FLjeu7|fQBp=~e zI0J5n$6@(AituMYn*X!Uh*s?}`am{h9Kn;dqJsc0S z;Y)B9TmyH&lkf<<0&o9VOX@?gC*Js?C5btQ%Vwmy@kHUc?yi;J$QPK=n;oqCbC}VIE+yXZcrnetP zn+UydID8t;h6~_SSO}NF9WZO0G>83R13U?vOyCzxzy)v_{0W|fZ6=8$oCIgW5pX^{ z4f{+{7U9!yD*Os=hc~XJe8a);ZSrUh*AwAlxE7v=oewAN;Yj!yJPdDG$2~X&Zi5Bu zi8EXQPeA?%$_o4n-f$#-!vy>Uo`vm?qAh||unxWjm%&5u_M@poa4?L(H{pBm5Ig~; zkMrJz^)LV@!~elu@FcwM7}`(Vp1-f<`b)S2c0QJLgG=C9nEOfU z65w#L7M zxemk8a5KE?1ng({A+(%GTEV&SB<%4yY)!ZiK7114fP3MCpC^p)YuM%s*bVSa;&nP) z!tWR0A-D?O{l%8lzqt-@T>&@1x$s4J2z~?qfJb1HlX)jXFYF12!EtaN{1RS*>?!yM zr@`|uek$z<)HsBY00WO5=;E!9lol13U_E`xfDZRj}jv=!b8? z^Zb4G1=uvO`-Rx5a2M?GZR}jQ8n(QMJb-V*3y`~*b_gDZ+26q?fji*C-{t)Vx5IXq zP`2PY*yekbd$TiEnw@&kSjJKRDV!xK>X4P_a&x|R6BJ+Q}Zv^VfP zT#uh$zmqh8iM#2q!N7gAqc81J@67o%2W~YhH4L1}l3EAH!13@T9K7wU)OB$2j3|GUoa6Q}tzlHnZVR#(=4llzS za81KbYx!0+Hs@K<;S zUVxV&wPaRmGuRT|3h#pV!%nap%!Ng;7i1t0Md*eDVI>U0D6EH%!*OsToB|u*EI1D? zgzv$R;A;34+zNNYgYYms4$s01@DgmApOyMw*cNt%*)SjWgdB814;%z5VGWGH6dVbk zfaBqFa0;9b=fJn%J8&sn0awE>;0E{&+zI!=@8QqzBs>ol+rkd83$(%RurG8%5qjZ3=!a1_96kk~gAH&lTnzsQ*TD7g zTX+B-gQwwTm{p`4z&l_E*aa5CK2U%j=z~F+g5%(5I0wEAH^Y^1Dcl2(z?1M2%qmg# zVH?;9X2YJ4g9;o7Yv3^WID7`a3}?ed@PF_#xDoDwKf=@SBK#ZPRHppG_OJ`ghkc3;bhngPKU3)}{90WO0d!=vydyraUq1NMXc;V3u`PJ%DNSK;gMZMYP! zglpkuxCei47>=bD(yII4ex;uz^*VK_J$59!D^_%vG7?~2PeY@I2+D~OW=oa zC0q+Pz^!l}{0W|bW3dG{zz^VG{QYm3{bg(_{yv@SP4EgVJ`LL)%5Wo+>D%uez*&M4sZPiwiwqrTnFJQ_y=@+6B`rGf_vZ}a4xp; z2t0+|{3^EW>hHIt3UDs`6CQ^*V$XgE_JRxGdiWhIyo~t5d2lrR72f}U*nqGnd=-8O zZ~Ou2%eBaLE^L77;BI&x-u^@SQ_usSf-~XA@K0FyBWzAM0nUeO;9l7Ja?+RUYOYoI z1)K#vq{j|$+!f?A9Ln!~f6RCZ90T8lbKwEl=1Teua3TB|-uDy6EV&-U^=#Pur-YyD zQCw?qGP>74v^%-Jnd`|{Gp+<%^Lzd^lplVV_&pCE;O|qR=Vzn? 
zoC<%0U4Kqr6~eYF}6f zXThEDXLu1l^jp#jPJr9tT4=eG{u|d;u5W`ia5+2&+uTKYg`;65`~VIl4)461{wKdT z@cS}Y5AV8%u?p^=#C0t^1Al<`-AjKK4uchN32cBD;P;TbpFDvd!zK?95BLR~0WYF= zRxfGIb?8Cj27iDz|Bmo+9pqYs3*o158$1o0JwzFV#ZZQouofBbNv|n4DNxa zV2ei?tASl14Sn!&_&WR?{s5c&ne>1?p%*6LGw@ybEu=pxqPr_@k?O%y6EQc{T7S4cM;773K-x$AwC9nYc;B+_^?txq3 z%}=0<>&{#cgp=V$coOD6NqL2zz$Q=8Z-$Fui>GNT;YRr2Gqf@AE%+^L@+{>D&V*mW ztMKl>lU6VScfpyk-*dD@@I4rV4?a(wflojYvMT?OkuLCgxCQDjGe!m{zCu}p@4reo;KF;I&3elU*-BF?78Wz)OX={ z*kd#7Ft`!E2_M}Y9q>2!6?DIzJ{xR+ZQg*b35UTca4EbD`)+|v2FJpu-#9Dv53YMs zZYJJ@9R{OYV&ik|;9lSVVE^&=m2aMvnga#ymHwCh1a$NJO>d$9!0%si{W1I%Zhk9q zfWq6bTerrhcsqRqScCih-bsAlLBDew`da+{@w?Cg$MEbv4vH(l1@FNwg9qPB{{h~+ z9rnxnuv_1cjl%DfaeE%@xji=a4%nMqx7!h$ir;_Pc~)uxyyZjK^dFp+`tXOb<@r6b z3;j-hAGa&<<@e3I@os>(&gR}n2ow4am_t8@zsFnYi}QPrwppp?_+6Vz-<{vf=h4@K zXW=?HdOmpu--EBi?hEMWF$S^~;~m#C?r|P$V9evA@FHaPz)pml;3IotSHK7MqW{A6 z|G1tADaJ-Ffa76{eeeUa@TPrdrT)dWlj|4Zcd%U=eb5j8g5Sb{?er0d%P-+pco?37 zf5FR;S~e@SIs7kd10R5oz+RAs40J*f_J@OD6&wafz^CANI1#=CXTsTVE_@xn2S0(I z!>{2kxE~&Yr{Q@>Ehm5Bb?{c$8Fqo$Fc-2=gacpzhTsVJEPNg|z?txMI3KQnU&Gz- z7(55BTS0#Uc7oZk2=<0N91LUdN!S1vz@=~v+yW26g6rUR_&xj$o`QeF>kp)C!rS28@IKfXT45jPg8iWnM&TIvESv`C!w=yG zcmN)QzrpkH5^Qo1`3gJ0ZqNpMK^}^*99F^_7>9LmESv)8!gt{(a3kCUkHX*KCD{C< zlo!|mc7b`YFXW&ME8wHB5(Z%cj)u>{Y4CM8A1;Nf;5N7)9)?HZNq81sgf|>a-wAer z*^q-SSP4hLr{FBO7_Nd}!cA}o+zt1`L+~*C8U6)ttYIU;jxZO>@KKn6Q{Xf>3od{w z;5xVieg}`ki!keB=zv{eA>^PJYA_5(z)A2exDtK?55nKzCD^KuF$>rg7DE<_upIhe z2-d+T;4|lrMz|fGfR>fS3qAlJhE~`eGO!<1VI_>iG4NS91x|;r!MEWu z_%U1yH^CinFFXuS!)vfbKluUM!!EE8_Jur@;UMUPH82UEg45ta_z~O)_rjmyId~B^ z9U$zmHEahvK^yD=AA=!S3n#!Ca3Nd^_rRavIe2{?TNt*5U0?y^;Se|pz5-u`bKo0r z5&RJDg}=ZXR#6w>!%%>gFapQGDR2gS6E1ci~F-8QcVS!0+K{cmZauVGIJcf$d=zm<3c1+Wh+fi9@PGB^nM zq)4g`V{im486B>#9Iqd~UH?$u$m;FZ*2gCXM@P16UHp;OkF=&H#`|Zl9q#)`|JYb+ zc42Vkc;EQ?+1dKq`q1cDeSCIiba=RLWS}rOQtuue9h#V3%`Tmo-PPA$sxEJx|B;D- zLsPy#X7@~>fA+{|-x_2G>a)j3hw5WPed}lU4-M8wCR4q`eeF}LR@KMz!+opkJrn%L zNaNCCX<0GV+cz{-Pqp_=)VtS@)l*Z0Ba;i})h1I@BNKzGN9qIH4UG~3@n5Rne!uUsk>VjQ;jbadn@$sG0ObIW2}=DX6Jap6~P5-6)KNoNULS1H?5h*Mh5mve=D z5ohVllEjX+4;0YYP`e)V<|^H}fF^@NUtbSwxX4*1dgk@*<3YK z$(OrHoNBJSmPoT9lb``HTniuKTGVBgJSpNAvNWlgQh7zZtBy*#E0?GRt57g#O3GBe zm~JY?5J>WcFTtWp?uf~v9XF#Go*O?D}&6+Wcbf`nC-)ZgXN+qAG zSh7Gh)3MHC9TZBk6;EgeXxG{rh^_+VB`c~J9|a1Ev>kZmcUDS0c zR|&T5!Zq?1vlVdC=SlWL~K`UIoRHJ{T2=9VRoeJVoou`$DwNCk60 zK^gR^)qg~nb$@|1i;2w5TAQV$`-*NeBHEVKg(sP$&9N;|j-4zG#iT;PBzCHlHQGVP zUTa`=-chP_rMrzb!|F<dX$D+c!U;M$!G!YUoPusip>QFu7$gwuO;_@13}*};awk`*M!m1l z+)c{!qKa;>gJJXJbV+R@UJbI5(bJ8os!X9J(=nb`WZE;B(4``sx~dq%h;D>K#p2&5 zKMt62x#bkY zB4(Y>?~pXJJB|!!uyDF6^gA}rC+(~$Kk{@aqKr~E47!A9Kcb6cP)Ma$oQxa`(B8Oq zVMv@qlS++eg-E&KQm$emF2j>#d@Q$I4Pzu6CJPO_H_jFpe$QoO9D+WHR|bO^a?o>b zbiVsL!cPCZ+EW%$O;Xp@5Lvd8UPk9ubcbV`@IYR8Dn~tH2BV2` zsl_`OM(A!`AmkS{BQE8HJYO_Ub(1@1Ya;YbL z@F19UHt}X+;97!)qD-kr*xNDFB!++(0}NNB#~+ioZ~h@+22o0dUQE@fd~pi66DU!m zt!fhFNR`p0v=~<|WtKXPb3|!W?T<5nov7ClXpJ2^tRr8bA+L4ii(TpEG4Yin?G=WO zvdqkh*fL=fJ0}tFSGUXBED#C`&D0wi@d8s6HFHQdBBe}cUeg*kHfxvCc4A0lW;URn zPqoN4GFY>Eih0ppu4$``ZeTH+u4EIGh@d8C!pRw_6s8hYJu7EZqq|;#9INKU`(BQv zo3kdWhDB;JQo(zbIfYiqTdIa3W!9`%xnK$`De+oX)00$ZT8oHM5|r)Ecd1D}&oG;r zbaX@F8k%ydJGw2m1dWC$Vg}la!m@PuJhkVln(vf-BKstJFr{=!)xF_#HcMJZ77DvS zNS)G-OXV~f$&71RE(maZiB2fp#DvJkziH%1f3%X2PzTev` z&2mRBmu*k;B8g6*U@&!)!RvsxfvE8LVy{@Vpj3uVEd8SM9Mb_s$Cc#Y2)j}!G>JO$&V@~Nc1j-DI3al=NdJ@fQ zlm4Mcrt6tbsC`mB>aaFr8zlwY(vM=~`V;%q*#tiq0Z}rn-Cw zN zHWv*=%dwxTqHO5?T1@NaED&-`FuCSOkFUz{Y*oCJ#>k~=$wJdi0WHdJjlr-%Wnrm^cN+_kGPNbyN>ux+jC;{f z4rvHI2ZsF`;?{i+ctSBRp1=#!Sxe-}N919kX*E}ac=BN(4S1}Jv>0{lWG1~wUG-_bJBxDCE>cJ1Y~)-^$qV{q4zxBkOyn1g44Jk^ 
zbaeE~$DEe2gj36?b}gAmby>QcSt8~djWSnTx};)JFRF-`29<5U-H8r?vlKzO$52nV z=XrOqFeXH`3UV`nXlh&_Eb93{w2;a5()Bfc_H7>M^}%_y+ZL_*@0BWI8DP3AYf+=T?@Cve;-u8RpIBnR z;!w4wJH%_742FfOn)cB8M6_?RDBGR)=CiqyXwDfD)IqV=5#=yJ9oHK3Y8k&UyLu{= z-H7U>B&zzwY-!L!3cG9@tip*si?6bN{l+BN3`F$=c5EosC}O50hx}t*iz%0+ZnZnd zw2jqmjO2+PaD;o`IwM^c-0}2~jC7sJLMUg}B76wGrj@jQ#Yf2b+<7nMkTCh;;SMOW z_FOL0=`m+-LNqAvpuFNhsoW6F30>0X*?5XFiL^FdGw5S%Z3FuE9KH%K8wn( zCDUWsWkR%F!Hlr3W-#WatNQVhHIASQOF zHRW4psf;lrx*BpKS1itGPC??AzL~gkPAKtfBUBP2SPu0v0v2U%nDS2PFe^n;+5LOy zD0Z)?71-Qn4pTSnxluGbnOtfr!<+~hI^3qL2}Sd#Q@Q4|KSzyw7``{3l2$cMR<8MM zSW>oOQ<0ud<(iLy?N+X8u2T*aH0A3ID;aT3YFT?leSU4al`J6HbS3k$cREF5hLz0o z+$f?=Rx+!O;nYN0OT9jKCI3*;;41o2_ZJ z#xXg2zu6nEWPOIJ!eT&Dy!2!}35>jkH0c&Ctct6Omjc_OYe6M;JI-*WR?^Vl5(}7Q zlVT6Ox7J?FZAu#qEtPybJ2%x%sSrC*8<)mtxNT&lv$JfmA%m80rc^27y9zUwken}Z zt!Jz0PXee_(%NEA*kjO(Xp@`y`7SveqXpEEj^9YN#Lw?bLLudBX>NtsaZl0?osnM52iBa+IdQqg&0aow^VZ?0B4;Nw9Bp72<#H-Q*C@HT%C0?_tEQNn znIWR?5SBMfX*Tg8Qh|8@uQTLniSsyt3Zr{r4@X=Xv0H(0g~TppD~Lt_BZb7vs@4aJ&3Z9;UGN}UDU*)hSqjy6Y(VrU=eY0Q}F zvit!D*iPcGXG<_VBveK$b*(TP!tpm^pP)BbY|jHX42`mlWg|pI;H^YquI4JNA7euS zMkR|*-J?m82gfhxQ#svmsdeRIM{AA!K4KSTOs#a&#vd3%Ptu@>l@aXrNn`b-LuySh zFEOLi%Q3FxDi41E++gRsIxV&k_B_kW$$B-BY^}SIZzA<#Ncu9-Z=vRdl23*2KSV0fb~uGq2B|GX$Z_Nw7LRkRJIG~HlLvnDMVw6DLcE%~t&Jbum1i=~|qM=D^H$;n3 zo14mjc#}88LsfaiRsh|9w|g5*gbFK1Sq764X0Y4SYcdI}I${&RDit4nQ%o-+S4)PQ z6LsS%){ZA#PijlNWL>0&Mi(o%C+uj{WWyXf@>Q|Nm#x<{INqzs+HMC!Z&6@mnpH)X z9mU$hAOiUgj$t$hSL7-?`@1*jxtgl5>61->T}il5@Z^jTQ#f1!{dbnjut&)(M$Lx8 zNItu$Exv}3j78XhqA_38aA0sqbaf8PvXVnmEqn-`_QT+SW1~keWwbkBS`X@ByF%0_1Kb84*DP2f?&SSj_#u?SVwF*j`S9a`P2es$| zl=1D33>nXJvrz0PWaET9N5D^BJ&qHY&f%SXM{H%%-CG==^ntB0(M zt(sgt)WiZgGyKz7XEnc;WSx@km0~Dz@;)`XI+2%4Wt=rjwQRmOUln_EW9yfm&5@_- zFmSOnSF9yluEfA6=k%-XqRp8Zrlb+TL@m1Z&}lVG-Q+uKyJ7;+V5H=hX9_)4F_Bo! 
z}`-P0Q^R+$nj zo5kLF{rYu7o=m*)gaB=xP=(Hd_MWVmnn4e zuMDME7-Qo-J<`8a~U2FI>tIzFqFcoF_D;f*E_baew_n0^S%}lqQ)C{aN?=f zP)DS0qlgH!?@;i>p2_Go<(Hebu`fDjJE9j}SyT0Rjy*eqB}do0&K&zbITlxRqU3ic zeaF^V?KSr+q`L4WoKSPc+_#}A?@wf_zv9CNVi$-PQm1K?Bgf6jnEei`q$OcwZYFiC zn~`bGafauP`@XXVB$&(kR7bMcog;S!oud`$(%`K1HBX0Z&=j67!Qh0P%;*NiUUVbE zVvd!0=l{q6?T?scT+l9ItauHP3r0olm}#f~+&KJTkA@);(JmOS0`^gaP-EcCQ_A&$ zb}^}}i=4i=#aEq$7uzJ+#Tes|N9aYYN)HCzI3+puMHVR=`{Gp{R3>IQmAS;aE45ri zEuJ--2YYS?{kCa|4MGwnG1=y}rX1E-Nwd(_47%Mq3i|52ADvUgT_3GYjAK3cOn{&Y zyQKRf4>~u)Y!!6`IR3G@I3eIh$1dqRZ%Nj%1n$Hu)2;W@5^C<~6<=Z}2w&`>Z^jgx zF9F=6P|Pf`dl)Q6K0W@UUXrGeU&^RH_7MCBe6*}{5QD)<18fm@Q!qkDFzK@SbZ3$8 zl8A4+tLZ(j)LQF<(^xmknfYcF&Gcl`_QU2ZZ%gUenY;BhE#bQT$IV%k24gQh;mp$_ zo1V0=TW77_Bv4?K9jgje{tKmL_{b0sA3xLIgtD*5a*v7-lZtu|n3>Y{cZ5YM(nj~W z*I6t}Q3CajkwnB;n37aV56VY4cUNtRn8!Q!MZr1KC#m`b?tS|6C3k{1sTavae7?|4 z5qpngck266DyfjlF1?h;zTBeCUL+02(!2_B7OX4I{#BNp=-f1_ioKl0t1#!t{{!mW z+HVh!5e47opjg*#%Wik6(}Q=d{1$-WjZEOkA|}7@JY7M#m~tB4x)yBn9tS>u6pU+p z+h$1UbQ4RB#Z*Z|I)w`b&pXjN^+8Ye{0dp3JB@@4mhi^03dg#RDVoU@#a5H>FvMq0 zFc^BIC6})7X?Hf8%9oz_RGvC5O)N<^=EjmOMR29Ha(WLdGOTQEJLltp=u?kD9oP1jI{!CINKU)8vXhUGc+Zk&zAPa)V{HS=_FkFTH#!k}f3- zsXFe)1HQN=W(vIh2jL3_gYh$$+-o)Mg`SF+{4kGOll7-*L&oI81?@S}=sq^Nf^jz%R=M540xL;^ zALX%gHfn-wx?U?EMJhcI;bK3 z4*Qox?2!n1sh4V~wQ5bx+xsLPbH=e*M0*&zn@_EQcbCVh(>vYs+oD8+p%yfsdk^G@R8HRVH{CYAlG|CuQ;FsAeM7j88r7hRNZQC zmY5|y>1DZmf{{XudY?A;^)_t{MnOod!}$JfSGu~?d&!1s(6ecxqVYin@ikkg(M@jI z^78ZTVXS1kyzkiBhlt$7He$(ElVZ2p>!0j(YE$B-domSRqkMXzo%1XCYWWH;z3kEf z*LFw27m#rpkT@%nPnuW6Q8;38vp9QFexlx(3bHG8ikDc^RNxhocu^9jR^IU=)bl2+ zCW2>Mb{N|Y&DLSLo*}rJ}mP{}iRg0kZeK&caq$z4vZhKM(J$B#QRgMgDmZYdb z`X&pTum34pZQ>zLKQtB$HPi8sV{?4?nqviH%a3Y~eG<*5ge=LmY$a`sQY1@5L(-`5 z$z)Z~+tfOdTB)NpH#WAxEYt%p9FxcVgZ}{eWMODj5F3rIe5O(oOXt*qIT2ONb%~KL zdyx@t)oM#ebz+C!%FkF4UIH|PLLh-J!Gvb)ma|RxEP*52WDRMMPoB*2#n}y1!x2XI zLJ%_Wnf}{4qUmG-VSbK7p=5HZ?P*3PPhW*m?2;)_EwTK1 zSTl{g@ha1@%@+!1zT ziE@S|Fd=IPRuH4?Oo2YukMrMvxv{6I{ruja-I0!#2js_CrFuuuyv{-^r4e8bg zHmT_sR#$pCt+v<;&RYvBm$VV%I?g1C{mmCFXR6&dGuL?qdYcVgmB|`ErIHNJi1{1L z3s*j2*>6x_6qXPbN<{X=l+9CPfT?qd|3JVH*wly+(;x#1iKO=B6c<0 z^$q$sr`@x&NuAipx+ME2$pd9XC}L0~Tk}td%=I#3hI(S75=>U7mFyBFm0;wua;bPs zx|%N*X_3g$XTAU59tEq2>tDQCW6C>$ZbDpdTFF+Jl&CO@pvFOE^HS(3rYpA3UD##h zL`JAgL;9)?59OpsbjZD7znBzGtS3IlosFxyfno-RoNf$Id=@w@Tti&!IUbCHe>b*T z=Wn#Oq6^9pb(85*DzjXd=9YcM#z>9*jS;AfK2akXt-?OXs$DZr>}yh{Dc_vc&1MG0 z`=@pj>outH&I6t>5XfhCa_Y41h?j66Nej8oG>dro&K2jY_+B!}iirDCCxP@)8`CQm zc&A!yok`AEvRk^szXA{}wqdG_JPcPB^GN|GSc!B617_~SobT0}WEO^B*zn&ju3BG_ zqpZSBE1TKwQd?oFiqKGtDE3q^fn+JN%b3n|PciHhDUEu=T)JgR*P9OcLC7}Wu!=o6 z+V)htHQwT*kB} zZk@2FMo^P}3+Uzdu5hvn?iV(Son2fzH(Nft!(brY%xA>HH`x^uN9P@>otqvelZYtY z!LaaY)1=#{0>LuU%N{CcOR`>k%cr^=4c^*myQJ$-r$A!&@kymygrE=dKpF8dM163! zwjPrOPb~(~D*-}80`Dd{BVweWTXI7>AVK#b&VnT`-IebwiM2!eJ6Nta)m+9hw;ps( zQ|OMk*d>A<%K0YQ$6}&XeYsftC3cJ&)>OjKo6UqV!CuKJbIpy6)u0H5EJm4;7m3t~ z7qRR-u~eeMXMmlKzFs-u^sxT}x|$6cpDEVE?o9pW%iRnkbg$s7L^rwYjaHbLS*7ff z!Iai^mtHxCQSE%VIwk~(^%yB)50TnP$tWKmP3ZI=BVZQ@dhImzV!omA;|s|KAFVo! zpeNJfHdM}T1GR$PX>yuOQm3cySx`O2o+?8CUf0K{ruMXQH^&unIgUo*Gz?`;S&O>g zak%X+Mr0kg=5eW3rDDxYV#kg{K71+*gIY?WS3_FWI})<>Q_G8uy~b9NQct&-Aa57@ zDAbIRta1_Yb@x3xzM1vR>WAadUCCj7q}D1CEY+67Q#JVG^zeCkvJQ;yvA>m07}_LGpz{D3lA1{DLyyLWF`| zs{K1zMv*uL$uplqq@8q9Qfj(IE-Lb?Nswm2aHtq$3Szc7F5*F1@4b$w9!{6LPaI1? 
zoYog>8wRxoO*`B9{`M;DnloC3-jSg{51;~LCQ*o%KotuOkDFY{^ymg$9Sf-^xqPS7 z+tuhLmU^VdtWwqmmVW1YOfXi;S=mqOa9*c5BWYj8iSgzebhNt)=Bz8FXJi9mb-Fdl zlu0vO%rB{V4kLYUwbG+@7P%n`$;d!C*2_S= zHd9OoQ5?*b5Q)NOmg$8vsSpdz?R?21;k2dVdnHC~lJmba($KtcZ%<)qx@d+XnGT(` z=Gn`w3phnPBZjElJWZ4nZYdGlh-E7&mZylB1hda&AHN6|e{*2acCdV+0EMqTawC>3 zG~tSjT(_&TEPT>$@x~J;?Hb$#PinqatfLSum1&UK1ub67*3a(R^+wmssd87KBgFO&rjaLT)Gd4J1Vvq+LKjQ~O0nTXm`Gz!I?Sc2=*is& zL+_K9c^c-E?<}_jaR~aPU*HKO^*p36XbHrE9@^AI#!!dQ{dAF9q7qTi9Sf&Y>M1+h za%CAd2Hx$3$R`=7Z_Npt6Wa;=lD#e`Wr`i8koqB==^mL*7GIDf5=DxtylYZ2n1RlL ziNYXexsx1%Ojo-z@*90_wM+KuP;-q&DN2x5I>ya6Lj*|;Sow-@M4H(JVnZ8DFjtK^ zre=?5&}q|tarAJkjO&Rm2s&8t2^ths>6k=LzO925IU?mllh8XrBtqH^r{_9BqR23M z67kUr22$HHa&VV^=XAysXCm9Wpq)7hjun^R;&LmZy`ULGGCgHhKXG)8GpZCHaoYrO z*eON9M3QAjZb_5#nxP z0hdYZxYBDz)vi7mF!dEM#nfJtS&4%V}oxPHL+vySyPO$NR0K@d@HLbbZ+_voWlJQZ+`Gfo8?D5#F@4O&~0Q zj-fA?4>TjFVy3&5a%A32MSERv%45T_ffqxPB^OfN$#nsUp1 z!%)*#VI-7YiQ%t^jr&e8?yJEmf5SaPWqNqp`RIaOTQG^tIk~xJ^KUN#MhuL6a=Z+a zkzY2aW~r7g*;#6r^_SW zTb|1*Q)#qS5pHSIICD%|fo8bHZm5FAcn&6)UJ0OIk76qTd!L(IC!{kS6CMj*t(xgA zF{Q66r#|w+o+QE0h>vA*a)z?R^~=N!O-O?fXjt4vS$?h7nR7FWK!_TrUD2_j_a;A> zzJCv2r({u%)6^@i#tDoe>Jo*j*{J2j1RfRE-+ricihb=E11H`{h|_TBw6^DxA!#-} ziDZL0th*e}UaOcC(604>v?C;IL$Y-5E|tWazQhXdZn1{mE~RWxCq9jX0CuTh)ZBh# zPe+GZFfI;86<_6$mZTe1dm;z5*`pq##4Z%kdT4>bcVnD{HX9UssCBy$BJZ~??HY96 zMk|VlK``AcOEuj@3&}^P>L*$-8lodm-z}ktErr3D$ivN4^Nxvt5g!+iKp13y!@Be~ zSsYxmKpt((sg_G}1+f#t*vHvqh_puWZt6r)w$v@^W|lR9Ufk@ZB$zf4nv8T#rYq$0 zwT7N#>E%1)A|_>YsrQMPwJpR)^AwG0f#gdd6DSq#(4`a=?`Z#i>6n1)dg^>sG;9Dy zw<`%UjFqgrW+5ZxaJ(Vfh7V5B_?QW3`JN7e7&}=uuWE&S3%?+7Baobc`#_ZK6 zm`=e_DD|RuBV<>i2C(#ZC4(B8eqosdZ}Xac1N(&yzwND$e&OjNt($t^AatahDv!y< zw0)pGXukg@CZfcL;8J77`nt)~s-eEs6RF-q+ox8os*h*J>wS~;4t-Burv^tR=eE@* zQxk(n)Nz@uuN~~KSH&Mw6Md`2EhXF|{!%g%Yx>3qM^<-@4v5E9^RK01P)443C3olN zmWzY0*^VrG^yU*fxS5^`6QOFR%zWg=;jpRtRK25bcyMTaeq^A&E;BkcGMO6N-*=nP zPNY_jjt&u&{4iluvCNOG8ui~%VILGuy4R11kW1(4_fp-A%W;&}tO%~kI(eflcw=tx zh8yNpqvOMUlPK@dKlmVr{PbFl&rRey*v2bG^}Wix&Urk*0c$BUOBV@$jYsYC(&ppV z1;+DsL@N=`EAu-Sw9hw``=xnW34My}{Py|j)(GkOm5Ar*R;3|KzOqO?F`~n*sz?X_ ztsXJ3;!iQ$*=~wWdVX0B_YB_Q^ItwUowjgrvoR#XrNvv})_%JoJi=)(MB-j!nA+{3 z;Re>WKYcqGLdSJOKpgjB4+0@{oclxQ-1UdB zLI~RzrNdH@2fN$Qq79;%m4FiEcu3KZsK{pxvBBgHD&tn50j`K!7ST?tw7A=-K;E_MXb)2(9#}QW2cgQ+5&Fcl zFqQIIgVqIDzJVm=R#1nyWfASPN{hRV3glh8j`lD$;(=9@d=RQE9idM=3sWhd1+~i7 zv9;2Ql`bDOzQ=O&+ZT5(t}G69khRpM<%??BHnZ2^d+%jMJ_bWlZ7_jc#Wl+_SG(nz ztKK5mo@uR#_qFOT$+=de^>v;Kb5%{mHa<*8%ciBRYZ0ch%0617pQMJ}zqz3zbHhZO zL~rzkk`dy+(}io4nI9^%V2M*88l`xqve%Ld3qzx{&>kgD)orUOYdA#IbYV?M!-3l! 
z4Tjq9CRUB^7c@lBRHJ%5jZV_+$`lpOnj1O&)Sg%RX=j|caA4OUw^Gep7gkK%$JP{( zPv=Jqi^oyIVTDcWro`Fmf_%DQU8Jyh93`ynyoL!N)Q zD{3;>i`IpdGWoQ1PIS2C^YDQ4I5On&d8Bw`*b$EjyxYwP)>bEWIEe7SZC#84=^$D` zSgWl`@*#aj*F{@|6U>q23(|(;c@2)GD(O!CQhvMX zSZfprxK%-k5>PIr7!9=ucrYip5saoGq-26w6DMY*c3Vsp$IK9Ekg(cP$hIlhDwk zoe5XLL~C^ASEi%f=**`q)Rj+}lN6>h(ZCvwr_G71_Oc1^*Jj79rM?&i%%6pkGBPdA zcsZ_(0H>Lw>73UdS{c(k6Z1IQq-?0H@?9ltz7VTiK9_cz>H}Kew)2IJ6Y*SHUy;u- zb0VHg8!Ga7oIJ)vguJqKq8&v0pzM`s4Z;zqYlP%Vzh1 zSw{KAEoaVNxtf>2Ra+umZM7u2bhtcoDk50Xj&!Kv9LYe1I-*XACaBtx=N!KI`m2LF zh0?97%ZE+Muur4_(YfU%7`eb+GuTwTR!FpwP<1e{3H^KU4 zcaubqzn0x?z*VDRA1FA=?l+D5fP+v2-{#JE zPdWVnn={iwcRCz4ccwbWvhWx1hR``Lq<}j0UQ~k^>WyP_pv+?BN}GtG&gARXV*CdL zq{@I%?-?qN_e6!^Cod7{=s2FIKmkU?jf3A#q)Z*RWUB{El=l0q+|-6Aj7Q zU@N-`nn0e7%@p=!<$seqVJA80pa!qPYeQ@=f$<#o7 zj6a44#|rhe^`VJW-}rdn`a<9O`uId@qHlO?h;@6c9+bc2%096`uYGWmyR0JQ;7QJn zR9n4U+08DOa{2DIS?^K{EpEwG%Uw|{EVr41vfXBW&URZIIwu}fYi`0!4i2$A(p_Mv z|8iFp=GkP^SrZ>p<@?wovU;VVx-WL%q$>@#vn&zRg@Kio`HnpMP;}1w#N9yX<&DO& z&B2ku$-%xMvS4&dWSd<6==|k`La(p(Eml+>=qKu&RwLZ{e#2M88zQJ3`Zoa@d-ReC zu!S*OD-{bX8dFev;I5&KbJ$%;7prp1TO|{!&2^Nm@}Z?*rfDxZ?#|ocV5|-GMcE6v zvtuuD-z|h$jDen z$uyPQt(wFdXg1$Ep|W}AZawD+F3t{LLF-X=ikH7)>oVjDh~h085V@naF>R~?QU^{6 zDb5OFU$#tiTd-6@vA;Rax{e`lo5RQI0X5P7+qAYDWS@>S8klLW+-#S zZBcBf7LL59F-qK*{EkC)Mu4x#HF)E=4G&Yo0O;cv5=OVV*d+@>La*I-QSE@@($>Yj z!+mNifqe1E7gPPPTxDssmKq(akM~WEj_r>9p zaLsLV95-tBL%N?m1rxcouBdrisIkUnVNY>sv9zq1OZBrOB30uH(c*l6x$$1^BB-TE zf&C^%N0yBbPS(d$VxKEZ)Q4+TR;3U3RVT%!iuS3&p#dHa*Rp++ed2y^y`PlXQz+ea zJLzsHJv21hFVaii-Fv&eQ7PVW?i69e$kfQh;Ode3z;;8U(%7l4pO~xvio!{4^isM<@d$eNK*fx*8u8M<)kY4Qdau+ZrU@JuGZc zu84CtjI@?TPaWnjj$#*)WLcfvnHfvR7b6PhEq|E&WlvU-`GEqN@6YvBMGc-~RQE8K zi*yRtbHgd;XPDd8IRj3H)N|$jVLBeJNZP3=qMEsXh+2^ry%Bz>Jw;sm#`^jPC)ay5 zPWUIRr7fm?A& zBrt!~IBg~p@|s9wo@iE(5SN58(4k6}L{5kjxpo4RByr}itgjv%5g`}XlxU4bYnmw1 zbL%4mNb-NOC@<)DJX1eX{i7ojlQq-N_|h`3U->F7$-H%=m?-gdq>`lF;<3@Onv*l` z??g$rNI2JJ#`&2@uyxMZ)WjO?Yh`JF%KI~s{9@@|`s_+Og*LXPE*!7(f?P{ev@)fC zCh~LvjzxVP7E)qAWs=mL-#=9E8>f89Ya)@>xf5%!91g8bj@DKUiW=R3r^?6h9IP4X(qzWAPpMQ6Upw1Ei2>Wc_B6kZoNMyuG-yK3S-b zte#v$YPi3pM~#jb;y{=`%BlDz95%XZos%9IDAo@z3}T6{O?iLVozKNVu3tEdcwQ#C zt)49w>ZgWR*2i;)NU!AYShwr#E@aTlql3b5$d#AM!0SJG1y(0T`|ZBJ_T-mmrS87F ze4NL$YqUPYTndZS6-!Nu-L?!575Lniu>OQa68yWSExmvdW)?BeNSk@V)~4Ew6`6@Nv)Fm5G^51!S$;FECZ;VoQ(Xb`kT!3|O{ULc z{7)z}t;m_i*Rw2Vs>wAsXR4^t2%V`$!zBGrq(5nw9~aI z_qW9%o8yIAIf~+FRYNMIOdzO>P^276ong*eTjjW@N(-#i{*>{zEeI-RhI<*L(6nrw zCq{V(N2YXgOE3JlY`z4g4oTuj+2Q`YY-{s-Rx=H9`y-22B8QA9yMI}n>I8MW5i&W| zU$L%j5m+o0v~$!BIqv?ps8SA#Q$s`aleDixLl%d+6!i-8-YG@$51U(6Jj6jj3{&X< zsJ|?(7pTE?IozpFU(!cCt>6?|d8m4(#K)M$*FKrNucf=Pj7OJOh~d!?_dP{<;9$1F zy>I?KWS(9rrvt{~f=PPV%)dJ`?)pftY`(f*iIG{U-U*66c1oS>s^_(J@SU?xMd+3D zF7~hXa6-J8;&VG%HNGX8tCB*6wne>BE@(({Vn|YtlKQ3yg!IBF2fc?@>Jw8#GFH`| zOop$^hzJ%r=~2um$z&C0PxEC0aX;59(w(D0y+7nhp)ygJdlYLwx@F$wDiyvmB0gwP z>ZmO#F>|YOM+%zHbgBfOy0h~7Sm$(JA19;~YpDW?J&ij5#e{4}z&&%Zth*4=(sR3> z6pJy$OfLOf>tlUDn*Cv3#+e|BZ>6fk-95z|1>MIfErk*{Ep*zc(_0J)Io3%5jgZ z)F`Jp{Ef3Z%iTR3impqoaOWOAC-R$Yp`@rot34&o=fp(*IP==dSI+C^R58AKU(A-4 zF`>a3NIAy^l^`(Js$_IDSiPyT$Bop~!9Wy*z&i&=!6iD^fHima}oYBpFu zl7}Fw3hc|MfCu?(u0)b#WGd+?|9JP665k-ok;pk#12|>REelpJw1}De<(4l=_b^YIbxM}6Q0jukM_tk!ny(|{Rv>-5D~~Fcc=!@Vgfmwx^>nha zpS9j@990M!YrbFF$)`Z=_d9Y{rl=Aakz>9`j_Sbc%<9J8h&Z8e(;?W9I0>cVn$TH% zx|3x+1ztpHebgc^#_H5cJ?F@KQ_aTO-?bf-&{W8K*lm~XqRJKs6IMn@j0(L#MXxZl zd!yXw1~kk!Gwz2QcRYnh>LKcrZ9Yhi`~#*L%Y92 z9E&Lo5otArA){~Cx(Mrfkkl0}ou45(TNk0!NI`M-rqUm{8740#VZ`Er3LQnJJmv7c zQV_fwreU6l*3__YZp6;J5hAxRKOrqyJk!5~Lph=2OX60MY+huFi@Bx19h@4{qLdqdQb9shjjmcXQ4cE(ts*f!pGwlLEwKI&DR&{s 
z)Yj_L>8t{Z*Xp}lpXV6;c*{?=#kkYPO6)`FXkv1(evBaHnq_3%y(+$5HSW zuIvp?tC$A$f}N|#u_5n|sgsSOoGwyo+Q;nrJPApmkVy8VVGJ)C7Im~tr*D8HX zvQJEkSn8h*$+Wk4Uwg|9S-XSdGDA~3pI92n4WUK2Y7|##82&spRvxdf#gmS~aoR6g z*_;((%4d0xHI>c+G2us30oZ+a5j3gBn(5P3Cd!AaS(cPQd6>3hS*I*B;ljT0Db2Tv zrq4;;yd~dri`^hP^V18|F!x5#A6qb0OEHru1j_Mz%dGtjQ>n2ER_D8O`-?1d`y0o< z@D%_X$G@<30vpLU(euDi;pO3*D&07~nG^H>1@Da6d;i?k#))0%`h|_-otc>bmFVe( zjQ^#gvQkX9{ntvSw_@PGk(3duFE&nDahB5xbMwDY@xoVfY@FzMx%Xd)T;!^djT679 zwIdtH$KVQ+`Hd?~Hjc;sbtwNeZ=+VLY@CROuW8vh{)Mi5**MGE0Lx`S$cuGAKF_Beq4t)t=mOd@CasJ0a*2&Ly&6C$IkoRZ3A{WD4M7UI~6 zmHL%LsY;xmh0T*WW{o@tiBz^KiS#PD{UcPPe4a>QY#0+KZfjiF z5^I{D(L1N)HAyrURMe7CTNXA-Q+m=Ow_5P4fLj)8AMVYd!Y2pK+V`#ZqSY0F1v`tnt`_AVME()yUkD= zVUt5{GYil(Rd}7{Y2>zlV+7#Sk^M07osp@D@$1 zA0AHGorY;%*x=mDxP~POpjdoqjDz7m%#%3{o>0XblsBrz6$2M* z%6+^RCdG)0v+%&VdW#^{!7<{#Y%KJj2*s!!4dJT{E7o4~Ub2>H}UzxX;=LM+VgD$~JY6gF5wU%c+6YEH;x$(!(rA5+D7} zjIODRSScr-7P($hIdR%WDd=h=DMc$Q)=;WIb#e3-XM^^Q^w;OvZq9daqC=j7r2Jeq zIKtQFNFn7L2UZ1s$YfbVcXm?f7H3#C{*-RK)NtE_>R7#>W0dM5|16KWt2|Rdam7Ub z*&levW$81X+}BIp6mSoHa$=*?*Z(J>mPbhq_lZ$AtGpR(wMoxbb|@Udhr=~hjEIT) zF)@j`RclQR%hFY=4PzA4h6Y#mkB!yjJ@zPZcq`*~1Mc6o+Cbm<;e#VAv<*BM9qVHa zRpb2{=Yyr>;b66zaw95Zb#g9V&zIh)kvmM1kL!^5AI}rZZfJYOB`CURa(sPFKX-lx z-7J_I(Js`F;PckGBfN6f)@y6zQqT59BlXtBb6V%L;-~M4|1Y)7n5Zmr5>&-QT2|&g zu_D-XHm{)KBQCxN3woS@svo|G3%keHYeD_XX=>c;7i*8aB3OSEME?7$(-jKy?*5y26U6C1b z%L*jb$!;9YXdFKRFeh5Am143y|mdF>dnO282XL%Q5zv~%)y$CpxztI*%(RV zALrT#!8ZrOHiCLH@i#{Lc&Wb;0^jTkn`yK=O|Gb!h7Z9NG-_sJqokHNDo{3b%pGks zZ<<^oGmR8$YDLU6^qT{48zCR2wc=$O(QpPPIl6G;<#tmmTBZ?+#4A{4pj(ay`b_J* zv;ciZv2QJfWkE`V{|Lwh6_^x>JaCqmsB!2-B9@gT_{MP-ra>R*^BO&}Sckfs7VXX% zR1GG8=!dtvzvGtMnx+kJN~(ot`|(%pmaQHnAVp1AvsRh)}`!}ZkI;8>mS ztkg4o{cGwyJ*2}FqfU#=lgZ^l>_Z|W#Np@ul#%%}ks9t>C$|D;2Z!sN#WXt7nr*z- zX1T}bZW?4WQ!CkctToDWj`a6#xYu1@S07SC$ho09o)IQ)_lyj(&|_FVkQ?*F-72RM z_pPo6?ux}R926u5za}R{852&`Xp&b1)zT!PLw;FjJosd#ef#soSRYgKdVH_SS=e&R zt?;0oje?92PV|otju9+|_+`M$^>H@tt!AjoeWE-JdFqR3h#OzD!`w(;24tp%IZ>$> zFyMCl^c(A`wyP4@0 z&wYbuLql?UsIxvI4(q0(%FjxPL^L0l`zHI>I90=Z zAP(3X9~~l(jWC3J6LcOIEYH0RK|<%99AZN^>sBJkhkBWc5ZV#lfBQnWqeeTU0I()xhBsH4%m2ZIQxRjHyH z=*FoqI?z7YH<4O3G}^}uk9$8oGB`0x6&PchhKe{?KYVO-Ts7s+AMQJFBDGdOmyQ2~ zdM`Qm*o`b>CL=UFIHKM_?8;6L@j(!gseJWq#CYGS`tN({YcMU(eQyHGbE(~HSA2hJ zzdQRTna>Ki@!uRA$%`Yg$3|7#;<>fmb~4oROr=EMItu47bK0v`)yJ2~PvMc$#=Rx# z!+8Fi+Co_6iDpKJc+JXoT$Vs#yBL3p&*YK4p8Kr$(4OJI_pr)5T>s!u*yG};QGQDEnc^ZxD#=JSfyRv^$ zoPEw06bU2~asw*r%n!*o!~{$Hh*zIBG&9upVC89WP#Hb^|EYT$_{ff{PV@q1F<=tE z#S6}h9|-UUlT2cDOPbLPgRw^H@ie0^B+Ymn3^%Rrmee!-rIW$A)BBPNhW04UI4WN(&tF->Gv z^D}%7j-D6x;4NS0U_h`Y)gUe4n3W83j}E=Yi?4wEd<*aOFe5#hYLw~F8yyz$8DA4f zPiEwy!M2HL5v5~+@21ESoR?T_S<~%GWLKJ-8 zerS}pR>P6ov{Y(!Mpj8rsShTkGKOdbQnR$RGzE(Eh!f}rS}qO26}OMZfrfIpyn)MN zt=uR@a(U21`^=axx$&L@e%53*+C#*?O*k|3|%lXma$A`XiGO zJl;pi@a#ZRG1{SgJ9f-W@z6-nPQCA5`o~H8h2O_BLoq+cbF+s=0&ZrZJOT9yldx0k zx_D8qRAF1pQhhujgz`MrY;AKNwry~2{5<#!DEIC(6@fEg?kO^>39+CYcd6QTVj`MF z?+4S>4o15simX{88_Jlm>3{;SF8*IHSDYq7VcOxiI<0fvPPOf#DYkvajHaLh&8{(2 zS0>72Gb_|Q*k@RJ?QV#cclYFV7+xClY&6hv+N~A^wIRueLBb2dyATlf`?OB5p9Ed@q$rJPI__@?! 
za#u{G`TC)0hWXs_xqR)g6f0*#Tm}@cAC_WgE`j2|+$V}uI>XhK@i$jHErYu)hdhQP zHipfb5G}ZeCWYeyvyG#NMwhLlF%7kx<8bE@8PP}MBl>6`BRciZg+lHy&kpcNSh?y} z+pY3Oh@I%iST^^QmuOoQjk-_++|T41k!E_p$;E0-v?lLo-o{XmLIoL1EHoiHiWLtR zHP&3G=W%B`#0#$flV5*>4C<8QQh!*<=kjMHgH06U z(<4l3X@=x4Gx{Ev5s})BWo9!xvBC4cRV$+uWqBPueMWr5} zu?)>t=E&1`v$G8=2QfEM8DvgRq9O?mTRe`11{Umf^fQe4sYhGUfz1&76koDAiJxL- z<=AJL#;Xu-k<*qZHgB+5PCvGBIS$iNI%>pr7d)ESlb}<0?A*S^y4~(ALvu$hpDjDw zwqWrcRa+?gZmKP$Z70b_kNOe%`mx9x!b0}0!J)hZbdGYY z3!2x|ys)MdXKjHp9UrAxkJy?Qsuk~`4aeY}%`UVzacHaFbpOs8PfV4sbZ`vR;+#3rzu<} z(PTc!G7%l&ir`VkVtNhl#E>OANI0K|ge?VG>`(NCRN3zn{lwC+v%VkMLNDnle(zO6 z(&4IAK8vJ@-P3zb#I)eIAN0b8{r1Y8X_arUv<#ztdzPBX*Pe+PjO{g#fgQiacwx>f?xbg!5aZxJkc$~1jNW?Cn>Kqb#i;ea14U7>G> zMle~eriWN7)w>de*0JTC9gkYA(XL0R1$q&okgR1E3r&1^qZ5KVn7DN# zA1O6brzq98OS@fJVXmohqIFCP?$uO!_?Ok>y2)(1pG$svguq5ymn7LJsUV)Jr>L`i2!4C6){2x$Y)zQiVKA z<=Zx)SRVHD6q%wSiEcP#w|bjfhQT!CYR{P>$)rjZEAdjrC=`oeC`4xI^g$<+m-5zm zgOk9L=!n_;GC7G-$*LTa)g4@N!*gy%+P3=P1|;(^y#5k|PV`U-#^RZSGRZ7C z`COrJbghb;3~s4njtVy6`{7b6$siQ_WDU2747nt;hH!ndmQArw)^JOsFV+yQPu8+2 zX0bLY1=Z^G#ThIrf?(pj1I5`dy)3@8qV4p_7{X<7);Gm0)mv4XR>&}t&y?@&vaSiW>$XM$e;rtbU;$q(j-2al_ zkJQh?KM2ePk}z%B&04Xer1lUsp&3c4u|@i>Wi9I0lxT|@j7HMbQ>gT}mQ9sIe8tw`pv(5b{j3(id9@zANnL#xb8;_=X_#6wHVOycp- zsl-F;$xPz$&`R7}cJ79kod&_dLL@FbDFMr}Q&yS{cY$pQz~ToC58`s2966t)-B$lZ zTCG#)TP0_@Fb;@s@^XouS?lpM6P?m?YaxO;J->AnJJL24CFmhhTL903r}aCl7fjJ} zNo*pgaDLZ;{zBkEQx$vCIaM49!ct^k#X|`qk7Czzj?Y%Tbvo1;VTS_>-?MoCr%DD~ z#HbCMZH!n1g`JnMamttnKWd^`XNyv5NXEUXy*X2bF(@{s#u*egNTpHCQn~aq{LYFz z2u1#6Zzky*8=N_P3Kl}sgFpaeR<`SvKRfVLYO9{12I~c370f{IHSWztltbaiKN;GU zWW<;PKvqCL7vaj%A?bLyyNHUQqNy?-N?3&u6`#{N_^Fet+lgw28Ov;bn$)Jdm z6$+DL_pQX4L?EKqGic6Db;v~Jx1_WU^~b?;Zl*kVY$NG`*)~)y_gDi>rOG{$>Qj9( zsVMr@mrRNuRgesFlcgD%NHP2n^4@>o+J#(pSw$Kx1(pMxDjiQQTC{Nf=E!Asy zEv6a^4vTf{P)1|ku=dF2I5CHjADA3SY_@?Mi){Mhb-4!3VHG(h>W-Q0 zQk9kE#Duh8yAnfq(ho(0Bs{CBelhdRO_~m|AZ&(W`H2}EhXQO}n8qc;p4t%Cx)<`= z!s~jZjpFi zd_>6UcFJn1wbJeBFBVpk;PW(#^(B@X%1bCNa%z})XVv2{9sn;Fasft)f%-ptA%@vX z3#3&ro~AHW<9aIf+RUAiK(uA>CuBC#j=1-cZ1{quFa~oGqR}rx)9_`Iee|ma-B=GT z84vnkt zi)S$ujFZj|#$03v8zW||!Az#OOe1oa@4`*;cVWZ!odn|AKAfDLx{0`2cKvrXxN=GZ z;zw9zhkznC+?(pH)lywP`r8UvzU9WT;MQqNtK+U1J2jS1v`1rxH(EyATjnP1(6cG9 z>PPRLIPH{;6__CR_IZK_T-(yS7&3jsQEBf|sJAFsRA{x1b zvnUHUUu1>|-eSr?HZV>{g_zHyU$kUU?tI09=V-AU$rOUdJ!x4GkIfq^p#qS|;XcB)vmTVGA^x>^_dhZ_H+*;M#2bLSUl>_ z>$4I~AYn~2cl0EtOF_YDHA2%L3z|5#(Sx?1qXoK1hZEwB^TqRyg^tmIhDg}_r3(dV z;YGIN&(2M`I@X<8rosQr{OS3Ho9CU_2fWneF3iu~690mCno_>3EXX5I(S(Q|zVRbZ z(Qia?dSTI$7R8#KdSh|+7I$iPc43-cql$Ahb!KJ3?&~ft6;2kG^wq2MFsBxlF)@hp z=8upaS?X1EXz~Ujbz(;Z1Cu_3ZW#b_-aC2$lYZ+R0BZB&gbql-Cu#f=|{ z34HNXbSxy*@My4xzg*UdK6Tr$)`vHBtQ?G^H=rPL7S*L5fEr$9z1lm^U8?bs($h1m zRk?0y6Ut#c^3d#gS7EW25x;n9i$>jHFpksu$Z~gdF?R$!goB0KIWMeaQY3BMY0GvB z-vn**BWbqeX{-&?&0EtCKd?us<*5sfM2+*5H@2^s$5pyO$s1V}?@a<_*v8(dbd9vh zNCrko3iLbZFc-Pt^rR;UOpJsBhJB=^lCe`_M~~@XSJH6NAnP?2f4GHIxuWT(D(!$; z>sL(+tu*5M&zj$vKTZ0shg6;#T>Qp;2V_zeKCA2V$6^EttcEgM4ES_z4~hq zOo(gaK0k^V;bJF~n4{Rdi9l?snIA=t;uLtQ8uPZXkUKiPQ7vDfd``DIolQJ}(rTLd zbS&SkgeEwkL4?)qqn%-X)T;|Jy_Wic${KX(UI429=y4C9#a1nm^Y`Wmgsi5n31fcSuVyD(n>il79)m@Z2k!E zyf4s8Zn~GQRf_9=%oM@yF6IxI+)PhRpDMTuiv_&wSzMTRQmF7`C>lPXadP&|@+l)s zasKSoY!O>NcvB%hfyhs$q|_~(w@oP)Pwy_{frml)#!CQ}^z*cJEX{AFWs2(bR=jrY z?mG71$)p34D=n`>h?m0RmQap|wo%TFpRCpEL0C)G*}yu%3}PcQMr+9|MG;-oWvP?c zI`A>tm`K$kktJ(t5F6w9ToLa;c4E62m=@6FL^?9>oUrRy16%1%T7$Yg0+wlYY)e)@ z4?=Ky6NVWYG>z$-UsvK-Q#oqqOWy)bTYiISmxrFpc(R^bSYsjs%Wu7HHD}!Sm)kuUsm`j!8d2k38*TVCR zU+$ou4Q>pf z@HVpfQ6j7P!f2jJd3BWuM`IA}*kpS=N5pv@-p7d0cUWs$`C!d&+bcvD6VOjMV~Fw3 zd&{w@1gj)iM#tUVB~&JhDgp*9I}uxf+n2&qnutb_LRS4esBi%@E 
ztH|#aDOh+J>du81qHbZQU9Xj4L&9G=(_PT@7Ay&GG?=LFH8H86uG+3K(iX*Q*G{TEcZ}O2Cj;?R^8InayCbE`$0omdr_ipCuh&fPg zWO+M|TWDXgCQGt_uNLrD6lQ^aCzl(oRQ34b(?e64 zs5aWYT~sw{$wN~a@8Xb4hzw5y^}n=78WVW=qDc)0xf!03RhVVnvqXa23@=tQJHt~M z5Arj#OeR)gIrcj1wC6LO{rE|31YP!?!V}AezNI^s{Pl zu~`OtFm73@FJlLQSd6Z|qWUHrSw?HVp@7E~Fnk-n+_^=muqdhVO zHOS=XSR9jbFh^U?5+!gaq*Cn4!d|18PJ4_%CBmOgN>b4&CUx1-T;+4bV{GQByvF>k z9Q3KiVM<11snb+Xmfoz@%ID~*Qa3i)MNFHwmCKyuRHnmT{(^(pxMb+GK?@j*E7oW4 zh08tgwx>wt&$%`#TyQe_F3n~;MqZynJ^_=t~< zd_Sgkqbv<$?l(#!eC;M)CX;9m{>cr_qp&ieZR{q!EKakMw9mGDPHF*H3s?c}>k+vv zwQ<@nu&h39VRHkV8JjIDb~44oNL0?bNu0Vxy3WEX9mPdFiM3cU{#_IMTFw>)dQw3TU*DxZp0i68IpK?qOm3WG?otUT!(k6=q`X%>c&lxQDw4Ll zMl`Ld`ulx&VWJ_OLBis9clZ3_R>H1dTGBpYTlzQ=hLU~F==(^+3O4|-XGIrhgFaFH zlbB27&Bj_nnl=V=IqAVfG`ow~E|VJ!vBIp}r=$caJ6vyK2^jOT!~l9@n@-H3 zp%&#kM^ZHt6Cs&C$;sntV7CX7h@-!f!ycs&a$GE-{S#t4V;r6vic;s_$@Fp0u-oI& zLCDk3LEey7wwuEVU!5T!Y~^qK@w97m6$O zEEYLFlTQgtb<(6Db<*@nOUXW)PfpJ7SCWG;_d)p!KY=@q8&ZWs$ffFVjq-b9bEWlK8Km5MV$49v)51Sn+`8bo9$n-E-(mxhuCU7NTyKubxE$xhJ5nemWL(rZ7_!HFj&_ zx*Mf8f~89jDw8z|@vBQ8>v!HD@#>&Z^jst_y8v`%YW>donz3RL$}w+y62W!|H~Kga zkt;oXmWmXt{LuB5MLecj!?d@lGrx#WLd3(kkt1_YYAoBj)e!cgsIfYp%V9?DNzqcT zw`j)heYa8wruWf#3(fa!X-?AQQC0-eD6Fu+3j|xl+|lK2wLWC_2~i@8xS(eVm>@cu zK`=GxIlP*RrJDAy)}lI!SaPpeBIq4sYgqf_ELIdmQr1L5QrAbxB&m3-73z?sLd7WN z#(2)cxkF%t3c0O?g8_PcXJqFbKRb5_cbfk9eCt%JBqa;=USDjvs{>7n4fXa!aNF0qs%SL6e3=Q zxy6-RB7RHQNOw=1If-k>kuNg~XHLu(+~t)itP45u`OSKrj9+$7kOG&trwgw94|BCj zrLM*cm_7fCgvQv*3dgnyg}IAMR|pfrcpJI4&pfs#CXk~=!1AU)XOXc~j$3WiBB#K~ zbZOEVELC*DVA-u8_7bVcZZmHkC$kjUVoVOm3(|lmZ`+jSK8YOUC?7PTqi0}YIk&G! z!yLD#urLODd%?v{o#X}{83o+mVRfPaaOB7*c!Au!BS+liu;da; zVp6@I-~{9rzPY8R{MmWaUEw5}o*F=GQ!Sl$+P?|1ON$|_A!0O^^0eQ1w$|xwmOSlg z)F}#{UWTj*6wvoJC2VPA5Hb{{kTTi!k80`}1;Y&tkNMSIwPqgPuI7y9|J3X3|` z!H(JfGiFkyO_88^NLJ`1L`>D;-mVGHEl~ z($xIS!d#z$N%l9xywlYDGTGxP&ck%zR9a>sSrqb3x3HFqnk-7@D-?RAIG2`(P}qf& zCzn%&>4!d1fW%J1lwTiQoGdQ&A9!wR`E)|v%@tDE5vW;chzzKDriwg_0UTrAeG{3Nf zHo~h^H3NDR+UA+rDb$Wz{3>q7NvuI;AWOwDKU?zWS$k6>t-Arwr@hi#Tq1K7U~8su zwm4mI$qtxrbJVlk>4o7Q>TGejNK0*Qs&P6TW_saNVToj!8i=fAXg3w76SYj1L<$*L z!5%^~AookXUI4gNfB*gBz2|m z+bpUW6O+ibaWY~#AhOLZ(77a zbTG6-nS;V8@ks7q9_ahF*P7vpY&+!ir>CToos{39>XMmnOU~{6Zvsrov@GSz=ja8j6BirAN|jBE$9;^JzKFLCewOfG8Dk z@GvN6h?6Y?Iwc~OW=ONkayBGhTva{oJG!{%MRDjyP#uVh(|}iY!ee>~1x<`}31Q?> zE~SVY#w7U%73n5kxT1 zk#JeSiO0=OP_Dcdn+v^-YKPM(G#X3u%WiHmF6uZdV%*J)o|$AGc0L?hal{_cJz1j5v{88`B*URI(;CYqq;;Z%7kbb<`4=i z@f4HNJBiSlX^kZm7+t!lHoLX0YCOTDq6^|=CY#Tz`8HPfp@xa`>{^-BfFtdJtLqqr zsQNycJkL_x;Zg8e`+#&83#WsJG> z5T%WZrE?Fq_po6Y-Du){tzcjG!YKmpm1jj!L+V%^EeKBGYY7=&0#w4+&>!9^N>iyk&m+)Y8Iy z@r_0~#3JCKC*wR-Kht$5Pq@|?d|oMAt@Kb_M^*yZ|r${oBaM>c;rYY zMqUpwbQ?Of$DF~<;!=?g!-C_E7;O^E+*A>_<>#p)kEe^n2sx1+(kmZNE;kqF$5JF{ z+gjEe6nv*5s0`SGmopuC-Nip|U0#|Nj*362&EY`7R}ymkUaH74n~ z7Reqw4%ToehCH(tYw&PgC>_!pm{kMvqylkfnonv@oGH%E;AnsG%nFF$bf4Z2qH1T& zUIG!X9wkN75}~e#SX}WBxAdXA5Thb%yirJnk|yg=z_ij^o|;=k)=|*Q(@Vug(hJTI zEfhZ%&Me{Bvi~THFsyF`Up9Vcr8ZZbURuDRFv2Aj209-)1`G+xk7kPB7H|niaj0g~ zUiFY8#SpE;He;=M$)RlpYV8T-#veW2V|Ii(Zk#GW7}|bq2gm$ z_JdVR-QWwwlP}%sZEm>>+jwpTSIKJRyGr{)ZkJ}9-m1=9=(;Pj%O^dR^jT;`%_7I> zMV;k2418XFSjI%b*~G=S++nxpY+?ew-mF$ex*KGKuQ-oR!dETf;Swj5uqNZE6sBjV zNTKLd@uVCgn%;$FY(DWc#@n%yX)Vp2;yyf2#ugUuC_xLXfww+z#Kl6Ku44g>Z3ldA zHgQKj9EKEAMb#Psh&XxRC#U95qewioW9EC%Q6dKk$?MoL_Par@fYRHOTiyeRP7`xI z-$7p}G4|yjG|A6diOOBk$+=R8-|>b5q4$%QEoqUCQplCkg({hQ^?y^e)2WH!rZX{x zNb(~yp-FF#3sJnE*^yvzkfne~-p}kvo?0&=c|WOiWE+!;Wq@)zc8u;*aG6;TI)@{b zVB~s8VkD9Hndv<1e1}LD(<@M+aMaJNcO7$rZ%^$%^~|H{y|N!U(H@W4-`-;aagpFf zNl$7=VwYGCCbguq?1M?oe&l^XX<1Lk4<;@1xY;;_Jeb51&e;zru|SIE5$(YwW{UCP zQ#0gv;ALhWbsuYGHBB| 
zIoOnjSh$Fehd%B@cU=ct_aOSh^3buzc_mD5{P0R_@Z@vjatJeD-R6}h7Ot@Thr>>G z;rC4)+z_mn@L1EXSi@q)W&UbdO)^5U#x>oL(w(&89POnWS293UE|GgXCE{WY4&!Hu zUO!@^K>SMr`>!A*q)nR#tR|Ae;L#$KHUewx`kUmJ6;qAA()NUy$KvC7PeZ)R_w%!8 z1Sb5F$Pg!)naCYof*wW%cQ;8)-c|I3Q`KFnAC~ZarG$&`qW{rc!!uqz*lA`XerMW+ zrCt29C+X>oMC0@$<(#~oNVdnjg$-B_cUn>CU$nF(w^@Dcx!fGeesJGgz7 zcJ46Fg=^i7fB;Tkq2pqqNC(5Ako_e(dShC!`W|&N|(+C+%u?Gp7jM(B#5aVz*OzcVa^JGcV z-glpmZ6xJlpRJc9>Lej zvm}8`hQ$(ltby#6*hw(|-dTq9Z*-nzlTeC`&5<>!iVkk6B!S~pJ0*$j>j-7bd+(f& zdC9zz^31~$}mJOw0c&5^za;8&0iN4XJ@zHk=5L4b6uuCX6vBb}%(d0;G z{fijm9P3cE|^ z!+L9}sq+L^1VNJ>6~y_Z$!(HZr+^HfI=6cr96QtLU)0i()f0DB7f;T+TNhB^vbvEI zVw&66aEAl-pu71X2pfe>4nc3Jm{h!&_a`^Ji418NJD^tH;?hdJH#-Q8q=UvmXxNVr2ci%hXALO}^Neap62`%k zd8{yGK87V>W@K0zMoxy5g?Yv@B#GeQWY}yZ9%2ntB=$S8gOD(VF>GD}X$&dOl*6Gx z@{)YyGYFOFK~C)2>LA22Zom#gD*4c55GqMWDTB~RI3O8>MAos#AhgWGj6D@r(hX+=k$y;F<~M0+P_ACvY@*ET%uovJ^^ z@ay$^r>Eo1K2eJg7klS0G;-{noIi-{6DeL5>pmr!kBVNK>gMGrmo_l@!a+QMc+=H_ z?%{D#yf+m2T^uJ?JM~?>x{4*Z8*<{|Ft^rd*N=C5m1D> zs^0rfqJpdNcmN&B9Ar^|F4JlHy zh>O4S-J$Z%j=NrOt(NNO20AWIl+pK~2GAPR(D69a#aZoi4mq#U2vJ+ zDAk&@PmJ9#q0MAj^5MP{TVom`C_CB*o8_t#CRQBFdx4Y4*#JSIkAu~;4!ON_v2SR#-Vd!3aQSY=7j(Sql(SZ>g(q(qY{X7KRXbUs-t z!dfA08?RBKM^W+17wEo7p@2G(sCGg87<(BhWGvg4IozlK@ukybe87^ilys9a78cD+ zUOKxyaX6FH9y3B)z4N3H7Jf#L&(!Ha$xEL*(P9$=HyNnF`)wDh6+UnpONmUvU*S=? zz$}nn*3!-LcsmJ;EcK|{V=gS(Z6!EEwkmY{0C2yqgTVd!#Wt#xMG&hg(YMaCs2%x#kD_m~9#s!rrC`tY2d? zNTX(#LY%HwpxmGHl*yYO&&WIs18X2RQo~{ zU{`@!6${)>BxmX$6&7iDK^3!UDmI+0MhQ1<>v%5GEz!x*;QU1?49eeDB6r{%HegFT z3BsRW?u}syGDhalCY#mV(b3#!4zfpI4e^V$7#1F``=*p22~#y0BL!FN+T{yXI#z+? z20nr4Spn_3kz(jARJK7*`Eqhn4d@M8sDAA=$;Ju}FR`ST1+t23%gg0bb8h(r<(^0R zz)N%uXb1{*2l^OjcCQd0er2w>iD@AgaXe%VGSbIm$5Y_*Ng3cp``Q?X`;MR76?z>{ z;=?>hI#KB8DKm!{IoxJzHfgc_?6v$RBMZx)Zg$SJe=ZRE9aKdSLE=Z{b>^0Xm=wwS;ReT2$L z0ds1ewk=NpS90`+JLEpOzKN|8b>d78$T(G~5N{$BC4Vryi^O@WWh9#Xfb7!%WMQ9D zSUeqXA%Wn>-Q~l36a2pLpCSGMvmj~|q8b&e?NU{GVFL^y*@*@6S?-2RQpLC+6S}Ic zC8L8{aZ$$$4eBMh$L%gOmrD&+bm#7wf<`}(vc619FfPutwwuAA6PerO5t~Ow3T2Ls znAvm35bov47Dpd_Jk_VcuEwNwk>`=mL}t>v*5ruXX>TRXjJ#Pc8y`_)8Y8s(i1F}n zG*)pji96LU%NlcO6*aiXya6epn6f^PVi`s|5Pa!ih~!0~xA+}}LV@li&9oQL^YTpR zIBQ;oCq1YT@tPDBECPjMWeYZa;tP7wR~Isvu&9D%7Ot$!bmxGkie{SGS;$ln%VZ#; zE0z}<6!E?#lTcbCr0&GiFH0>;YX_Mo_L$aL7N7B);ZmJv6GJSCn2Iq&k_(--8~yQ+ zfLv1p5p_QZv;Q_93%@ZNxyEGH@?1p46OXY4zvZgx4eickeonr5D^wlZ$kJR2NVRqEOUIVNzU@FA#F~?5^ zXo0}H7zy8#wrBRI`guFsx?49J-@t>Htfpv3bb7M`4GdgBsjOFfGk7!rD?2&ZGmdnS zYxd*)&3KIHVGd;^M~h1T^uj#2h<=EjS;V6w{iR8do*dSMsCdg9%RxV~vvfL7GjdZ8 zBRJ)i6*6x+R{cHk(8^C1e02KhtxJjttF6@C+Vq5Q3|B36Y)K{UvZg0SB|s-RW~SX0 zl4Ghc?9n32LNO~MMHst z7V%W2BK}Lf(`|%9kVWV-^irC6!gnDR9F0TrREeyF=DB5=H?dfFw9CTEWLoo}8nr^A z^4mhFVa}D>Z5(2q+U-I+s@kn}y{}B-=2~+sFBU!6pH6VOeTE2n5>Ma<8!a||?4h~Z zP%=i+T+nn@YWSt6sb6PveYZdhhtb%njmjE7vvx;gVON4POI?o^AFMy14dx5dgVA7Q zoI80MFVgwOV&ybe^n-Wq!0TZ%9hTdc-z2Hg2A}ahK&M2#eZ1+idVnP)8n7*kAS<1z zDEdsd+H^QH%X|CL4~gd=i>J#@ce~UMGwH7A-cY~|v8%^{;}pR-50qV-AgH^NlbyG~ z3%gXf?Rm7y#$epqTJc<|)a~SRwJr=7dm4*$JF$j$r^tSIVS-f^yPe#m7Y5dQv7JiR zWQv#tcp(Z~g>h10#4E?x4TniiFE`NwPRW2(1!|u;9&FOK!DSK45-gA*!9qmfHE9Z9` zjosDrYrCq{NG&V8YiIA)2055ac#p`kc@ zOXvtt0Os`CF1v(`dm})!1lb=vm?tPyIb%F|M_rL48B4gtgPFmK%6dBEHF{gJ$sJch z!u#Cv)A}kL=_W`OC%4ztOO4e^X{6K}xt@$^cdu`@N*mWtw<^`^JFR-PT`%okU#{1% zh`7GiEY~;Tzjc8$&D|0m=D%U%ST1+?NbcAKG`@}A8;Kgd_hNz+;;lM2PDjuucDY+1 z!xUSEv1{GYLSYNaAcyIkj8e(d6e;NW7go>XTC}=b9bfym_#I29SLyH~K70m88Bm`+ zE+>?$=r~4R!zUYoPUs>`IiBp`{b{;$&HDA44!=={o$!b+tDln2J~ue-5G}-tXh4yO z6bCDrh_6o$?GhS5lz(L1LNS9q%wk()^vz@*uP6+|4{erd=`%vgnt{n>p0W>2F>>mjOFDHQ zm@qY+BxPReRD)Ca2kksIt+|dTeTl8Gay#Hk(n6wXn)Z5~= 
zk_`5b!G3y6GS4FVI*RD?5W+ORn-5MQe0I(684~cf^UXN;1|ktvw7@uG6R`-LCR4FV~tu4`{o*9H^kl5Mb$if?nO;?QHowo7DJBGG@CosMuTF z6nEON_R#E>pbODzmg>c3Su9c;2BD;H4|-YZka3MeW5bw({BqusMvt0zeljgYF*cSI z3Wuj#jYf@@=<;JYDo6cX;qnqT8GWah2de~zOUoPL2hXeDKgZ5>&=%shtTPXFxHH|8 z2}OHYsU(F~L5 z)Qj4Ji1KV;J3aH*bZpE{C?JtXCi_h*=xMh+Wh{SA$DN(rsb}?8|AQ+hyWx||=s?Te zOyV{)Qd5%>)ZN4=n|)4BGP!5&!PgS9a#`@DtJ8B7wItl!KH}VYU!re-Q0S;1*EjMs-s$kd8D1Prny|Ed3S+u&uJurjpbASCID?PyDs`cR z9*q?CXX@G|L_;zYae`^yCb>vB^ZrA$9+}_l!yz4#heHiAB8Fp1?KJC#<$?x=@}%LG z*1{+Qm6}PLyo+7ar_gBkcF7xEh583A(CdoAAGiZmFY4O31nRhxV$vl~^i|=QkuEDp zl6O)Px_FanI0%oA(a+|n1?G}I(_$h?wcndZCkG&72;NxDynEMxyr?|7hQD}mXi=p@ zWCO#BDD$grGt<(Xw?QZSW4U~HqtvNZ+{Pxp^)GcVU$b+ZQWj4hM>uH03P8pz(Fz5-gK_+bf4gd~034-wpo7fsi#Bnx9esk#O~a zb**C&_E~{A<>~-68uPZG7QU_ahW3FI>-coMCelWvUy-N|`FEgf_W_jyKMW!miA`{G zOJYU8##mq}$M0BqHJuj1#-%1NMV|$)&Xs6Ei0U*;Ns23R*lCDH8zzYBO>|U#)ZCFROo>9?2#T7(8>zu)PLDc+d-63IC4oA1NH5e1<$a_WB?D zDXRB1wRiHz>3&+G-w^r=OzG1w6dD<$?cLa8#fl5-)nmt~FVT5GGWjx_ z;l^AvGQ$Qjo>CiDdF5cszcCTb;a9c0oBXW-5%V@4D^y9?zmbZi&ST}&)PuS*(P z>ypfcM+2<&vG9njIc|s_Tn>_luxv8&fclJcRb#Esv#+Rc)c!)s>FUs&?q+aGlh}ce zB+)m??!n#<#FUaTz*$fFEMqWTz4E%AVWWHbpq@+43cM4Gi~%{$PdRlq78%6Lw)i?> z5&b(`0Ej(x&S*#>ApsuqHGx>3y-q)qj7949^gN2m+fZHsSmsc)w-p!^2|bI^zZ`~m z7K3FZ#w`s=3{HqMPfcD=B>h1l$P`}!l*c}j2Y_Tb&~3k@9n@fXYEwJwb4lctU%WE% z!XmT_gV|T#wKE<&o`RS#=faTFccU=+j_;L0U>y5ot<&wzw|ZphnFi!g9)>aOJS&u~nWfCGM|HQ}aP>O)*)%3==KmNe0XNXbgT)m+6JOk!VSI zwkS-#xGPI-73rlgSZ-q7Yp5nAb*u3vkuh<$Z^>kt!A|QF2iRK4O2*g=8Dth>TU-o~ zl%>xm!yB^g0wX)w`xbjo%$thDdD&y;1{u!wm{gp1v{>5BpbeUrN{-${EWU=%DkL6b zxB6MphfWhsL-Ru{@3Xk-Z?TW=EY^R-*6dkq54?cS;@xb{>2th&;XkR59Hv^U*weYD zzBD%B&$8`0NQ2Cx6;GY*siVcIz0+{lH6%HiJ)1j^T^x^AEvKVY0(#t;n+)wqa*SXj zl60xJ7SpyaSI~1HGYFE)H;e~x3wW#6*aKaw_R1S>sZvSjCbB>2#|m?VWk?!JeDfWD zhTuu>y{K&Z;08Ol`^rssb9D#_A=Uv(D8y4A5(;y(HwnFQo(>;}HBZWej9Crqt&&j# z!+FVtH`*zvAymo6>CAd(t8}5W;Hb7PAM6H4tkKUnF{OJ;I%;5Z+CTH9Hk@=G>`hY= zzk9=Pyj!%}mK&%yw3D!Of^3)1zWDPMIxZo0FAw@z7qBBOZJWlhhDGr}=o)d3D(Ma;cvaHm) z?GgsEA@gxC*8Xx6TGAeO!{-_EB-&x;38tK0rx*k@OMA1q-6^$QvI|XHIQEKe>XjK0 zr0pce!c)4{8xD<(8l{~>X&VHL{3cans`Z%Y7?I@Q6+1e7?ps)CB8X|cR!!HIpAl@8T*( zt=x0BtECHWt5d1c1K-?qz?~`xw$}5tFvEikF@|?q)Hu`I)mo|EHMI$;8}?bmZ98k{yVSS5lIr*D zDpkNObXCifq-;qEO;Bqj1xXj?sBverh);#wM5VT-*F;@5stgT3w_aN1KM+11%5&e?7sKMd+0*#o4Lq+U2m0&*jku9uQT%q@2dPp~aeUG<|Tb$7{?1SU?U!ya=RrQOx4yNa=~ zyg_=iX;Ro%W3j1q*z9Wa<4L>*H6lsCd#~29Ub)$$ei_ac@~fv7r9@M#IB> z^Vaf|tN$IPZi9M^@qa?{kMsIZR{kgmzX&G&2^B${*MG`I;FUtH3A595<&T@h%b^1@ zCwjOF5N7bD5V<*!@(jb25ay8Un`cC23bFCDLgsBWb<1P@Oh(pOZeuUADu$UjjhkaHgxeGK!cRDQFIIccuk|I7~yIqV}$ z^0I~3bE{aXRC@tq=}q7z7aHv%L|J@kuuAOX7ZpVC$MZDS7PP(OgY$m-3SCWmZBuVvfHwg`xXD+MW8-h>4e{P$vEnghErS z!6p;Dfk*p9^oW_kapFEvY|jD@oaOPrS;w=^R-9R(-QeiX@DiP*kfH z#6|$y8x(&*R@dAbDQLap$<7|mf6!B)2ZUe$d8R!KWag$+S5%+aE znAjyJelUtQ2RjEb=8Ymdu-)$1nu)h$QI$&XYGNgCR|12J4{c_%4r6t^VWm4A$NzDs9L9`;6f$ib8baNB-Ghw+ zYR))KWL>5uSeOqI+V$ z31wQRjLpRfcWIZeHN-aV{MqX&uF+q}VE$#(2*yy@(JjbGPO@dT56y)4%}Lvj_8~{n zR&aK)yF9~lrEslqX)_!~yi<42vMUGcB3LPB=QpK}(2$M#0p6jM#nW`!W=&eLL9fHJ z5$~^YkNU9;MiMs&mau#4Z{|{!_SMB`;R`j!a%q>|N0&Fx=fM?i`KpuF_n3JtpCB=N z>N&jxwVmFZ1AYqBjcJ5cY(4~wUecyBK7^z6zlU;tMjDq;s_Eq?L&j$qmjL z-L-YPRp{$gI6{y~**Z_E1*>={&&W5fLCKSOVu59#xC;YC9S>_C@eB8`h6in?Gx^jk z`_(Y48JWeZX=J9jg#FW{)!2R&ZB6AyUA7-P)miTrNVygpSX2TrRY!W>n}&2=0=8m$ph=DW&svYkMad3 z5L>X`(SZWk${v#v1=ny=NJqT!qt~K4IFwRCmd&vkjWGxx&@3jX^YQz+TKoCBYv^K| z#hTazYwP>3gyRDKOzoxst3k3lDD60ReCbQBu*Pj^q}u_EJ&l_{ zJ>`G$&eob2sujMwmGtbCU)$o>fVmLN7G3cB3x3Hb(p~tIGV{7ux`b%p_Bx4HvEp13_jHw9YF_4NCJV^9M#a+N^C=*`KfZt8VP zI+vm+T&%Y|WnqP3Vy#rGZ-QCA!4MwBqF;l=X&xNAHfrm8tTvi5I~cyqvZwb`q(h0@ 
zgRwmhW9c<$awO&n_C~_9N)vC`@i%IcTD4t-8AF>{mBM!?4Xet*^c|~rqwYm+P@&(?EC$l5J=uAU3p0T@LO$IH2zu%bNi?+NS@Jk_lWKF>rFs!0D&BW+X_z{q)G;Cr6GT zH7^d6^CS7LASpi@cx?A@#{;vwN~yD5Ytnra@u6t2qla&m+7|xaw%O`AIK_49;;t*= z5OmF{v^H1kRR_Oo^ry~$>JD`}y;j9h5a*gW73vj-YlBkYkaY=BSKRcm?$Dh;r^|oK zB{r7K_JSMoyHgb`tx^GECvvCObNG*2pr7kd!)fBcx4YYgbwx*EwF_hVe=Dcy_+5$Yop$RWQr=@%C;`|&UKYi`!-(!59sgRW3rr(WNJ-6I%%C1c*)92=$H24tYu z*f1`W$7HR$R%>GM7-X$bbv(~l@QE58{8)>Gg^w*@vt+!2$3)>13kdy@N{HZM)gdnj z$2_RV=qtD*z>iY7)m?Mw#S#4NmGp0Z%_&!Fb^N8T__MKw_Sr^!NRz3wIshTr;V*SO zeuMzH#m9egqidYvn$vD=qYoka_yaoK+d929hm*n|cn}K&@|l=`tGr5~=w>GM{7AjC z#(&Dp2>3&4jU0HW)EdM|iT~`-UrK6Yx7|Yg8rWF{KkKCig{aqY?-Jg?)@Bppz~`JW z4T>68gJ=knF8*J0n&^C1cR4yKdz99oSVF!#J@gZ}E3ihLZVAuAkT-s!9|m%GCDlbw zxLPMU&<|iw*}_!1iF}b9&?7?()GwFxZ@$h&fxq;XL{+D}*SRS0x87=X04NIlrJ|^# zixX(A<2Q0RTIZsuQ&FIgCi*06{6$gJsVMjlJh&)?2XawSAWaknr7T5()H`eZr_79i zKQ0QSj-nt=O8jSs{!&t06m=;I@WXiqa#1i+Q3#{NJ8psU-&Oo2@exP}6hl2I2KuSR zAU~xTF!XrEz&}w8K&GQ)v>1?kE(T;>S>j@VsL=D%;g~VlnqHRQla8ytv(CxlELlbA z3gX2J^v2r`j#o?Tc(HS>hSAqWjYNpmO#n@c(39cvyyMna*W^dJ)w)oFN4egjuNW;W z2veBwIftxcSYoobVwQR_9@ zT+BeYnK7EQdKqKcslJGm|mNH zh#$-*V9-D~{vm#wZHSKi_y_zjG-7O4X5<%z#9UNT)*s@hv3o6vjq;(f%db!B#*7UU z%}~1tyzmR7cPLiiLvv@IZ8KlnzR=~DaEsq6cKM~gzTme#$s=dmzUbwbhV}Ei?Ma`) z`r?;g8rCm>ZLb6U71K!vuH5nQpYn60K7N*DKEH z>h6le>v__%UZEKWo!{g9X|s;*I(lYv>=;{~nGyrmxRZQ?gK-lpaksX1oKE8O5i%Q{ z6Psk#qUgU5hi$NUVl#0AObEBcMvA7&WiU>B6R$8if!$Zfdxu_Gs-C2|T9MvqwSI9b zl0d0@!I`gaulS)BJGB<3U*1!lcn@~1g@=ssy|B~9mc<-f&ht{?1hC~>saIFfwRU|n z{`>NwkRB`kSM#NKr_C1+y8g#Nx|r5rs{(~#`_}gZ$qp%8%L~2ylV9f(@nN!LD}`m# zcrRrTWw4CHlUlQi{iY(WS~>3gW@ELAQowtyCu+TAygf_ziTP<-_e8A%uD!?8{Q|@L zv9#CC>YFTN2O279beI|TxO5ztm^i)Bp<0a%Cm86cPQVMV4{{aA zW2s+jE~8FjhSPQ0_(j_Wv*>eL^~y9BB|WS`wu2ufOn^}S~>HhWBM)oD+c>h;wUG?RihM60v)IvuUhrU3%N>6AA+9b~(KJ_8$TWlE&T z4-RAaI89#$_Lb5FbAxd{$ZrND-xo(8sM;IYU8#u>^fi z+2ymzuAtC(0;f6(RMF8$)}fQb$+IM>%fX}K=u)U_KY^vIW`^>$tfi%2^$V)J*JIy= zIUv+dsfWaKziTj!F#N*usxbGHK`D0Bz&g)%D-OUntIRLz++Dh$aV zul}ism3!|3*fd%zNywo9B1VcNsjbmwkF!b>8ah>`W{ob&@2k4#a?r!igQ|R{BLzx# zxmRs-eIjomN@)ZxVuW0pDg7oc-NIKsYt3f0L;1m5m0Y(7ssBY^F5w$kiqRLTa)c9G`6F7|lbR@;%M#T|ZNbCzaxrTRKeD>fP=grG5! 
[GIT binary patch payload: base85-encoded data for the committed object files (client/src/main.o, client/src/render.o, client/src/vma.o); not human-readable]
z%eblH^*Xp-TKj8df4kDQyF_k~vFuoTuk`oD#^iB2j+c|m()zXaEO)K6el3^RX}uw7 z>letI&h+(G;GnerZXWjaS4vyIPF^Uj{Viv>t8rM`cFH(kTK|Wn^?$asacw=F<6PQv z#TGZ(EbX}Ky3w>;raa|FYoxW`lGbj6yj)uQ6=}!wu(Wn#H!4eOccB}Vq#nJh^WA9u zX?{Jg$6kM#)FW4Q)Qu8p^_RF&O&+Vf*p154#uZB2{*ZjV$_L$OKwACTZd8y~f0i3< zJJst?ORGO6bxW;oa-)sX>UZ2|gS7f>HyV~!FP2NSe32V1lvc0gM)RfBE4tBu98$jn zY4vxU5(HPPyyr&Sq}AW*Ml;gtZ*ikZd4TG#mv%o~Bdwj5yh`h>cB56&+8J}BmD1Wt z-KZw5-eRd+W_84kD$?o=yHPBy-Xb@eEp5A-BR>wur5zt*(%M@oM{PT9)R5NRayP0= zYj2qw<%_)DW@+=-b)zY1^)|WDq_lb)-DsV(d0Zp=Eqs`Llb=_!MTD>VZ8ke@c zwbJH4bEB5DdaGT1KaSU1@Cs>%r!eyq&JRM)juMXp^*dCf#VGwC#7?XtlKZ ziL`z!k#_vo+-O8v{i+*Pq}3mGqs}Y5-Uey=t?fn=((0{qqj72V*1FLuY5Q%3wCy&e z?YHG_v`kw4Q8!vDt$yN0!_xL!N!mONN^57XwD})!qk^<{X1P%yt(_g^zR|R_dQ;No zf0G++lvc0fMjNEnYrD~ywC&cU&0kU4@jpu*F1MfPd5g66bD60AW;dFa*8Y?mZIagh zq#Lc5{p+o?`m5z@Ro--?F=_Qzx=|{v{t7o5l~zBNR)3LvjmpbzRFYPIz8e*#)t~1^ zbEVyf0%^}<+fMLyw#qBC-i#aN(%RYVMqO#`OuNxWY3;0+){hCfMCI$;XsxvRnH#N< zR=?#&W76h#nY4Z+(%M-fFV}iCHyV-FPSuSn(%Knzqj}Qm&&65N+6m-kT5re8`$pTP zwX@BQwn}Sf#*HSWwbPb%9~_r<{~M9^e(R7tN6(d=h5EgYTrEE*6U!=JB<+3BlC<|h z=gH5jeEN9(-b8McpV#tn@@n!5`Ly0&ZMacg+WVtZdN`=*`Jg4AC+Epa(Q;|?QkNHN`7$?JDy?4PMoXmCtGUsTyg~D@x#ab`@*RuGd%w54(KdO$%D1{v>!tb}m9-i%pP!o2`c;(Ys^7C&zNzTjUoUO@ z75R{yEp5H6NBMJVP1^CW^~Kg1^;@1J2c^wNAYY}t?1dh6V1t+aMpl&_X&sJ!V$W0bF;ydh6l`EoZ}Car!Vt^FlZkJ{Cm8;wvtEbaMu zp|tf%Qjf;f`EIo4aQ)s%Lhfz0LD)oq1P26Z$+Wc$_ z_4)JX>HD$Z_$q)1E?X>)r z#e0QB`sef`}YI6m&zBrQB~S@DsD6^bxW(pZZun3yF2Ep-&)>tqwP|+uIe^7%B5{* zleFzjO54sxH`*X=J8d^wFLkS{PPkD+{zCOfrM0tIT00}s+NrwHu(Wn!H(DfhORSdN zD3Cu_{cZbuJDa7o)0NiFv>R=b*3P6GZIsqd$BoA1Pu0$HY4z(;hj4Y78!eSqKXIca z@J7QkptO2(-DtM7{aui{Q+&mcB73f??_$Js~g;C9m~h1F4@(!ZWK#9j)y28l)5BW=ep7M0k$u7DXngGqczgL z$Dk=~ysM(&NYiFw)ZIRYa?naxXE?L#C z8?BL6e^gp~HEGB3u(W=~(vH7HZnRKZdnGrTFLg<)7Tsvu9KU~Tk#^r$FTbJd#ah`v z9;7a9)irLkT3UNeH(DiiNve*y(Nft~{fd0Mu0xBY^>e;FUJgoYcdoR44!BW4TD!B{ zD3I3fj(z$@lk%HtHIE4?MCaRPR;6s z8?BPoUR_#yOQnr(v9$S$rM0_A+W5+DRFc;2d^ajeYj>U-ZPdy(zID?2pUOWeFO=49 zhy&8j!`V`&V71^zThzEM&!tX{>Si};N$byQsYAKibfYn;|5jJJQ7UzaRadxCS=u<~ zNgL<(g742w(#E?%+WyR>wZBHHE7g`8HKnRnSGm!c)FDz`=|&YftolRpVQnYhL(dOt zXR|y(PD#5jZjj$n`C4h)$)u*Ny2g!GOWRJpNTf z%k^Ej{pCxgt-obAU!J>H=)1cDHML;5w9ivjqesK^7!3@IW3kq>Ua#UWddWpP9*5nd7 zB3~mbvL<7xV{J}ZULi~JGFg@0EXYe`Aa%^n>1n^cQtNM%RXHQi zmAQOEhfJ95ZZT6UQ*c2G(&D6RhC~Rxrjgmar)Ge&3u>ZjgiWAvqxJ zcocS@nPcnyV^?2qyR`NGE^WQP$%6c=6u~z9qikt|I#hOVN=@DF_I|UOVgu_qiV52L z(zgBxRxrjgmavE+4&ne7Fu>l4-u^b6!5q72?{8Z_Cb5HUoWOC+u!T(=!xS4>$5BkM zh9g+P7|U3~B8E7K16aTSdnYh{wD-epoH^S2;FhOw5kGL{&;`<*u^Qd_sy-mgZBQpnk;uxmb zz&eg%f;Ak$3dUH*5*9JUK^(vW2G~2E@#75U*u^QF#16J`0`2!5{r!(EY~mQE*g*R| zh}9d#1Zy~g6^yZrB`jiygE)W%46t_`l+7(d$YnXJD#+V7Vv+wYb7$5BkMh9g+P7|U3~B8E7K16aTSd&e+-oWUHsXuoH)aZh3g+c<&am|+W> zIEE=Uu#TgcU=2sGf-#n{ghdQ-5C^b;0rnO!ew@J^yEuiD*g^Y!tIf*(`9K#eFSjSOJ zu!bX8!5GU}!Xkz^hyz%_0DCWG{5XR-c5w*_IS4zx#xTVO)^QXQtlL zu!tcJ;s6#fz@C1v?;rm-gE@9_3Ma9HZJfYy%&>(`9K#eFSjSOJu!bX8!5GU}!Xkz^ zhyz%_0DDIo|%D)^G$X7-JbrSi}$qaR3V#VDCuAk29EK7pHI%JJ`kv9LEe> z*u*hRv4M3Q#RO|Of)$Lhj3q2$h=VwQ1q`rv1mnjU%(078IEfu>;{=XlhAnKOecsFN zHz_u-j-!}h4M(toF_y7}MGSEe2e5zv_6}$KIDdCkHqbtgY5f?* z1Zy~g6^yZrB`jiygE)W%46vsUHTB1jGnk`&&eO`L&_3^Jxr1$-z;Vp5g-sm86dPE_ zQB1IgBUr&0%UHr9hB$}=Sik^#`p{N?{5XR-c5w*u*hRv4M3Q z#RO|Of)$Lhj3q2$h=VwQ1q`rv0OQ9Q%(078IEfu>;{=XlhAnL37^c`j`&_!s=O`vv z!x5}tjAblg5knlr0W4sE_W5<&u6=IZoI(5iy5%lTp?!|smUpm?_PKRiK8_i-u!&=s zVgu_qiV4!#>!5WTW1!F8@35yuwAP!&w1GLZETYv0x_T~)c*u^QF#17i$@~z$k zj$?)`Y~mQE*uXlDVuCds!3xG$mb+^ECHXvA#1IE@01FskZ$GbRpX0asGnk`&j^CC~ z;UwDU_-%O`C(u5}Z_6`mVH3wN#Rk@KR2Eb}A=hvOD{>E&$K*1Wu!tcJ;s6#fz}|rO 
zcN@-Nj$NF>N$g-7CvY4yY@z);{=XlhAnL37^c|3I*ww3H5|bT##qJ@7BR#@9KZqw*qg)naRzhj z;uKC|2irJ-!#>!5WTW1!F8@35yuwAP!&w1MKa?_;Chv?BW#K z-*?zJI@rbu9LEe>*u*hRv4M3Q#RO|Of)$Lhj3q2$h=VwQ1q`q^oAKie=Get4oWu^c zaRSFN!xlDi3{z}i9Y-<28jfHEV=Q9{ix}b{4qyQT?Cs6?aRzhj;uPB7&Gh#_+TY7q zw!f1x$1%eeHgOD7Y+xNnF~J&+U^ym}3{Ga1uM%#t9t94DIit ztbO~tsQ&md#Rk@K6ceoB2v#u0GM2E2Ar9gI7BIk`9;W)^#~IAAi}rU}w%#Omu#FQq zjv2PFiDQ^z1M4`73D$4~D;Q%LOIXAZ2XO!k7+`OA#*Z_YV;84z67BE8Y(CmJf#aBA z3!6BGDK@Z+#uz&&D->dcaKh9u|`kSPF`4moK2irJ-_V;f3 z@9r74(Ei@dmXBeI4XooRCRoD}tYC~~EMXBt9K-=EV1T_@j2~w($1YCcBz91LD``#b zK7sm<%zn?4K z7B+DVQ*2-zM=`+~j$j32EMp0a7~&ufU;zW{1&kkOFvl)V;UspjjT1PI8Md&AW0+zC z>o|%D)^G$X7-JbrSi}$qr7rcm50DE|r~dBo_5Qw5C^Z$k+n3Xt`E_mwGfXkT7()y& zU*`3@Qm5GNQ&RV+IUSZ~m|}u4h8SS}M*4#prkG%iWog?jN!xD7@&N67;%%H=+3%n1 z_mAZnrkG%iAqJSgp8lbIpS-o7VuCS-7+|g)ZtHh2!xR&YF~k7vd)WK+F~bxSj4{Lj z?R(Vw^)W;H9&}rtV2mLKn7@wtm|=5i&mYS(OfkV2Lkuv#oc>^jEotpHrL~{3Ji!=49F+a} zl(v4r^87N|#|%?UFvbuA>|N^ZZj;t-F8lifGfXkT7()y&*A1s%A2Uob!5BjfFu#QQ z*p@c#32FVySe{~nF@_jm{%YDo``&fyUy2FVq-|$J+IC`=hZtafG3{Z7_WkSq{VA<} z%JKwjl#fszvpmEA?fcsM^M@Ixm|%<{258^s-mi}trkG%iAqJRVNPWyO#RO}zKVPyx zUn~zX!2AN*!wgeQFvbuA?CHV9wzEyzc5>MtA7+?hf-#n*)h|h_AF@2a{CwKO3{y-n z#t;L{&!awOm|}u4h8SReF7+|P6cdcGEc@e;{qe9oz`RO(m|=m|=7rw01_M zwG*>E!~pI4_^nL?L!5Bjfu=fh@$2MvG$Yp=Lm|=6PcTONz7<=~zHi0W z3t1juzL55@E&Jn_{qeIr#ROvvF~IzI+QSS}Ofbd}1I&-3K4zF=f-!~|VE!`dV_Wve zBm3iFd5Q_f7-E2ViS{tVmh8{J?9V^T6O1uL`(6iI-@ezu)(=>oA4~g~VTuXH7-E3= zG1SKlTe9Cj+3z3A8!Wf)FRgSl(v&1j{p)rZ$prkG%iAqJSgi29geiV4OTVu1NP z>SJ5>=STMEhvg|I7-NX`cbrzw{*Ke?1uV~xq~8-Bi#1zS8O?ERQk70P`cf zy{_C%^`@lN>#)4d@(Gq_EKf1P7()!OceuB^O_q#fG%@>#{!|ERQk70Q19W58JZeKiTge%TsJnZhz0#Z=dBch8SReDD7j0DJE!t z2WH!~zXP*&W0r>)VE#hd!wgeQFh=|PDQnmMe#+VnSsq|MM0=QFiV4OTVu1M})W-}{ zOfbd}1I!PmK4zF=f-!~|V15wwF~bxSj4{LjdxJhd+oa7;E_cyL?L z!5BjfFfULa+p<5uvOm8pPcgw5?ekx@zJ317)(=^3pZl`q_PH-x90rkG%iAqJT5N`1^QMf*I6R^8n`4`Sm@SRP}D0p{vV zzdx8^iV4OTVt{!-eatY$1Y-J(YAKJzIK?h$|m2Z z|IzDvSLn-?|DXKo&ns*FPQO(DhS~=`Pafv$|4r9LE1y2x^H$v_EN{~Owbv(}ulDu- zfBRc6-{TCGE1z+z=fL`-d?EQh>yPr4G(Idp+Mr{_p<2{@LU^SpOvQ3CwSe++%!~?_8hpy@vd< zdEVZY>p!tG?>v6U>FHj+>+QZjKT7`(B_Fe&#-rn7_WQm3ma?yZ9_2O0cM18TLw)@r z*59VkpF`(B7P;Z|}hIN%r4b@{#PnHu-l4 z`1T$p-^2cSjJ(O_Q|3KeNV8x z9?)}~9benYZ?f}K^U-2HzQFPFC+ZJ!e2m=b+xxQ}f7<@f$cM3iPyeu&f1UYWL%xXd z{F59po~5gO{hOJOFOvrt&vx=h8PCxl@%3L#|85~aVdsbXvyuFM`nTstef{kRdHHhk z+vwjn$#d!7;p~s+)4vPJ`<&$a@3UX__I?q1Zc%=mwr%rSsPv~d_}S%Nejnw(Q=#?m zyW}@Ao@w&YjOV}PKV9baZ&>5o+ras{n*3Idw-xX3?P=5f?WZd||NBy}e=O_YW#_B5 zznDBrKL=Yrq~+~@ocTMNe8JA^&r)yiZ055}`3?Fx#_B&!KAHLa9r+oymwwFK{{#L1 z1NnDcU-tUAmw%r5Sx%nC{M<&~%KiBZ#Z=tu}B6nWyxxw{e=tR$Yo2bA2S8_f1GuPA0$v?9B()qlO^Z5mL zdixXa^6{Nv{Z;t}%0GXrmv>fr`8U`+(X`z{gab7 zz0lX6_bG4hNapKW^0(L@-y|>N_z6Dk>)*lgb3FO&^zYT=x%BTA^84xEAIR6!zXh4M z_bcv~?;zhw|Gr563G?||@*C;jQFnX&1@!N=+O#{ z=o%D>O`r%En!y?+h)ALOsF{m(Lgzah`Cz5kF)%-1=e_5K{qd^|=zQXjas z{x(1F<=feQM*a}n`zrZdw)Y+KJ$rcloIJ?>+WQ`F?@6xbi^%U|KBKi>{^lor{BNcF z5$-=L$-n0M^MB-q&Kv9RN2vcG{kw;}uRU+6e4o#G`wuZ6N0QH=f3G2bkNeC0$NN41 zAyNB!FQ9U{-Vo92XICogdK7=FAA>!~6XZMJNxf57#1A^p4M44_KlChb`~OMGA9}{;r~7@MpZCvruKd9BT%Px~4|{)K_N>=0 z-s|~3j*k+#!twDD@`bui*#2L=$=f^W`JU&W>GhxF`m>n+egBW%{uil#9LM)pc4qe1 zQr3SX$H%WI?{R#ew9eaq9>>Eey+`z7zskGY?P|J%-QZEr2xdw0p}4|6<} znD0w?e*E>3Uj6|2cD?U!+xs!?J!{9K+H2F^5a-vN`+a+RvcC=^@3HfFntTua*+l(w zC%pZ)P<{~e{~_{|+&{la4w;`t6O3PdvHtyxd^_{;C-UwbkMsZJ$IB{?pZwRJXMaKU zHODI{?^;%S0pmY~`{!!-tH1e-F-`_(1`Z3=A2J+HFJ^z6G z`O`d4lQ(mH_zn3%?ErwKEZr_iu`Y`7mtv?tDpO9eE%l@n(N1&U-9}k z+51x}Uqrru`8pyHEw(DeWSO(g7Ge4|9p`A z67F}iKH%-GyWZEogYrl1{M2{{DZhg8&nF*j*CUlLA|Lj5uYV8q?_vM1C*Q{Yewh3S 
z_lJWR|8hIO)ZSv+o0EBaU*vi*oB8<;`E%Sam$Cl0Kc@9H^kwRoy$Wl*{?)X9-mkoT zNXN0gcK?OvB^4k4;eYe>KhFAJ;rMxgd<6HOm27Xl^+)|(O@1He%Y63VjW*vZUrYJ^ zT<_cDk9^p-U(kKiUS;xH^3QiZ53~MH`FUllj#GPGM;_6<+Uujk-v2V&|A6&h+uuU{ zA9DTdkkYC64rpbr>M&)X{@~F4J zlJmRpA#d+#j-OM%;rRmg=lSH#Jg;tE=JgL{enRRWp7{2!+*zOJ^>>oraH^Lt|E9O! zVt?I9eu|$DCO3Hb)$EVolV8Mq@AZ(E|AG7aA>@$xE|P!4^U^}{waoYF|y?;OFdGa{&x9DGm zyny~KBOgfrZYM9He-Dyx(EG?XUz^Fl;dyX7c?JDFw&UBskp9-mAELjXBEN$EPLl7X zzkelvnEnnu;_dB9e=j0;dA_)vyeIvAnEYA#_h)iM|6cNKZ|^AjcQrYof47tW!}W3l zIi-JpAjkA?|3|&OM|u8<$v4u!caeX^`Tr1kDgFB^`8lpf^EP^WH~iV>`}O1j&lY~j$8)9L*R}pkQ2y11=gFNpSJtN2Hhue#vwlwb z(QLm*{*r#Kv-O8p`}&*g`9c48s^xiC?k5NR-nV}&*Vkjo`||t6PkhJASF-;m$RA>V zZT+5?|CIf6?GHTvg#Gcz4?RCd`=@U5ylk8Lqy6(Jxv9=sew_Smp05u4k+1&)p2yB7 zpGAKzBbVvVo#e`PZ~t>sUjISv$6qHuv%%-1d5_P>o0j?dk5T^*T%UhSemCR0e`_p11#V^5>bKUz67|KQDdE>woAOZ*O>=_iw+i zd9IUBwEk%OOW6LQpYirj<^80L>+hN5SJ-@N{W|OKVcSzq$!oYjevIw^o$K8Z%vb)L zx4(@1(R)0f&i#0t`C3ALHS={7`R$DNBjhjs&+CuTUd8&W{|pXKRKFwG5tT0{3g5psXr6+XFJbtCsTeP z<2#Q$!uUqX7hdW8U-AR*|D`-{jFJCrKVRwinDbNb-%0HMBgoIZ!OKq}-^=)~Api9B zUVaPtIObywc_sCKKt7A-gWr*#=6>;S^1=Ihdq+=ue{N;|&LA(l&dbjwKhFJRDfvsU z@$v?_#{7Pe{I1V=c}6~%`Cd=n%>Cvu@&U}}FUY^(e)voB66WJKU$`G0L;f=TJDvP2_md0B>*(KANAz!xpZk3Mg8TI$g}CUjHv$U;pbo&-|CXmh!`%@balQ`ufL^pW}XY26;C3pR37B-|y?cliXr{KSB<< z{@z1Aj`@0!{LjyO{m03-?e6*4mu zUj94sP0Z*2$ougA_Q}8W_Sdkzv&sL|@40P%EFr&~)q!pNq-qnCGSBuaVzE{vbau-cG)h{qh#{vT0(0{izF@(q+fNB$J^H~1^>|6c6R`Q)Px(e*&< zoJW3~@m@`SEBo_ia-H?>BLAG@@m_L~{rgRF%KqHvY2W^l_j&)%=XvNT?w7A6f12N~ zeuniAX8%1*p2P7sKlk<~*GO(ko=$9RbTTt75e($wXFT|M&93j75%-5eE9Qy{qhmM{#`bIYOh3lFa4y~ z{|oD%$?+BN^Vv<-p4N|9|3y6CFD9RUhqvD@diy`L`?=O%O8L@NUcTj}UcRS2uc*91 z`K5OMR30Pu?EF%0kx%=O*KfVd>)%27IOX@+{?__!@*KNfC~qQvj{Yq=$?N}<{%@xI zhxBjJA}`<0_PUYhNyfj8^~db_NbLu=dU@vq-oMqa^n4rdkI(S__{X01^7`3c{<{Y} zPm}-r6Y8Jm<(nS%Ts+_Nx{l|i#Y;TD_m^J2?zNu(^`z$&iRY7l=Xvr<&)4ngx%_(1 ze=K<3LcZ}K>c7#;AK2o#dzI%)9`(HPYR@mBy!krMQ+sOv==@lCjpqZ&;kBNxvCoUB zd}!43o$T*zZ}xnqU5~YY2iU)V;pf+o9N+5O-+HsJzt0CfHyWNl%kfoW{mUsIe4m$p z%HBWG|D7Sf$Brjm17kifG0ge9^Yhoc+x0~EtS;BOwyi;YLe=+4ixTtWR4zToAb;(6l*U-Uee?LAFi z!Slk(FM0V5-0y#NpXY?o2;$TOK37^%T$DgM9l#WzUNa@!X<(5#@_0 z&kpwTCoi(~JHbZ1PiC)|zsmFQkeBbMdLDm)m!EaI=g9-T{IPYOmwwaB?;}rA|JifB zeEkN>&+xqLgz+QLheqwo+Phap~%9rSvwbub(^m6-qJ$pS#`PRKX zA9%Nyx3mxJb=Yjr_V4-F>rv`&)X&ZKdOf*1;Q0aSCwk7X*Dt6)Nxt?VZ_oby9DDs{ zuU&$D|DWqw=TI;2GM?+_dA7f&v)2sUo1p*aGTt%z_s3&={h#uq~ zw)wH|r?%J4{rT1WF+YFl?;n-xnrg4#{fqivP$j)S{CCd_Gtc|~+jHkjp1<)I&m(I* zAGOx=+DAPv`;_M)ea_Hc_V>QlpCu1^{=C)o#mcO8-4x1jd`B^KF|5PJg4KHzk8$SMfZEI>*o(^e=g&n!~STD zdiin7eEs3idVZ?sxki8Y-tM`3x0kQ}ljlO}`Cs(6toxe1zBl3Ji$CVMnt9$te^%?~ zJ?qbG&fo6co+qyH_4m`g+RBF>@ocBD<+S7Z^~~Sk`#s-vgO{(Q{Zn7;x%W0NpF@8a z-RgPeb6&pcYo5>gpXb5`&%dC5Td(*0zy9?j2&&h4UU;gnzx7R??fV(5e;YVH9@24a zdF(4*9>3djiTTd{;d$kHFaIs&JFfM76M5)z&;K~j*ALnLcZWTXzR}D7`eDz@|JI-W z;H%QMzgEY!y?$QzJfr<+udf{Id2AoA|B(|tPg^CuvZFn()_uxeXCLKx%U}C3IEUkL zh3+%d%c$r{<9y0yI6mv?!)#vi1ypMkJ#%rt!M2` z?CN>?6P~N~kf+xjL!K)-ukE#%^L3VvTYK5R*K7BO@{4`_ujsmF?Jd`O_F7J!)^TC4 z6S#h~8Q=X}KZ@JD{I=NJ>pkr4t-9ZH@*cl_U)?|c)1X56wbNGK3FeZkKlR*sx3B+> zi##vpcFJJ$YFSQ&5E1vQ4c&q2B-1C+%`TA?0^Y%`h>*e9IUY<#-KTGxO z^;Ca;^z*TPuCUi@b>Fk{CBOB2G3R&kwC6hY7yjDw=f2?U&-;z%tN!SD73E+0isz1g z?y=X0$@X)Qz2=-xIosd$OwT>a-+hhcP7p8j^?!Di=i%dgK3;RJ=lomhv|bNf?Kxxr zJ)iQW9N*VazH+(O-%R;3?oXwqUcY^;mmfXqdE-3K@1nf+BF|0A!?Ncsl&>f6_Ij@$ zFY^9f@;c80H~ISCV|=qY-VdRF?K8dnTI$#D@bQ0>`pZu7@_XoiaE#~wqy8xQmwWm4 zmc88TfB#;~dOs-ieD6)zB<7|-ACu|D~LlDEH{_Z#l&dfuSp(q3nD@e^KHKx^S)R}1_!)V^8~DssR^AEPT%T_1UoSgB;HLAZ*L>*O~1$U=5KlXmyz=go=@ibQDgo3nZEwO!(RTJJ@0C+cK&?dd9RnZ zd^&>cKF>ce0je;J+D0I<+rmx*7E#O;`}Ufe3x|I+5SkM@b%~Y(Q{?Wv;8}( 
zR=(wDp1&h3FC*Jyi{)|hqx*Z#sh{n^`jcM&)1UGj{?zl1XFM7r`Ta{Q>p4o-9eXX(`)!uz>Nv31X$O0* zXZ;vlzlT51=U?#rEV+J<=Y!wn>o2+A^HI#Vy)SOB3fpVlN&c^|KeW#CzxVRoPCfs4 zKhNRao-aGuvaYu}ZtZm)^Rbp}-@9q$8_4rc^Sqet4IJY+V0#z#=S$a#2fV%IT5jXd z-s$^xt|51i@cGt~b=&$8Zsc$;4zZXyTd-T&j|GjH|spZ(i& ziSmzLY`GKE{^Z;HQUCnu1d9&$_3z{SS;GDGe9oUS@|TYA_7>lxLFu)M_N(`KK9TJ& zqx>|=2fyUy_C4gb{d_6wS3MUvKReu?8rORHA>1z}xW6TAf4RUoa+dHLyHfBFEg z|F5rjo;c6ze|K-s-Sa&cF820fz3*wSQ#oFi(EeMv9&V=makjsL^4IG)u;U@T(Cd$- zo|nDU^LcD98}Yn^?X6(@A7*mN&dx!&ir*Ofo=@->S+@3_@-lb;{o zWA}R%jb7yC`@PA_JCWyy$n%FiPwTpE>o-pIoZaSm@hd(5;5C+Yef+2ArmkzY{=(36 zOdizlt?l(#|N0UH>*;U3e|^#C@mT+5M|yk1yx(xrj%$s zr*k|kq5db&_VqV%{rl!WE$jOHjed86%i7+4{(NtLfV}xO&-?QH*x-6~wa$Cn{s`BL z^En@;xL!^6k1t(cxt=w(nf`pw_wC&~%k#Pyd4BpOp4S}Xd8psMt{*SoxFqVUBvZt3;Cb@ z`AdWOtbaVm|NL+J_HX0(PlnY|y|$LT{R;07uQ}Fphdjyt8sT~3Na}B){8{$zH029V zxBB{di|36!KI84JU+4SlgnKRP`Rp>!*Y4qYgzLjGw-n0FCM)rDA`_<-elJZOX`!fwfuJ`H3ynO5gpN}`RJP*Fz>)-V<&-sm> zfB1XP-BHh<_^IViuzaECw0}P6ddvL3=Rz+Zr~Ijk=hc)y^$~BczSQgA$oaMS7VpnF zv6lz@zHdAI3s!phk<;GZ4&E;)(4W$KynNT4`;YVQinD$F73Am9{$j3oE#_x2*PC^0 zZ{AzHy}5tz{!Cop`)4!TYw`ZXBaDA7?@v6;_zPS=4rcq)ly5!G+uJnT$N%tMo`*}m zy;m{+J<5-I!poa``T95Ox!&eK+uif~AM@Phe)sDOJkKh4`6A}OK>PMRG3{SCZ@)(9xMX&TcPX6+0%V{t~ z`@dwo>m#rK0s6D?B+ug?_4T)sPu%2rfcI1WHtFpz=KY(M65@|8Ycjh)BSS)O-ce(igG?Da&~+YfnvxkP{C zzx0cP({>(DXL|ilb3Cp3tC#8t3yo%Kz{~AJ5u5eEqw~tJq(QX>Zw`UVcCA z4V~)S+lTgY>VNT6@9&1)eEqj_JS_9ogOk7D<)i$(KIci_{zBd_$@}}K6a0VOeG8Oi z$5q()C)*%w84E|)0%Q-lkHqy6+s?VX)%W_s7MF+BB5zn$%w{*3xF+Li@J zU>}eVA{ZiabT|q)5I_)f5FkJSVJpPMfQfMqCdiJ1Wg{Gc4H1q424s@^d#~!%t9t#J z*;PcJk@nWBdv8_Ux>a@S*6(f!H$)$vk?{LOA6_Tz=exxoeo!0^-zW9)&<9z5Ctu6y z-6!iQrLX4jQ_>&2^6NPK0LT%4Z~Ji!&&^%)!%Y8^U(fL`gm#3#rLX1ib@y`o=-nK? zQr0_O2J-}LFDzWHY`4C}!UGycyB{=>I$`0uHGf_?ce4u6c&ziaM+%Q*aRB>t8k z^`ieF{2qTL$3G3@GN$)CvR?EM*u9va<@-7Q4PYl=_#?7^u_W!cD)!wc zUd!?4CH%4_=Jy1wK|R{==_j`p-yxK5&}xzv~F|zw^@^|3g$>SYP}z z4nMKN@o2~3?<>EF`Bk6f@RtraeihQi-}kP|t7>3ALRHskQ@F!D*AE$P7c5KIP zziaLxN&n+Fa(M1z&m;U_4LN+Rgx|Wy{BHRzik`dub`D?lpE&%7lwX+ty_druXmk9B z1^=U>51$wR=Hp+>{I9w0`9$z{U(Deve}L0J{hJ(qQuOazk8pTT>boKB;SrJdWl%0G z&z)b+^gl%Pao1c!^y#5DaC}4J|7;ntGIk$5dD0=;Q#bLF#YjgWc;hC{GdN?=kOomgjfCI& zDyFYq!~Cwkio z+eto9zM~xe9+BTb`2X8$nEp|T|A!8TpOWx*iu|5|c8b5h5dQZH|DP54-2?3cf8Q?h zo0s@kUBl&l*NqqpexuiO_^}%)IQO8)^G30v%vBF|H}CHg{)7%AmMkFIQ((= z2kQsl@xb2~NFLz-{YDP|@XupB__v|m;IBK!{s4TR1%Gc7{eBPFLHK+1V~pQ|c8$M( z_$dzG4dVv>Zl&~KeIEJ|{Jlcj*L5HV{QaKvmmmHeuKz#0f%Eso1&$v|dutrw@Gr^u z|Cp5LX(%_A=WdCA4apPwYoY(hYdHQIiT|ARua64-tN%5}-zf15iyXc|#;cRU|B)}_ z^xrA`o_H~bf2zdz@0R_;pL{ik-y`9!!k7Ow^FJ!- zz5j1H{G&g@{BOoeg5Mv13x^*T`{|{UUX81J?xSF5d=<#&9_b(7@+R0L1!25b4}U)* z@fXCt8J^|%Wr@EG`bEUQ|C^X0j$0UhO!k{~f0V-));RvtU&7&c?{WA)O86QH*FMPc zwc9xUeUhJBznsJ0FX3k-JTKw*znbG;nBYtLpS%V6Jvz51`_E2D_x}x%uD_MveeJ2)W=m4z98Xmf&Ah4_@w0TeJ_;sq&_|) z>D?gm`Uc7GK{|?d5Uy}H%1t0f;AphT&a6{s2vY+j*g#ID1cS`T$@;@T= zdH;Jk{CNp~<^#;{sVBJpzg5C75&iG~Hpf3C@sCM)@00%HC2z*~N9V5iN-kepuWI=O-Bd3Gp8^DSX%5Q@_UPtCw+l*NQ&g_85on7X2Ik7Kh(a=k#9xat^;o z!XwGwgA)F&KjrwAl=m$XZb%3BxleKY)6)Mu^f3(FyKAoc z7aU&4@$EA3UrYG47eKxczE#4X_+`L<>0NW1pJn_7#DD4B+)%>LM|=q1Ea49T{>v|e z^{fBM_*V)4yZ)TRe|kIffAC2T|Kd+^{yy?zmfzbTZLFF{{*3#hx9oHHo+mlH`~Zia z{&Nnm?Q;03PjmR;@8j^}PjeXGr$&C){Vj*z`63SA@Ociu0{re6|Ad6!E9u|+cO3t1 z5`V+rb9e{xi}*ME0f#^P?>IbvC70(Z$qW@lSk?!}p84u6hCEKP~ycMZyT$6x&w zT>hsd{CJ=FJt_9cZwtTAO8LM0+d2Na|I6w9xbVAh8S{HDjE5+nM?cB&_e%aB{SuD< zpzwd>_c;DK;eX$YIR0|s_xR-;zE}9&`YFagA^dKZ`O(uNud8Hzr6%?Lkkt1FpgmIk z%KYq0?v(ufcjouGw{!U3KjCmoMLt)H z{@wOM#=lSM=LI6Kx&Oua`|wY5{FPF^82TfupS%B(ke`YFtD|iAcdhuZdvP}Y0jLj?{&LV?GyE~oCo}wG7XDvY@lDVt6MqQxZ-$?F 
zb+$bJ-on2L`a=`{i=h88!@pvM-({8WHCFsH(4I{CS3-L-!%tc1Uul*92d(^sR{Xvd z{t%3xCjVQk_+PO2{hEb;4Eh6;{zDf2&sp);Tjlv1OWwa}h3BH^EvstfY`cE6)!o|d zpYCkb+i|tN9gpBQ*NUR(+_1A1Mb{VN0hhItYB6DRDjw7u^+7#YuJ)Rp!Ep#STXEx5 zeJ?uG8TRAGdcCt9m)BJY6mRTzJ8$YW2XPM)L`LC!!sqDl+_|_{X+w5y>eV~LRvqw0 zYW_$IGK-n)m(L4PfJDdR{pMDDDsFdsBPvfgPn=KHx0_pM>#bpY{FkkuJ^V{UEf1i`WH-iQTU>zZ;rxnETaT9hDL zk|im|xC&iP3SVAd+lbVX1cAq~Zf6i*7?dZ}jFl6;n3T7lP@Ei>wwk{w9+W!`)TV^U zU20w0T9+jN+LJ4Cb)S_u7ZZe)P7|6!qqzmu+|b!?O5 zv@zVQV=Evvu8sC$OgPhA*PfLuK!vXc3i6-SsaELfNtDfgyI$#^ZT1GkdW+g-sx+=PODN-V^(5qAS4;R9NNTeVy&F}{YDi7K(MYIcMOKrvgA@s!tnGR)% z^mnW`Z2MY7Nh>h5&W{913aRwZRATE>HgPKwIG_Y*G%&Jmf6MZ)V}S|w5eB^x%#s=r zQ9EvL?Qz4_ZSz==Z@TLzq5I;-d#o2jL$7qsb#sJjDnj>=A*ycoRb%>&(auh$t$9bJ z1F*-2N_jpu?DgW#;OMZi9S@F!J)joXVQk)ywGx%x*_n{CAf@m~!|Y=r8ARR~gi)jd z_}^Z456p={9QB&p=LgYdbI^|mN0&RXTA5e#sSUatlLD0mJds#Upw*!PMxnV5l5ZFS>>3SgjJg?_fuX~Y-Gf;_H6S4*eQor9XG zb#Z_;NdvSUsdmz2R-2u;-eb*W{&|zsjP>NyMDdnp2{j<;0*|%oeK0vl#R6>q$K!Lo z`gR+XpT=h~W1^Z`s&uw`F-9Eg4#BqRA(_$dVAGkN0kwldvhdjXcx%^46G=|8;jfMC zGkp;9GUm44>?8%UjC7^Qq+T1gH{+g^nZ&#?09$|i=&;#p#Jv@0ei@XL-uhBYC!kx; z*fTkqOiPWWv-4fTay2`{m@_I_A8#O{-hzqH8?cEu(G&%S22ntoT3HfKW5;R#F;&PW ziRE5RL7&oUZf@=EMO%YAbXO)U@<=T&67-M4X|vVuc~~vh2iFrH#{f`7%^6~ zS?|Y{(dwoum-kkuLT^`W^i`_bqs_s_R=sm-<0y!Fzg$4++h;lE{(e$woM?j^W2?)SGEJFLyFJ?6Ifp4b$sHDzpL`X&N*u+#69q}e+h;A1 zIAFM>pSPeOMnqF?9+REs!sJUeXd);l~;2SMgMF{7%Q zKfBBEpeMr1Jsn_<=A1uYH7i)TG^{kxTCFcRuuFEQGSM%b>D$NS~xmgBhMJ{L+C%L#a zTuW_?eiJ*xJ2RPjXW3_E?P*Adk&s^?*%IUxJDXwRgPNF`{lE%Eqfza`(g#eD60uqH z5p<=S^O4BJ&&pvMO3aXV&{GRC4ea%Xd%XUUQ}8K?DLwbd=uQm@PGb9Gr%Gjw23+)Z zr`>PGYs9d_xV!?^cAH@uJvIgg@kR;!E(`UY!9pIbu+bu(o7HMtEn1aO z)L$=E`m_ol^+hQglLIT-?+maJ9lkCb#bbiy(9r2s=?r?qeseztN7HWHAZJ!sFbrXB z)p~VsEtV(dNBP0lK-~QdY#Av9ZVH(8b#a9>(R-?S5@g~d3l;#4jS%YSjAhl7$g-#= zA+fv36-Y&fa(e(hNgAWa>!_ANrtZPl@dH?-mytmYl*_39{v!hY05#AI!e zE@L5vu>P2ljG>o`FsLo0E{7D?7D*wpC33Y;>4UPx+dW(=ylHQkaP@c2iyP;=!&c*1 zx4l>28mRt2NW2tad~I#T{eEVe+Si2sRyLmUfQ<#kO0LTB#YWx1nf!XZ4a>#-geqI_ z{-P)kJ!_$7SSz9-9ZCl>ur#%b+#J{UlKe4$p9h zCxNO2$k6Mr$LBKG1qH>nVB*pg{Q!OE!igSON{K%Ebj#OqW~lR>4=qa^LDvR4GeR_fRX7a*-NCH~Q!a=E++?XnFk(@bwzKU0jUWfk_% zgNuDP8g!#`DYJZ$P(OLq1k$rwg0u5Yk<(rg^d}L zr5UW~G~Hx&?OEf*cm~wbIH$Y8e6r5NLn@C3KJieO#42zC_Jg6yC!(|^2_ug9x%2qh z@=QZX1@RANf#Ijt-C&YM-zr{EOh)-L#(UhtBdd4yR%=A7!L(~mq-Nv81c>$koN9K6 zOPU>rU52YnI^*e%6<>V>K!%_SIsw+aG{Xx;+{uV2+&Y7cbJRaH|Ci#FM3#>nSIh6ngkiJ_GYG zpdr-m*I*-sd-hdcFxiY-WL!9A2*umNGFXoBF%$2ZRAq6{+{o$13aSQeRgzlDRfAd} zts})u5Up+{XL>tGus?CD=Mrfrc+SOn;K)IxUxk%>a5RF+07dX-HPl&Uv~d~D5v1ds z+0=4goFu2Y4Y*{zQP+5?-rH_=k}{bV2zjq2@D40)ZGw@VurN4G>GAMz8ME&J%sAdq z5jRK_UN4A;{p3y1WMF!ZX5`>)QC1g@^ZlbD;H;YXr!NI8c)WB^7RHeIS})0c1r%ww zR^N>iMPN(HR8Jmfw$T%DzMW{CXG)JIQ?-t}#L(xFAm^K04$l(N1QgViER#~Uva~Se zEJw@dx?*CIZ1ZFIO1iIA%E9x1g*N&S7Ka@)6*4huC5rmzq4bT2aA=c67I7InNuobv z(&sFV@a8(Z(717Mhd^1(dRB9CDytG3DQofVV`#^DWj&no6BL}QCTV4*UW|)!H)UFy zOrj=QD$mlYu8o=dvSrvs2$hyrtHEe`|7`)YiMyTx9v8==D)~x}qslndD|X|fm)*mD zx)I02WcnKNFq*h_O!}EdmJ9<vGT%MAME<5avQbW+pES9z1`uP1*~ z&Q3D2E*x)WeumEi4xe-3M`J5Y}*SsXte`DCzyVZrjZ4-LXVfRV9zWydML6l+}giImn zGR*iD98%55q9s3l=F7IcCZ%Ev*XKhw`7@@|y`4vMizkQYR3r3!Cw*xSAwYjhX0^pY z&v)juhX9N++|=&niMqQ6TUHYU?7(rhhaZop!kE#@FW(H71Z-~?#A zk2)w>EJj8rlZou~5iH{b-N5pB_juO7Sm_(M{^qiXZjjD!k4}gRMOGlw(T#%x$)Q1y;t>bjG)vV`aResX>duc1~G=zaM&`&qmg-#xaGm|(7TOa@-R}rCwZr=(+4=J09!vH>+L-_=w+>5M08Aa%SW{0COSUq z)Zvf>P-Xj)T%u#?u|F}It;arxmAZOTCsSb?$kUp0Mw$kfX;wB%1`CqR!3;u!8y`%_ zY|g`8^|&FXe7i%#EZt$wu9V_o%R=@jC#}07%nNgO}BvypzPud7R__c#u=e ziAYcrQ$GqZ!7pZOO+Oada*pK3qEkYa)vXi5Oi4qAzJ^zo(}ta*S)M&dHe48#G7Yun zj5}rO9WXS8)Bbd%?)H?Xd=zegAbT9UMEw~T{L;^4h3hz!*qfT2Fkn+pM__?cBJ5tX 
zQPigS`cA1M0g*z^qz}S-2JrwqH~utgOBl&VEy_c8}^{#7-Qf#!iooZU>naNvY-6W*vl%H8#RR4xZjf$NM(POt@QdJR`~h7TVgTm{O9} zHO>K8gzYONTlM8~N^6Wc?m^Lod2t+8lctm#awb??cJZ!`5fY`7@!@R*;8k_9a(6op z4!CXE#bJ7Xmy=6jYgd!{x?HR$o(Q;zr!aLgXyV)+j<@%k=oHW*%A1!7H8t+6d6`6c z%*!;|qu^<(h3zrL%GrVm|J>M|k{G63itI4uk~p7{mc}2{NE^$@*AUYZO=hGe={T^* z*dOIIrSV2ul8S+Ok&LtnQ_3mABu1Kp_pC;mi`8^SnuEg(Mw*LDVH=y=NOQ2Bc+_{P z*0e^NEB(n2xoPd5!AMJzJ_I8zVU><KQ!lz`#N^Kc(2}}2AH6c%zA(t}tjB}dx0mqN=jC|vVrkvi8 zONw_=Dl&gV(0sYPwLj9A;$#du`$nN1zENmzD)e^ShaQ;Q@nk37M7XraEQjeSBxITj zxsxLhp=7dQE$K`~W3K8 zAMLlk=acRh?cJed%+Gb z@8+nj`m7#hkv(XJGNTnvM9HXmy4!-9 zAKfD@j^Z6()0nVjxc37cT55E!E}ar%2h*Ei(|mdy)oi8v;4kvx!8Pq$7cNBGbczrs z;!zVWLN4C$h1>MW(MI=io(Jr9-0sKl@BS`CnKu<-m(%2HEBg}7OYdWljJs64Ec`g_ zI%ES+P4fk^eM=fvBxzugNr}78(HYk-uTytttw2!}T_i}SFs#@rT<~e7??uBZg;TBL zIe3t;B({4ZP|p1YaJh^^v)|gQKoy^3;n~-4Y}kj>%sx)prMSc4Nw+n&hJ8?U*nC1a zq}swgVMilPrIXyC=qGgJfJj%#yr*kMG{?Lh69w;XF*!R0%!T*yB)D9g zS^CO*7Robl@oblyg_X}+F$*u}+R`kHwq_wJ@G8y54h`vXi^sEo|zJwmOz%qP99c7jIjgo|V6?PS4BJR;OnKZFPF0!nQgK zyQ$mi;UqFuTb&`vsoU!GX^d~Hg*?F`0+W_^zgBx)m>Cb6v294XFwmBQ@7fF+&!YgB zrwiGA>N$pzB!9(uj-@Ze=6;=wWUY|77r zo#E)sQ7T^pgQh9ipVE~a`PYYhPYUrw-?3vbSW1)0bAx#>PI&SW#I9(WC{W;c%+nP! zq-hYjXZYmG)J%+8NHpgl>`;*lj%&BFj8#8{kNaqRHv?PU#u62)*I4pmb()ae z<7}L`yfU=V4d!%uEYtgE6ovL5X(2*?c|FYJS@uP4H0V;`j!C$Z!t!rLC+cwNSjGQuWE@ZuhKa^I3M@ejooh3gNTl0Vn^n2*b#3` zss`lG%q%Q)duEN`Xhu$Dg95w7+};Y?>)dHtYVY?+bQLPPZq&ar&!&P5!W1;x~ zsGH8;4^PU0Qcg>O7x+(_vZu{WK%ea~e4YQKIVl=&;nID4R|%VIpAR#6?qOB zX1!03OW}L4m@{)J#87f4|12)3x23s7UcmLdJA)NHdy!DlJlw~EUkWsBNP2d{uIEiB zJuT5xI{g7|D~4A^#%V8G<(r5XJ(dQ0uB*O`r_scjTT`(5GGm%t9t{@{900LKeV zQ0KJ3gU=vd--!1s?QNLiz+%uOlw6w$DTH#Q)K}pSRiotxIPftk&!)_!CzzBNFLtcM z!s>1wX?+v!kiWhix6|h(vKe-!x(KOy*0ONiO`e{ZD#<9iY4!BcBdc&t)NnJZV1u|G z-uIe|Ht&dgT|hDJ&x8?iRXkY>bM$q zqE+99yT7NGhDD9K0veeLA!RAgdAp*K#@&W>OQcHDJSIZt4#L@L4zI036HUulkl-OT z(BTF_dtlfs$2=!59%P}3TD~oKe`ceam z)e@@yiDs*nzYU!S16`hV@CL~vX!;{t$@!fNP)_LnP}xk&*PyeCIs*qPu!ByGNtx*E zW}v7x$*`kif4~D823vFfT+hgba^N{^=uX#9fu(ij)IZ3lhWbQfd=@($ac$jvPQ3AU zeH~4&bZE>LrqBj0B9y+9?v!~>7@+Y5y!VFZLzMYhBr++J=bUsJaRV;mg7P^py676;S%?r4vl^(90i@>|xilLSq zb7e)#WRPWAb4DL$vPo2@sXP0fQ5(55TGf35;C81|!mV{)GWIr^m$A>r z$G7vPXy>9gQ?AJh8g8>?&At0&ri^XPmV>dI8r3{MCxzW#dB!v>AEWVn->eKL*=JLt z2LcZ-1>ejhxrF7g)61C@fb48V8@qpKY~G9WiBFWemil1v z0H>6OVbnG|a-7f>&9tUri@kLrv(S^rD81j~fVrJ50n9hwJ(ro*2SIKzMrO!Xe*y2^ z;~2AwlYvupv`)RdE&($$XPS@(xvM1@VAF@)DVcn_wnZ-(be5I}MP8A@QQw=%>dCES ze8D6eaSK)vqdGp*2xH~GpRN{p>EKqyndqkM|7T2uhH|v?#Dsu_kbLhZ{paB&F=2;2 zTY_Wzo7vbFH*jU01TW88>xJEj${z2YFcD~+QoCaIOWf^jM3o0&15Q)K2`$Ob+%;o?0`-5sp(b~Ps}I>E^c zvph4RVZp%xZ&yn>nAaG4O!{#wj9ST0E*!-84zx4<8K`Y5LBIQwt4H_EeT(>B+#T^` z)jqE4D^>!0G<>S$NOsctVG118OjFe%Sky9a0@GElmyNx%!I*V8a)hepXI0!?=FX~2 z%YR~Z^5)PtEp_AsmDt%e!jV;W=tpQ<*M5`|CzG zQHyFIFR{AFQ4e=_cYDHk;vD*T$_q{^h@w_=6LvF1Tdg~c3mzb0un=YE5!z4YU}VLR zM~c=}9Efb2QUv(vV5baRbG~(cH#%2uwqS^b1Jm_rZ`g~Y{bp}4tmC&aa}Bk;9+Q!b zhqUEQ&;0DdBYLwK7dIg+hu!rqa`nMdtMi69D|6FQ8#QLG>aa%?Uzf~$HD)VNpR|iO z6R&GE^VuS!YR@3hj0P2!UKPd1emCO54pl2@?Koq+Xdl>)Eul%gF~tn2n(}PM4P!Gg zr4Ax9SQYJ9^U7>&-U`ku2~&G58lM+8&iL~dWj^q~!z6BvUEsGrbJnw6QPWamt$25G zhPd4Y58{yZRvxX}sZB#kDcQ!qT5;^CDeu5M7dIU08r0rsnup8&oHm>`qE*T&i!S%1 zNU$h=c{qVp!FwNST@*fnLx%#EQYg|_N9n1?nf_ng8Do^4JQl5&>dx6fO9z5F4q`Vyjlce2OLTPzql%k6< z*M{v)c(B-()fi+F4YqZ5mC$D1xVHkEHdB5s>A?fixSpifeLPt!9fRxBo1I}SdxKe8 zx#|n>013RuW4}1X=x7397a7$Spw8eG4ZDmE1k5zwSrx!|j#rStJ-qPV1-)b$HQRfw z$_}15<`funkx?L`zF~~}Zj{UwQ6`F7%B06ht!+dF`6xq9JPEq56`KYfF7)!E>k?;f zojQ1N%uA|>+bZcH41F70_0HICRE6Q0E-HYBxC2fz|KkcIV7qd3^*w8eMajj-k4MZyEcO z4m|2`?0mi1!C|cu2FXSt8l|YW4fIfU@@D5&BDin>FI<6H2`JbubaBJ20od)>Uqeb~ 
zI=2JsXg%%^+bjm?y|h;1>u4k#_=>$7g|lKeW3ZNBeDGkJF(kfR7mMQ&tzw_<=~Zbu zJ?v;6qtUu^@@ZBaR?y*wl(nRoGrte7%UD5pfvco0o?`kR^eBOyIkJm?dh z?MU(~>Wg67JRXbxIEbJ02IBg|HSeTJ16X#o_InG$Xf>%9hREt*+~z$2-DePIjH zO;4oTgSD~;C&~GV;PsP4vpV&{38j?lTbDp>>FK|m2@Gp3X2G%RwmarxVk}wCprLgt zu`e7cWY5DWWuv=z+Gfjmwk`@6XSUUh+&Yj}V2g`^VJLgP_!cg5)7Fp{Cf6`uTg|M+ z(!&Fs(2cRuunWb!&4{8o(z~U(Xx=y4WF0{ZGqc_}0g-3#@g$U3=b7Xpe5|tGE1FwM zR(+`!Odyj&;;z;7vU=`yg!u9GOxMWYr@Z^Cue(?PU7oLM5 zBs&p9C*Ig$SuKDS0Z-Jy)}{t+ruJXTBSo=$0JTto8v=RWm~L!!?Bm@W*Og4ph}s}F z8+*2jQpJ1ha+`=H?@k)p-|D3)^j?R`y{{okp6SRt_1d-^EhVMWwAIpI@*_eHGy)XC_9Y9k8>s#on0>4*SDD~ zLC%>mJx9u!H8{@G;7A(<%egcQano{Ax62-XwG5U(rL)ENbHZIKu!k4ex)SDL5hr@! zQt_Zr-uUgjv6 zV;NUdFrkIym7-|StAlU40#RkP&}ib3<`_=#Mgy48B(-99*U&|J+A|Ec6jM7(Z8b9< zqY~6M+~3FSCcrl^Pr-zfUaE9-1Ng@CtR$To| z{HP!g984{N1;s(2d;aEoypYnXkJPTO9c^l{RBqsa8JF*20UQwlx_ z65d3GN8Mnzs6X4YJXEtlBU9y#W!Pc7p`zn^r&;taUb&&PoKUZC68-$l^e6KSiZ6g% zmJv0sypa>nWlk4ma!#BLKPz7409RLUgq|eqjpIgzuFS}|pH81U*N1DZ;YxhC8JQgJ z=4N`3a~Znh^^+T=Xk!ung&rFIM=I}00juAU!cDaN*gK>|av3#>QB;aB+E_S@QcRb| zDjnZDnpVb%X0Jb3>yp%GtrDz|Qno0?$;@0YQ>Qdj<-n#g_{%8Rfy(2pAy+~nS`U@J zG0nQESMR`;f4$~l1RWr`a@|J!Mr4l3FoqqB7mR{==aX95xE(Haw`U-^-q_B|fjHrK zOsC1g6O+HW&UX_Y^b|54q80!tI`8?aMZW*Y2n#B>Yf(21;nVawz76KM%uH<25T3r- z1Vb<$M0+ro=Qs2Ss<)y|lulo-R$V+>75YKg?YfS(>I&Mb>$MEwzc?&_#alvr%J*^<4Li5@>U$BaJ%UBG1)IKWQMp70@7l&vRDpCQST3Ii z!}#>ZQ8RsJp&J(;toDY7&XsTZrN(}dul$S<%}b}?S44``8WGi zstDG<`n0al1a!z7<_GpcN}g7mu(}8NT7i5WnJQn#0^Oy|S5SftPcTG=QM*3cjH69x z{#)l;Fe)1LchP1)Inj3p{ayewuVLTJ3gr-M9#pSfUVy9pVPcmR`96qS{#H!6C*>>s z($&{8`wLBXI}FL6F`cZ>|+*c44+dZdY} zr$>EizqCUKhGu%Ouk2{0BNiRJD%boOWo7OoWs^T{$xt&7V` zF^JnTZCcX0$2hN6T<) zPsS~eX0#xehR)LWWpy&lw^a992_=<1b&8W-?n(1p?A=%6&+&OV73yC8B8n+9>gmUP zT&QC|l;Zk09a3>k_Of!E8ksyDC3Dti6OxUa5@?MtLyO>nr)cG33$CVjEnWl(E$Vfq zYvAPqS(7LxpJ;6x_65W0HZbzkFIXH@jOpLKhb;vuJ@n$Kv4&loWDeu8W<6$$EXiOf5AR3dMSk+)*Y415Y zDI$2Qw%+PTa3s6e+&*vm4W^vgO}-C==1T0OhPfcPzrhCtf8d891=l@@?eEuJ!?gaC zL(W_XZZ@wRTk*L|Bu1P`FVm)(jOfr+--tJd+Yvaj8*sqrju@eB(;+xz<&uX6XYN)d zfgI3tPG^^9ubqi{S?pucTL)!cf@POXiPeJSqtWc|;Sro?B`@Rmp{}fVhn>c7kL+%| zXg`KWU_7?#2$tWC_5ec) zrSvT1#VM5V(qv=@x;~dU6nM;<9lTx@?^nTEx-|LG`!)?d338bMmpGjpiW|>nwxAcy z$_Rq!Tg3a-E=|5Rx%#piRI;ke*CeJIDAA3bO~&>+K+}bK!iDb2{ysYX$qli9?s=Oq z>xr0xFzcI`f-sX}dvZcB1-9@ICc22>3}$qDT;F9cFFc7v?INRshbHRvtjH8>DoD1c zShWjfaV?n~yvL;F8{Iv?&J@YhDoynC@zN?BvpWF}^g#s%6vvZ3L4Y$la*&&k`V5=A zYrYSAJK>D!c;@MQL1L89>jkhMPIxC@(^p&$#&_}TZrQ$*8xH%@y|f3)a9$Wm&ZpoV zhG%!PujsOX^{cx9ydvO4;R`-AV)+)_%shJyglVP2{hD3e=9Vds#a))>$1glP?F;YD z15(y~*v5;C@5az^=;_dotwT%4*Iz(q3kFL*8QhV9?LRsXH-}uBAx1`IORks^+L-pN zXJjSYGfp^pya?T6Dxs6?b1(+Ot(H+&BV1g>u@U|gT^&Qw2KRcF0**J(f(u;-0B0#+ z<_0ra=EjGdk2m+5jhJSLFjAe_%OUXtB*5S}Y>u&FC-c}RGd?MVF-M!&m6)9~^#Th30|0yiYe>+eu@7EIRKw=!qFPFFKo;+ISl$bMyoUsY$Y5 zc=TjKCaBGF^khN`&urRT>NWRr6}+{C+Yv_jr^f8ryHm&oLb=n28(Q>%Ew@ADENTf_n7aAwgfaxRmr| zsnUs=ix(Eb%brOa^<0Lt13MF(9q8ufW_2HWQTwRpn9i9KKYb3j17wmPZD!s!HQpR` zZ-DS`nD89%UN3r~IKMN&RkW;@+DfcPpyzZUsZHQ%Ph;3iPtBF#(jUL%sYPU#cCnh< zE>WhbhwAm(w_A^ zIV&SbRG%)Ao1v9WUwU;>(o9QdHtcyE_N7s`t0x_!hNGgUW8h481=F<#KF3XFwy9;E zb%$F`Cu4(o2yvrBnxvEgw0afJ-BkwgMi*V5pyrQk!M?vf>;c?&WoUX%YjDWG$Ej%K zQ}bs!J-92e4_o7Bp@&PQi;3-FGQIlNX*rN_Q-M1!hW+M#3>Px(#tjnPWO6Mpqbv0& zT$b%E3r>v=kU)ABb%Et$bTMMHxsi(xm=}^xCJx+e%qn)$orB5Quzq>i9JJOjpM?q7 zN~Gp_gDlQ3;qoZHtr1|oZMJIIw&U9Obj+}A(o7W-FKX7>xaL_AdKUaNTz-otSMC)$ z0g7CVYjw?{I{QH-Gh!B(m4>(SJcTsd*g;vPt;uHOmX)E;lM{$y*#y3Rz*lNRM=e%t z5L2T+6%XogrCD9Bl2a?7*m?}$jgHVW(Q#~i7Av`yiWJ0q(e0F6d>7l%%h@Omog-&n zvSV>U=@OBQ;fE^eE!$K)d?gF;z8@?Z4%GbmA>Ivqpwdmlv#5mA^6|P3lkp(0JlP7H zfkE1(WtF(Lx^cpvDi^x2O0;4m*Bv>lWUTlmgi)gYL(M#QSHsuag$h)7A1iB{OE+ 
z3Jxh16u&*t1j_{_=;M|C`BGF*j$o(4OeHzw)SyJQv(%C26&fq7wow#`i}3cAoSePu-Vy9MFc zGkkbSHs(BgG>@cE#Cf3)@9h}7cgj;UQLu;cTnyVr@$8vi_d5uq{%~_)1nZZuhXIOIcwp0cSku&q;xu*+BMmEq4n@JTz(QA1 zVPGpK>~mL+qo+n`oGE2Qeo<4(C_g+najD95NGahtbwl4;-MZ11MQ|s_)=_aID@=mR zanK|{qu~%NKD&jlZPD>$qbKoXPqri24|8zWTgr+8uS9V5u^t}W8%@Tec6JRe4Z>Qe zZ#MT;0C2wvFP6bw4|HEA?6!nA!cpTbOsYEvnr{m`UZad#UD5ugA=lhCZ4Gmt(Y2Tb z4rg9MejV&UX&153Ln-W(q1#Y~mwx9ED#?V`p%^_F*5kQWchul~U%Fddc-P~U_qdoD z2UXqc?oGH$YvO&lzTILlE@6)GAdBzj8Mulgf8QPrPH*xi{?=m#igGz%M3d58t(4f+w0RlDNU%sK!eHvHR^l{1{zcv zglN#7#k9ODDHv!_X;4GqvPRXQ(x8UIC5@^d`p^~1_KQ$4QeP{NT~u1Dh&c; zCtfP0G@%9q4Jr)+WRr)XCf8t~L8U=}Z1Pf&{}#fxr0g0DG^jMFp-`n%fd-WUrt#r? z3I-Zf8iZ)jo>H2Nl@ts#s5A(WU9iRF{I?LkB?Z%9ph2ZUfNYjaOPXARfd-WZ0kX*# zHMIr<4Jr)+WK%CMXmSk(8dMqt$R;n9G`R)?4Jr)+WRr)XCf8t~L8U-gGz%M z3YA9HpfbQTKAca%K!Zwy5DnTB<_fX25~kG;pk2m5M;oBhUmY~dnj?TYjygKfpwgg* z!ljf`0HDoD>1YFVAeI(WKA{E!4Jr)+WT&&VpveKWqXQjnfJ%SWP*_Si1sYTu1juF| zhA9n#c66Yl4N&Q?1{9>60u3q+YA941RfEa^)A(>c1p^H#4TkVo#v3HA=rLz9sffJICwY65 z6AYlmfwODa*@~j;p_m>VXs;}$%+3rvH)=C*AfPAEUMW?}c|^w&ZxvpMGk3Y>Qao(d zo;OPEQf9?Ob%QA4FiOE-CCIfB1>XeI03C>n%PC^0!9asbg8W zXi#YoAX_x0Apb3dZ;9?|FwmgVpoT(~QUw}RCYVcoX+EV1H5h0RZgEetqQFfaZC|($ z?Zv%*w*wn52BT&nJpCD)`Tuwg*8dMqt$d>BzQc4qQ zFaXfTR65!K9f(VdnxzH<4Jr+4C|pRX0u3q+0%T`tp_Km?!nZ_p8Voe3G^n94OsN75 pDh(P?piwmli