struct std.heap

Types

Type Function SbrkAllocator

Parameters

sbrk: *const fn (n: usize) usize

Values

Constant vtable

Source Code

pub const vtable: Allocator.VTable = .{
    .alloc = alloc,
    .resize = resize,
    .remap = remap,
    .free = free,
}

Error Sets

Error Set Error

Errors

OutOfMemory

Source Code

pub const Error = error{OutOfMemory}

Example Usage

test SbrkAllocator {
    _ = SbrkAllocator(struct {
        fn sbrk(_: usize) usize {
            return 0;
        }
    }.sbrk);
}
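
The returned type keeps all of its state in globals and exposes only a vtable, so a std.mem.Allocator interface can be built from it directly. A minimal sketch, assuming a hypothetical sbrk implementation named mySbrk (the name and wiring below are illustrative, not part of this API):

const std = @import("std");

// Hypothetical sbrk: a real implementation would grow the heap by `n` bytes
// and return the base address of the newly mapped region, or 0 on failure.
fn mySbrk(n: usize) usize {
    _ = n;
    return 0;
}

const MySbrkAllocator = std.heap.SbrkAllocator(mySbrk);

// The allocator has no per-instance state, so `ptr` is unused.
const my_allocator: std.mem.Allocator = .{
    .ptr = undefined,
    .vtable = &MySbrkAllocator.vtable,
};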

Source Code

pub fn SbrkAllocator(comptime sbrk: *const fn (n: usize) usize) type {
    return struct {
        pub const vtable: Allocator.VTable = .{
            .alloc = alloc,
            .resize = resize,
            .remap = remap,
            .free = free,
        };

        pub const Error = Allocator.Error;

        const max_usize = math.maxInt(usize);
        const ushift = math.Log2Int(usize);
        const bigpage_size = 64 * 1024;
        const pages_per_bigpage = bigpage_size / heap.pageSize();
        const bigpage_count = max_usize / bigpage_size;

        /// Because of storing free list pointers, the minimum size class is 3.
        const min_class = math.log2(math.ceilPowerOfTwoAssert(usize, 1 + @sizeOf(usize)));
        const size_class_count = math.log2(bigpage_size) - min_class;
        /// 0 - 1 bigpage
        /// 1 - 2 bigpages
        /// 2 - 4 bigpages
        /// etc.
        const big_size_class_count = math.log2(bigpage_count);

        var next_addrs = [1]usize{0} ** size_class_count;
        /// For each size class, points to the freed pointer.
        var frees = [1]usize{0} ** size_class_count;
        /// For each big size class, points to the freed pointer.
        var big_frees = [1]usize{0} ** big_size_class_count;

        // TODO don't do the naive locking strategy
        var lock: std.Thread.Mutex = .{};
        fn alloc(ctx: *anyopaque, len: usize, alignment: mem.Alignment, return_address: usize) ?[*]u8 {
            _ = ctx;
            _ = return_address;
            lock.lock();
            defer lock.unlock();
            // Make room for the freelist next pointer.
            const actual_len = @max(len +| @sizeOf(usize), alignment.toByteUnits());
            const slot_size = math.ceilPowerOfTwo(usize, actual_len) catch return null;
            const class = math.log2(slot_size) - min_class;
            if (class < size_class_count) {
                const addr = a: {
                    const top_free_ptr = frees[class];
                    if (top_free_ptr != 0) {
                        const node = @as(*usize, @ptrFromInt(top_free_ptr + (slot_size - @sizeOf(usize))));
                        frees[class] = node.*;
                        break :a top_free_ptr;
                    }

                    const next_addr = next_addrs[class];
                    if (next_addr % heap.pageSize() == 0) {
                        const addr = allocBigPages(1);
                        if (addr == 0) return null;
                        //std.debug.print("allocated fresh slot_size={d} class={d} addr=0x{x}\n", .{
                        //    slot_size, class, addr,
                        //});
                        next_addrs[class] = addr + slot_size;
                        break :a addr;
                    } else {
                        next_addrs[class] = next_addr + slot_size;
                        break :a next_addr;
                    }
                };
                return @as([*]u8, @ptrFromInt(addr));
            }
            const bigpages_needed = bigPagesNeeded(actual_len);
            const addr = allocBigPages(bigpages_needed);
            return @as([*]u8, @ptrFromInt(addr));
        }

        fn resize(
            ctx: *anyopaque,
            buf: []u8,
            alignment: mem.Alignment,
            new_len: usize,
            return_address: usize,
        ) bool {
            _ = ctx;
            _ = return_address;
            lock.lock();
            defer lock.unlock();
            // We don't want to move anything from one size class to another, but we
            // can recover bytes in between powers of two.
            const buf_align = alignment.toByteUnits();
            const old_actual_len = @max(buf.len + @sizeOf(usize), buf_align);
            const new_actual_len = @max(new_len +| @sizeOf(usize), buf_align);
            const old_small_slot_size = math.ceilPowerOfTwoAssert(usize, old_actual_len);
            const old_small_class = math.log2(old_small_slot_size) - min_class;
            if (old_small_class < size_class_count) {
                const new_small_slot_size = math.ceilPowerOfTwo(usize, new_actual_len) catch return false;
                return old_small_slot_size == new_small_slot_size;
            } else {
                const old_bigpages_needed = bigPagesNeeded(old_actual_len);
                const old_big_slot_pages = math.ceilPowerOfTwoAssert(usize, old_bigpages_needed);
                const new_bigpages_needed = bigPagesNeeded(new_actual_len);
                const new_big_slot_pages = math.ceilPowerOfTwo(usize, new_bigpages_needed) catch return false;
                return old_big_slot_pages == new_big_slot_pages;
            }
        }

        fn remap(
            context: *anyopaque,
            memory: []u8,
            alignment: mem.Alignment,
            new_len: usize,
            return_address: usize,
        ) ?[*]u8 {
            return if (resize(context, memory, alignment, new_len, return_address)) memory.ptr else null;
        }

        fn free(
            ctx: *anyopaque,
            buf: []u8,
            alignment: mem.Alignment,
            return_address: usize,
        ) void {
            _ = ctx;
            _ = return_address;
            lock.lock();
            defer lock.unlock();
            const buf_align = alignment.toByteUnits();
            const actual_len = @max(buf.len + @sizeOf(usize), buf_align);
            const slot_size = math.ceilPowerOfTwoAssert(usize, actual_len);
            const class = math.log2(slot_size) - min_class;
            const addr = @intFromPtr(buf.ptr);
            if (class < size_class_count) {
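                // The freelist next pointer is stored in the last @sizeOf(usize)
                // bytes of the slot; alloc reserved that room via actual_len.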
                const node = @as(*usize, @ptrFromInt(addr + (slot_size - @sizeOf(usize))));
                node.* = frees[class];
                frees[class] = addr;
            } else {
                const bigpages_needed = bigPagesNeeded(actual_len);
                const pow2_pages = math.ceilPowerOfTwoAssert(usize, bigpages_needed);
                const big_slot_size_bytes = pow2_pages * bigpage_size;
                const node = @as(*usize, @ptrFromInt(addr + (big_slot_size_bytes - @sizeOf(usize))));
                const big_class = math.log2(pow2_pages);
                node.* = big_frees[big_class];
                big_frees[big_class] = addr;
            }
        }

        inline fn bigPagesNeeded(byte_count: usize) usize {
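            // Equivalent to ceil((byte_count + @sizeOf(usize)) / bigpage_size):
            // rounds up to whole bigpages while reserving room for the freelist
            // node stored at the end of the big slot.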
            return (byte_count + (bigpage_size + (@sizeOf(usize) - 1))) / bigpage_size;
        }

        fn allocBigPages(n: usize) usize {
            const pow2_pages = math.ceilPowerOfTwoAssert(usize, n);
            const slot_size_bytes = pow2_pages * bigpage_size;
            const class = math.log2(pow2_pages);

            const top_free_ptr = big_frees[class];
            if (top_free_ptr != 0) {
                const node = @as(*usize, @ptrFromInt(top_free_ptr + (slot_size_bytes - @sizeOf(usize))));
                big_frees[class] = node.*;
                return top_free_ptr;
            }
            return sbrk(pow2_pages * pages_per_bigpage * heap.pageSize());
        }
    };
}

Type Function DebugAllocator

Default initialization of this struct is deprecated; use .init instead.

Parameters

config: Config

Fields

backing_allocator: Allocator = std.heap.page_allocator
buckets: [small_bucket_count]?*BucketHeader = [1]?*BucketHeader{null} ** small_bucket_count

Tracks the active bucket, which is the one that has free slots in it.

large_allocations: LargeAllocTable = .empty
total_requested_bytes: @TypeOf(total_requested_bytes_init) = total_requested_bytes_init
requested_memory_limit: @TypeOf(requested_memory_limit_init) = requested_memory_limit_init
mutex: @TypeOf(mutex_init) = mutex_init

Values

Constant init

Source Code

pub const init: Self = .{}
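
As noted above, default initialization (.{}) is deprecated in favor of this declaration. A minimal sketch of the recommended initialization, assuming the default Config:

const std = @import("std");

// Declare an instance with the default configuration and initialize it via `.init`.
var debug_allocator: std.heap.DebugAllocator(.{}) = .init;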

Error Sets

Error Set Error

Errors

OutOfMemory

Source Code

pub const Error = error{OutOfMemory}

Functions

Function allocator

pub fn allocator(self: *Self) Allocator

Parameters

self: *Self

Source Code

pub fn allocator(self: *Self) Allocator {
    return .{
        .ptr = self,
        .vtable = &.{
            .alloc = alloc,
            .resize = resize,
            .remap = remap,
            .free = free,
        },
    };
}
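
A short usage sketch: obtain the std.mem.Allocator interface from an instance and allocate through it (the test name and variable names are illustrative):

const std = @import("std");

test "DebugAllocator.allocator sketch" {
    var gpa: std.heap.DebugAllocator(.{}) = .init;
    defer _ = gpa.deinit();

    // All allocation goes through the generic Allocator interface.
    const allocator = gpa.allocator();
    const buf = try allocator.alloc(u8, 64);
    defer allocator.free(buf);
}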

Function detectLeaks

pub fn detectLeaks(self: *Self) bool

Emits log messages for leaks and then returns whether there were any leaks.

Parameters

self: *Self

Source Code

pub fn detectLeaks(self: *Self) bool {
    var leaks = false;

    for (self.buckets, 0..) |init_optional_bucket, size_class_index| {
        var optional_bucket = init_optional_bucket;
        const slot_count = slot_counts[size_class_index];
        const used_bits_count = usedBitsCount(slot_count);
        while (optional_bucket) |bucket| {
            leaks = detectLeaksInBucket(bucket, size_class_index, used_bits_count) or leaks;
            optional_bucket = bucket.prev;
        }
    }

    var it = self.large_allocations.valueIterator();
    while (it.next()) |large_alloc| {
        if (config.retain_metadata and large_alloc.freed) continue;
        const stack_trace = large_alloc.getStackTrace(.alloc);
        log.err("memory address 0x{x} leaked: {}", .{
            @intFromPtr(large_alloc.bytes.ptr), stack_trace,
        });
        leaks = true;
    }
    return leaks;
}
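
A sketch of invoking the leak check directly while the allocator is still live; here everything has been freed beforehand, so no leaks are reported (test name and variable names are illustrative):

const std = @import("std");

test "detectLeaks sketch" {
    var gpa: std.heap.DebugAllocator(.{}) = .init;
    defer _ = gpa.deinit();

    const allocator = gpa.allocator();
    const buf = try allocator.alloc(u8, 16);
    allocator.free(buf);

    // Nothing is outstanding, so detectLeaks logs nothing and returns false.
    try std.testing.expect(!gpa.detectLeaks());
}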

Function flushRetainedMetadata

pub fn flushRetainedMetadata(self: *Self) void

Parameters

self: *Self

Source Code

pub fn flushRetainedMetadata(self: *Self) void {
    comptime assert(config.retain_metadata);
    self.freeRetainedMetadata();
    // also remove entries from large_allocations
    var it = self.large_allocations.iterator();
    while (it.next()) |large| {
        if (large.value_ptr.freed) {
            _ = self.large_allocations.remove(@intFromPtr(large.value_ptr.bytes.ptr));
        }
    }
}
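
The comptime assert above means this is only callable when the configuration enables retain_metadata. A sketch under that assumption (test name and variable names are illustrative):

const std = @import("std");

test "flushRetainedMetadata sketch" {
    var gpa: std.heap.DebugAllocator(.{ .retain_metadata = true }) = .init;
    defer _ = gpa.deinit();

    const allocator = gpa.allocator();
    const buf = try allocator.alloc(u8, 32);
    allocator.free(buf);

    // Drops bookkeeping entries retained for already-freed large allocations.
    gpa.flushRetainedMetadata();
}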

Function deinit

pub fn deinit(self: *Self) std.heap.Check

Returns std.heap.Check.leak if there were leaks; std.heap.Check.ok otherwise.

Parameters

self: *Self

Source Code

pub fn deinit(self: *Self) std.heap.Check {
    const leaks = if (config.safety) self.detectLeaks() else false;
    if (config.retain_metadata) self.freeRetainedMetadata();
    self.large_allocations.deinit(self.backing_allocator);
    self.* = undefined;
    return if (leaks) .leak else .ok;
}
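
The result is typically checked once, at the end of main or a test; a sketch of the usual defer pattern (names are illustrative):

const std = @import("std");

pub fn main() !void {
    var gpa: std.heap.DebugAllocator(.{}) = .init;
    // Check for leaks when main returns.
    defer switch (gpa.deinit()) {
        .ok => {},
        .leak => @panic("memory leak detected"),
    };

    const allocator = gpa.allocator();
    const data = try allocator.alloc(u32, 8);
    defer allocator.free(data);
}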

Source Code

pub fn DebugAllocator(comptime config: Config) type {
    return struct {
        backing_allocator: Allocator = std.heap.page_allocator,
        /// Tracks the active bucket, which is the one that has free slots in it.
        buckets: [small_bucket_count]?*BucketHeader = [1]?*BucketHeader{null} ** small_bucket_count,
        large_allocations: LargeAllocTable = .empty,
        total_requested_bytes: @TypeOf(total_requested_bytes_init) = total_requested_bytes_init,
        requested_memory_limit: @TypeOf(requested_memory_limit_init) = requested_memory_limit_init,
        mutex: @TypeOf(mutex_init) = mutex_init,

        const Self = @This();

        pub const init: Self = .{};

        /// These can be derived from size_class_index but the calculation is nontrivial.
        const slot_counts: [small_bucket_count]SlotIndex = init: {
            @setEvalBranchQuota(10000);
            var result: [small_bucket_count]SlotIndex = undefined;
            for (&result, 0..) |*elem, i| elem.* = calculateSlotCount(i);
            break :init result;
        };

        comptime {
            assert(math.isPowerOfTwo(page_size));
        }

        const page_size = config.page_size;
        const page_align: mem.Alignment = .fromByteUnits(page_size);
        /// Integer type for pointing to slots in a small allocation
        const SlotIndex = std.meta.Int(.unsigned, math.log2(page_size) + 1);

        const total_requested_bytes_init = if (config.enable_memory_limit) @as(usize, 0) else {};
        const requested_memory_limit_init = if (config.enable_memory_limit) @as(usize, math.maxInt(usize)) else {};

        const mutex_init = if (config.MutexType) |T|
            T{}
        else if (config.thread_safe)
            std.Thread.Mutex{}
        else
            DummyMutex{};

        const DummyMutex = struct {
            inline fn lock(_: *DummyMutex) void {}
            inline fn unlock(_: *DummyMutex) void {}
        };

        const stack_n = config.stack_trace_frames;
        const one_trace_size = @sizeOf(usize) * stack_n;
        const traces_per_slot = 2;

        pub const Error = mem.Allocator.Error;

        /// Avoids creating buckets that would only be able to store a small
        /// number of slots. Value of 1 means 2 is the minimum slot count.
        const minimum_slots_per_bucket_log2 = 1;
        const small_bucket_count = math.log2(page_size) - minimum_slots_per_bucket_log2;
        const largest_bucket_object_size = 1 << (small_bucket_count - 1);
        const LargestSizeClassInt = std.math.IntFittingRange(0, largest_bucket_object_size);

        const bucketCompare = struct {
            fn compare(a: *BucketHeader, b: *BucketHeader) std.math.Order {
                return std.math.order(@intFromPtr(a.page), @intFromPtr(b.page));
            }
        }.compare;

        const LargeAlloc = struct {
            bytes: []u8,
            requested_size: if (config.enable_memory_limit) usize else void,
            stack_addresses: [trace_n][stack_n]usize,
            freed: if (config.retain_metadata) bool else void,
            alignment: if (config.never_unmap and config.retain_metadata) mem.Alignment else void,

            const trace_n = if (config.retain_metadata) traces_per_slot else 1;

            fn dumpStackTrace(self: *LargeAlloc, trace_kind: TraceKind) void {
                std.debug.dumpStackTrace(self.getStackTrace(trace_kind));
            }

            fn getStackTrace(self: *LargeAlloc, trace_kind: TraceKind) std.builtin.StackTrace {
                assert(@intFromEnum(trace_kind) < trace_n);
                const stack_addresses = &self.stack_addresses[@intFromEnum(trace_kind)];
                var len: usize = 0;
                while (len < stack_n and stack_addresses[len] != 0) {
                    len += 1;
                }
                return .{
                    .instruction_addresses = stack_addresses,
                    .index = len,
                };
            }

            fn captureStackTrace(self: *LargeAlloc, ret_addr: usize, trace_kind: TraceKind) void {
                assert(@intFromEnum(trace_kind) < trace_n);
                const stack_addresses = &self.stack_addresses[@intFromEnum(trace_kind)];
                collectStackTrace(ret_addr, stack_addresses);
            }
        };
        const LargeAllocTable = std.AutoHashMapUnmanaged(usize, LargeAlloc);

        /// Bucket: In memory, in order:
        /// * BucketHeader
        /// * bucket_used_bits: [N]usize, // 1 bit for every slot
        /// -- below only exists when config.safety is true --
        /// * requested_sizes: [N]LargestSizeClassInt // 1 int for every slot
        /// * log2_ptr_aligns: [N]u8 // 1 byte for every slot
        /// -- above only exists when config.safety is true --
        /// * stack_trace_addresses: [N]usize, // traces_per_slot for every allocation
        const BucketHeader = struct {
            allocated_count: SlotIndex,
            freed_count: SlotIndex,
            prev: ?*BucketHeader,
            next: ?*BucketHeader,
            canary: usize = config.canary,

            fn fromPage(page_addr: usize, slot_count: usize) *BucketHeader {
                const unaligned = page_addr + page_size - bucketSize(slot_count);
                return @ptrFromInt(unaligned & ~(@as(usize, @alignOf(BucketHeader)) - 1));
            }

            fn usedBits(bucket: *BucketHeader, index: usize) *usize {
                const ptr: [*]u8 = @ptrCast(bucket);
                const bits: [*]usize = @alignCast(@ptrCast(ptr + @sizeOf(BucketHeader)));
                return &bits[index];
            }

            fn requestedSizes(bucket: *BucketHeader, slot_count: usize) []LargestSizeClassInt {
                if (!config.safety) @compileError("requested size is only stored when safety is enabled");
                const start_ptr = @as([*]u8, @ptrCast(bucket)) + bucketRequestedSizesStart(slot_count);
                const sizes = @as([*]LargestSizeClassInt, @ptrCast(@alignCast(start_ptr)));
                return sizes[0..slot_count];
            }

            fn log2PtrAligns(bucket: *BucketHeader, slot_count: usize) []mem.Alignment {
                if (!config.safety) @compileError("requested size is only stored when safety is enabled");
                const aligns_ptr = @as([*]u8, @ptrCast(bucket)) + bucketAlignsStart(slot_count);
                return @ptrCast(aligns_ptr[0..slot_count]);
            }

            fn stackTracePtr(
                bucket: *BucketHeader,
                slot_count: usize,
                slot_index: SlotIndex,
                trace_kind: TraceKind,
            ) *[stack_n]usize {
                const start_ptr = @as([*]u8, @ptrCast(bucket)) + bucketStackFramesStart(slot_count);
                const addr = start_ptr + one_trace_size * traces_per_slot * slot_index +
                    @intFromEnum(trace_kind) * @as(usize, one_trace_size);
                return @ptrCast(@alignCast(addr));
            }

            fn captureStackTrace(
                bucket: *BucketHeader,
                ret_addr: usize,
                slot_count: usize,
                slot_index: SlotIndex,
                trace_kind: TraceKind,
            ) void {
                // Initialize them to 0. When determining the count we must look
                // for non zero addresses.
                const stack_addresses = bucket.stackTracePtr(slot_count, slot_index, trace_kind);
                collectStackTrace(ret_addr, stack_addresses);
            }
        };

        pub fn allocator(self: *Self) Allocator {
            return .{
                .ptr = self,
                .vtable = &.{
                    .alloc = alloc,
                    .resize = resize,
                    .remap = remap,
                    .free = free,
                },
            };
        }

        fn bucketStackTrace(
            bucket: *BucketHeader,
            slot_count: usize,
            slot_index: SlotIndex,
            trace_kind: TraceKind,
        ) StackTrace {
            const stack_addresses = bucket.stackTracePtr(slot_count, slot_index, trace_kind);
            var len: usize = 0;
            while (len < stack_n and stack_addresses[len] != 0) {
                len += 1;
            }
            return .{
                .instruction_addresses = stack_addresses,
                .index = len,
            };
        }

        fn bucketRequestedSizesStart(slot_count: usize) usize {
            if (!config.safety) @compileError("requested sizes are not stored unless safety is enabled");
            return mem.alignForward(
                usize,
                @sizeOf(BucketHeader) + usedBitsSize(slot_count),
                @alignOf(LargestSizeClassInt),
            );
        }

        fn bucketAlignsStart(slot_count: usize) usize {
            if (!config.safety) @compileError("requested sizes are not stored unless safety is enabled");
            return bucketRequestedSizesStart(slot_count) + (@sizeOf(LargestSizeClassInt) * slot_count);
        }

        fn bucketStackFramesStart(slot_count: usize) usize {
            const unaligned_start = if (config.safety)
                bucketAlignsStart(slot_count) + slot_count
            else
                @sizeOf(BucketHeader) + usedBitsSize(slot_count);
            return mem.alignForward(usize, unaligned_start, @alignOf(usize));
        }

        fn bucketSize(slot_count: usize) usize {
            return bucketStackFramesStart(slot_count) + one_trace_size * traces_per_slot * slot_count;
        }

        /// This is executed only at compile-time to prepopulate a lookup table.
        fn calculateSlotCount(size_class_index: usize) SlotIndex {
            const size_class = @as(usize, 1) << @as(Log2USize, @intCast(size_class_index));
            var lower: usize = 1 << minimum_slots_per_bucket_log2;
            var upper: usize = (page_size - bucketSize(lower)) / size_class;
            while (upper > lower) {
                const proposed: usize = lower + (upper - lower) / 2;
                if (proposed == lower) return lower;
                const slots_end = proposed * size_class;
                const header_begin = mem.alignForward(usize, slots_end, @alignOf(BucketHeader));
                const end = header_begin + bucketSize(proposed);
                if (end > page_size) {
                    upper = proposed - 1;
                } else {
                    lower = proposed;
                }
            }
            const slots_end = lower * size_class;
            const header_begin = mem.alignForward(usize, slots_end, @alignOf(BucketHeader));
            const end = header_begin + bucketSize(lower);
            assert(end <= page_size);
            return lower;
        }

        fn usedBitsCount(slot_count: usize) usize {
            return (slot_count + (@bitSizeOf(usize) - 1)) / @bitSizeOf(usize);
        }

        fn usedBitsSize(slot_count: usize) usize {
            return usedBitsCount(slot_count) * @sizeOf(usize);
        }

        fn detectLeaksInBucket(bucket: *BucketHeader, size_class_index: usize, used_bits_count: usize) bool {
            const size_class = @as(usize, 1) << @as(Log2USize, @intCast(size_class_index));
            const slot_count = slot_counts[size_class_index];
            var leaks = false;
            for (0..used_bits_count) |used_bits_byte| {
                const used_int = bucket.usedBits(used_bits_byte).*;
                if (used_int != 0) {
                    for (0..@bitSizeOf(usize)) |bit_index_usize| {
                        const bit_index: Log2USize = @intCast(bit_index_usize);
                        const is_used = @as(u1, @truncate(used_int >> bit_index)) != 0;
                        if (is_used) {
                            const slot_index: SlotIndex = @intCast(used_bits_byte * @bitSizeOf(usize) + bit_index);
                            const stack_trace = bucketStackTrace(bucket, slot_count, slot_index, .alloc);
                            const page_addr = @intFromPtr(bucket) & ~(page_size - 1);
                            const addr = page_addr + slot_index * size_class;
                            log.err("memory address 0x{x} leaked: {}", .{ addr, stack_trace });
                            leaks = true;
                        }
                    }
                }
            }
            return leaks;
        }

        /// Emits log messages for leaks and then returns whether there were any leaks.
        pub fn detectLeaks(self: *Self) bool {
            var leaks = false;

            for (self.buckets, 0..) |init_optional_bucket, size_class_index| {
                var optional_bucket = init_optional_bucket;
                const slot_count = slot_counts[size_class_index];
                const used_bits_count = usedBitsCount(slot_count);
                while (optional_bucket) |bucket| {
                    leaks = detectLeaksInBucket(bucket, size_class_index, used_bits_count) or leaks;
                    optional_bucket = bucket.prev;
                }
            }

            var it = self.large_allocations.valueIterator();
            while (it.next()) |large_alloc| {
                if (config.retain_metadata and large_alloc.freed) continue;
                const stack_trace = large_alloc.getStackTrace(.alloc);
                log.err("memory address 0x{x} leaked: {}", .{
                    @intFromPtr(large_alloc.bytes.ptr), stack_trace,
                });
                leaks = true;
            }
            return leaks;
        }

        fn freeRetainedMetadata(self: *Self) void {
            comptime assert(config.retain_metadata);
            if (config.never_unmap) {
                // free large allocations that were intentionally leaked by never_unmap
                var it = self.large_allocations.iterator();
                while (it.next()) |large| {
                    if (large.value_ptr.freed) {
                        self.backing_allocator.rawFree(large.value_ptr.bytes, large.value_ptr.alignment, @returnAddress());
                    }
                }
            }
        }

        pub fn flushRetainedMetadata(self: *Self) void {
            comptime assert(config.retain_metadata);
            self.freeRetainedMetadata();
            // also remove entries from large_allocations
            var it = self.large_allocations.iterator();
            while (it.next()) |large| {
                if (large.value_ptr.freed) {
                    _ = self.large_allocations.remove(@intFromPtr(large.value_ptr.bytes.ptr));
                }
            }
        }

        /// Returns `std.heap.Check.leak` if there were leaks; `std.heap.Check.ok` otherwise.
        pub fn deinit(self: *Self) std.heap.Check {
            const leaks = if (config.safety) self.detectLeaks() else false;
            if (config.retain_metadata) self.freeRetainedMetadata();
            self.large_allocations.deinit(self.backing_allocator);
            self.* = undefined;
            return if (leaks) .leak else .ok;
        }

        fn collectStackTrace(first_trace_addr: usize, addresses: *[stack_n]usize) void {
            if (stack_n == 0) return;
            @memset(addresses, 0);
            var stack_trace: StackTrace = .{
                .instruction_addresses = addresses,
                .index = 0,
            };
            std.debug.captureStackTrace(first_trace_addr, &stack_trace);
        }

        fn reportDoubleFree(ret_addr: usize, alloc_stack_trace: StackTrace, free_stack_trace: StackTrace) void {
            var addresses: [stack_n]usize = @splat(0);
            var second_free_stack_trace: StackTrace = .{
                .instruction_addresses = &addresses,
                .index = 0,
            };
            std.debug.captureStackTrace(ret_addr, &second_free_stack_trace);
            log.err("Double free detected. Allocation: {} First free: {} Second free: {}", .{
                alloc_stack_trace, free_stack_trace, second_free_stack_trace,
            });
        }

        /// This function assumes the object is in the large object storage regardless
        /// of the parameters.
        fn resizeLarge(
            self: *Self,
            old_mem: []u8,
            alignment: mem.Alignment,
            new_size: usize,
            ret_addr: usize,
            may_move: bool,
        ) ?[*]u8 {
            if (config.retain_metadata and may_move) {
                // Before looking up the entry (since this could invalidate
                // it), we must reserve space for the new entry in case the
                // allocation is relocated.
                self.large_allocations.ensureUnusedCapacity(self.backing_allocator, 1) catch return null;
            }

            const entry = self.large_allocations.getEntry(@intFromPtr(old_mem.ptr)) orelse {
                if (config.safety) {
                    @panic("Invalid free");
                } else {
                    unreachable;
                }
            };

            if (config.retain_metadata and entry.value_ptr.freed) {
                if (config.safety) {
                    reportDoubleFree(ret_addr, entry.value_ptr.getStackTrace(.alloc), entry.value_ptr.getStackTrace(.free));
                    @panic("Unrecoverable double free");
                } else {
                    unreachable;
                }
            }

            if (config.safety and old_mem.len != entry.value_ptr.bytes.len) {
                var addresses: [stack_n]usize = [1]usize{0} ** stack_n;
                var free_stack_trace: StackTrace = .{
                    .instruction_addresses = &addresses,
                    .index = 0,
                };
                std.debug.captureStackTrace(ret_addr, &free_stack_trace);
                log.err("Allocation size {d} bytes does not match free size {d}. Allocation: {} Free: {}", .{
                    entry.value_ptr.bytes.len,
                    old_mem.len,
                    entry.value_ptr.getStackTrace(.alloc),
                    free_stack_trace,
                });
            }

            // If this would move the allocation into a small size class,
            // refuse the request, because it would require creating small
            // allocation metadata.
            const new_size_class_index: usize = @max(@bitSizeOf(usize) - @clz(new_size - 1), @intFromEnum(alignment));
            if (new_size_class_index < self.buckets.len) return null;

            // Do memory limit accounting with requested sizes rather than what
            // backing_allocator returns because if we want to return
            // error.OutOfMemory, we have to leave allocation untouched, and
            // that is impossible to guarantee after calling
            // backing_allocator.rawResize.
            const prev_req_bytes = self.total_requested_bytes;
            if (config.enable_memory_limit) {
                const new_req_bytes = prev_req_bytes + new_size - entry.value_ptr.requested_size;
                if (new_req_bytes > prev_req_bytes and new_req_bytes > self.requested_memory_limit) {
                    return null;
                }
                self.total_requested_bytes = new_req_bytes;
            }

            const opt_resized_ptr = if (may_move)
                self.backing_allocator.rawRemap(old_mem, alignment, new_size, ret_addr)
            else if (self.backing_allocator.rawResize(old_mem, alignment, new_size, ret_addr))
                old_mem.ptr
            else
                null;

            const resized_ptr = opt_resized_ptr orelse {
                if (config.enable_memory_limit) {
                    self.total_requested_bytes = prev_req_bytes;
                }
                return null;
            };

            if (config.enable_memory_limit) {
                entry.value_ptr.requested_size = new_size;
            }

            if (config.verbose_log) {
                log.info("large resize {d} bytes at {*} to {d} at {*}", .{
                    old_mem.len, old_mem.ptr, new_size, resized_ptr,
                });
            }
            entry.value_ptr.bytes = resized_ptr[0..new_size];
            if (config.resize_stack_traces)
                entry.value_ptr.captureStackTrace(ret_addr, .alloc);

            // Update the key of the hash map if the memory was relocated.
            if (resized_ptr != old_mem.ptr) {
                const large_alloc = entry.value_ptr.*;
                if (config.retain_metadata) {
                    entry.value_ptr.freed = true;
                    entry.value_ptr.captureStackTrace(ret_addr, .free);
                } else {
                    self.large_allocations.removeByPtr(entry.key_ptr);
                }

                const gop = self.large_allocations.getOrPutAssumeCapacity(@intFromPtr(resized_ptr));
                if (config.retain_metadata and !config.never_unmap) {
                    // Backing allocator may be reusing memory that we're retaining metadata for
                    assert(!gop.found_existing or gop.value_ptr.freed);
                } else {
                    assert(!gop.found_existing); // This would mean the kernel double-mapped pages.
                }
                gop.value_ptr.* = large_alloc;
            }

            return resized_ptr;
        }

        /// This function assumes the object is in the large object storage regardless
        /// of the parameters.
        fn freeLarge(
            self: *Self,
            old_mem: []u8,
            alignment: mem.Alignment,
            ret_addr: usize,
        ) void {
            const entry = self.large_allocations.getEntry(@intFromPtr(old_mem.ptr)) orelse {
                if (config.safety) {
                    @panic("Invalid free");
                } else {
                    unreachable;
                }
            };

            if (config.retain_metadata and entry.value_ptr.freed) {
                if (config.safety) {
                    reportDoubleFree(ret_addr, entry.value_ptr.getStackTrace(.alloc), entry.value_ptr.getStackTrace(.free));
                    return;
                } else {
                    unreachable;
                }
            }

            if (config.safety and old_mem.len != entry.value_ptr.bytes.len) {
                var addresses: [stack_n]usize = [1]usize{0} ** stack_n;
                var free_stack_trace = StackTrace{
                    .instruction_addresses = &addresses,
                    .index = 0,
                };
                std.debug.captureStackTrace(ret_addr, &free_stack_trace);
                log.err("Allocation size {d} bytes does not match free size {d}. Allocation: {} Free: {}", .{
                    entry.value_ptr.bytes.len,
                    old_mem.len,
                    entry.value_ptr.getStackTrace(.alloc),
                    free_stack_trace,
                });
            }

            if (!config.never_unmap) {
                self.backing_allocator.rawFree(old_mem, alignment, ret_addr);
            }

            if (config.enable_memory_limit) {
                self.total_requested_bytes -= entry.value_ptr.requested_size;
            }

            if (config.verbose_log) {
                log.info("large free {d} bytes at {*}", .{ old_mem.len, old_mem.ptr });
            }

            if (!config.retain_metadata) {
                assert(self.large_allocations.remove(@intFromPtr(old_mem.ptr)));
            } else {
                entry.value_ptr.freed = true;
                entry.value_ptr.captureStackTrace(ret_addr, .free);
            }
        }

        fn alloc(context: *anyopaque, len: usize, alignment: mem.Alignment, ret_addr: usize) ?[*]u8 {
            const self: *Self = @ptrCast(@alignCast(context));
            self.mutex.lock();
            defer self.mutex.unlock();

            if (config.enable_memory_limit) {
                const new_req_bytes = self.total_requested_bytes + len;
                if (new_req_bytes > self.requested_memory_limit) return null;
                self.total_requested_bytes = new_req_bytes;
            }

            const size_class_index: usize = @max(@bitSizeOf(usize) - @clz(len - 1), @intFromEnum(alignment));
            if (size_class_index >= self.buckets.len) {
                @branchHint(.unlikely);
                self.large_allocations.ensureUnusedCapacity(self.backing_allocator, 1) catch return null;
                const ptr = self.backing_allocator.rawAlloc(len, alignment, ret_addr) orelse return null;
                const slice = ptr[0..len];

                const gop = self.large_allocations.getOrPutAssumeCapacity(@intFromPtr(slice.ptr));
                if (config.retain_metadata and !config.never_unmap) {
                    // Backing allocator may be reusing memory that we're retaining metadata for
                    assert(!gop.found_existing or gop.value_ptr.freed);
                } else {
                    assert(!gop.found_existing); // This would mean the kernel double-mapped pages.
                }
                gop.value_ptr.bytes = slice;
                if (config.enable_memory_limit)
                    gop.value_ptr.requested_size = len;
                gop.value_ptr.captureStackTrace(ret_addr, .alloc);
                if (config.retain_metadata) {
                    gop.value_ptr.freed = false;
                    if (config.never_unmap) {
                        gop.value_ptr.alignment = alignment;
                    }
                }

                if (config.verbose_log) {
                    log.info("large alloc {d} bytes at {*}", .{ slice.len, slice.ptr });
                }
                return slice.ptr;
            }

            const slot_count = slot_counts[size_class_index];

            if (self.buckets[size_class_index]) |bucket| {
                @branchHint(.likely);
                const slot_index = bucket.allocated_count;
                if (slot_index < slot_count) {
                    @branchHint(.likely);
                    bucket.allocated_count = slot_index + 1;
                    const used_bits_byte = bucket.usedBits(slot_index / @bitSizeOf(usize));
                    const used_bit_index: Log2USize = @intCast(slot_index % @bitSizeOf(usize));
                    used_bits_byte.* |= (@as(usize, 1) << used_bit_index);
                    const size_class = @as(usize, 1) << @as(Log2USize, @intCast(size_class_index));
                    if (config.stack_trace_frames > 0) {
                        bucket.captureStackTrace(ret_addr, slot_count, slot_index, .alloc);
                    }
                    if (config.safety) {
                        bucket.requestedSizes(slot_count)[slot_index] = @intCast(len);
                        bucket.log2PtrAligns(slot_count)[slot_index] = alignment;
                    }
                    const page_addr = @intFromPtr(bucket) & ~(page_size - 1);
                    const addr = page_addr + slot_index * size_class;
                    if (config.verbose_log) {
                        log.info("small alloc {d} bytes at 0x{x}", .{ len, addr });
                    }
                    return @ptrFromInt(addr);
                }
            }

            const page = self.backing_allocator.rawAlloc(page_size, page_align, @returnAddress()) orelse
                return null;
            const bucket: *BucketHeader = .fromPage(@intFromPtr(page), slot_count);
            bucket.* = .{
                .allocated_count = 1,
                .freed_count = 0,
                .prev = self.buckets[size_class_index],
                .next = null,
            };
            if (self.buckets[size_class_index]) |old_head| {
                old_head.next = bucket;
            }
            self.buckets[size_class_index] = bucket;

            if (!config.backing_allocator_zeroes) {
                @memset(@as([*]usize, @as(*[1]usize, bucket.usedBits(0)))[0..usedBitsCount(slot_count)], 0);
                if (config.safety) @memset(bucket.requestedSizes(slot_count), 0);
            }

            bucket.usedBits(0).* = 0b1;

            if (config.stack_trace_frames > 0) {
                bucket.captureStackTrace(ret_addr, slot_count, 0, .alloc);
            }

            if (config.safety) {
                bucket.requestedSizes(slot_count)[0] = @intCast(len);
                bucket.log2PtrAligns(slot_count)[0] = alignment;
            }

            if (config.verbose_log) {
                log.info("small alloc {d} bytes at 0x{x}", .{ len, @intFromPtr(page) });
            }

            return page;
        }

        fn resize(
            context: *anyopaque,
            memory: []u8,
            alignment: mem.Alignment,
            new_len: usize,
            return_address: usize,
        ) bool {
            const self: *Self = @ptrCast(@alignCast(context));
            self.mutex.lock();
            defer self.mutex.unlock();

            const size_class_index: usize = @max(@bitSizeOf(usize) - @clz(memory.len - 1), @intFromEnum(alignment));
            if (size_class_index >= self.buckets.len) {
                return self.resizeLarge(memory, alignment, new_len, return_address, false) != null;
            } else {
                return resizeSmall(self, memory, alignment, new_len, return_address, size_class_index);
            }
        }

        fn remap(
            context: *anyopaque,
            memory: []u8,
            alignment: mem.Alignment,
            new_len: usize,
            return_address: usize,
        ) ?[*]u8 {
            const self: *Self = @ptrCast(@alignCast(context));
            self.mutex.lock();
            defer self.mutex.unlock();

            const size_class_index: usize = @max(@bitSizeOf(usize) - @clz(memory.len - 1), @intFromEnum(alignment));
            if (size_class_index >= self.buckets.len) {
                return self.resizeLarge(memory, alignment, new_len, return_address, true);
            } else {
                return if (resizeSmall(self, memory, alignment, new_len, return_address, size_class_index)) memory.ptr else null;
            }
        }

        fn free(
            context: *anyopaque,
            old_memory: []u8,
            alignment: mem.Alignment,
            return_address: usize,
        ) void {
            const self: *Self = @ptrCast(@alignCast(context));
            self.mutex.lock();
            defer self.mutex.unlock();

            const size_class_index: usize = @max(@bitSizeOf(usize) - @clz(old_memory.len - 1), @intFromEnum(alignment));
            if (size_class_index >= self.buckets.len) {
                @branchHint(.unlikely);
                self.freeLarge(old_memory, alignment, return_address);
                return;
            }

            const slot_count = slot_counts[size_class_index];
            const freed_addr = @intFromPtr(old_memory.ptr);
            const page_addr = freed_addr & ~(page_size - 1);
            const bucket: *BucketHeader = .fromPage(page_addr, slot_count);
            if (bucket.canary != config.canary) @panic("Invalid free");
            const page_offset = freed_addr - page_addr;
            const size_class = @as(usize, 1) << @as(Log2USize, @intCast(size_class_index));
            const slot_index: SlotIndex = @intCast(page_offset / size_class);
            const used_byte_index = slot_index / @bitSizeOf(usize);
            const used_bit_index: Log2USize = @intCast(slot_index % @bitSizeOf(usize));
            const used_byte = bucket.usedBits(used_byte_index);
            const is_used = @as(u1, @truncate(used_byte.* >> used_bit_index)) != 0;
            if (!is_used) {
                if (config.safety) {
                    reportDoubleFree(
                        return_address,
                        bucketStackTrace(bucket, slot_count, slot_index, .alloc),
                        bucketStackTrace(bucket, slot_count, slot_index, .free),
                    );
                    // Recoverable since this is a free.
                    return;
                } else {
                    unreachable;
                }
            }

            // Definitely an in-use small alloc now.
            if (config.safety) {
                const requested_size = bucket.requestedSizes(slot_count)[slot_index];
                if (requested_size == 0) @panic("Invalid free");
                const slot_alignment = bucket.log2PtrAligns(slot_count)[slot_index];
                if (old_memory.len != requested_size or alignment != slot_alignment) {
                    var addresses: [stack_n]usize = [1]usize{0} ** stack_n;
                    var free_stack_trace: StackTrace = .{
                        .instruction_addresses = &addresses,
                        .index = 0,
                    };
                    std.debug.captureStackTrace(return_address, &free_stack_trace);
                    if (old_memory.len != requested_size) {
                        log.err("Allocation size {d} bytes does not match free size {d}. Allocation: {} Free: {}", .{
                            requested_size,
                            old_memory.len,
                            bucketStackTrace(bucket, slot_count, slot_index, .alloc),
                            free_stack_trace,
                        });
                    }
                    if (alignment != slot_alignment) {
                        log.err("Allocation alignment {d} does not match free alignment {d}. Allocation: {} Free: {}", .{
                            slot_alignment.toByteUnits(),
                            alignment.toByteUnits(),
                            bucketStackTrace(bucket, slot_count, slot_index, .alloc),
                            free_stack_trace,
                        });
                    }
                }
            }

            if (config.enable_memory_limit) {
                self.total_requested_bytes -= old_memory.len;
            }

            if (config.stack_trace_frames > 0) {
                // Capture stack trace to be the "first free", in case a double free happens.
                bucket.captureStackTrace(return_address, slot_count, slot_index, .free);
            }

            used_byte.* &= ~(@as(usize, 1) << used_bit_index);
            if (config.safety) {
                bucket.requestedSizes(slot_count)[slot_index] = 0;
            }
            bucket.freed_count += 1;
            if (bucket.freed_count == bucket.allocated_count) {
                if (bucket.prev) |prev| {
                    prev.next = bucket.next;
                }

                if (bucket.next) |next| {
                    assert(self.buckets[size_class_index] != bucket);
                    next.prev = bucket.prev;
                } else {
                    assert(self.buckets[size_class_index] == bucket);
                    self.buckets[size_class_index] = bucket.prev;
                }

                if (!config.never_unmap) {
                    const page: [*]align(page_size) u8 = @ptrFromInt(page_addr);
                    self.backing_allocator.rawFree(page[0..page_size], page_align, @returnAddress());
                }
            }
            if (config.verbose_log) {
                log.info("small free {d} bytes at {*}", .{ old_memory.len, old_memory.ptr });
            }
        }

        fn resizeSmall(
            self: *Self,
            memory: []u8,
            alignment: mem.Alignment,
            new_len: usize,
            return_address: usize,
            size_class_index: usize,
        ) bool {
            const new_size_class_index: usize = @max(@bitSizeOf(usize) - @clz(new_len - 1), @intFromEnum(alignment));
            if (!config.safety) return new_size_class_index == size_class_index;
            const slot_count = slot_counts[size_class_index];
            const memory_addr = @intFromPtr(memory.ptr);
            const page_addr = memory_addr & ~(page_size - 1);
            const bucket: *BucketHeader = .fromPage(page_addr, slot_count);
            if (bucket.canary != config.canary) @panic("Invalid free");
            const page_offset = memory_addr - page_addr;
            const size_class = @as(usize, 1) << @as(Log2USize, @intCast(size_class_index));
            const slot_index: SlotIndex = @intCast(page_offset / size_class);
            const used_byte_index = slot_index / @bitSizeOf(usize);
            const used_bit_index: Log2USize = @intCast(slot_index % @bitSizeOf(usize));
            const used_byte = bucket.usedBits(used_byte_index);
            const is_used = @as(u1, @truncate(used_byte.* >> used_bit_index)) != 0;
            if (!is_used) {
                reportDoubleFree(
                    return_address,
                    bucketStackTrace(bucket, slot_count, slot_index, .alloc),
                    bucketStackTrace(bucket, slot_count, slot_index, .free),
                );
                // Recoverable since this is a free.
                return false;
            }

            // Definitely an in-use small alloc now.
            const requested_size = bucket.requestedSizes(slot_count)[slot_index];
            if (requested_size == 0) @panic("Invalid free");
            const slot_alignment = bucket.log2PtrAligns(slot_count)[slot_index];
            if (memory.len != requested_size or alignment != slot_alignment) {
                var addresses: [stack_n]usize = [1]usize{0} ** stack_n;
                var free_stack_trace: StackTrace = .{
                    .instruction_addresses = &addresses,
                    .index = 0,
                };
                std.debug.captureStackTrace(return_address, &free_stack_trace);
                if (memory.len != requested_size) {
                    log.err("Allocation size {d} bytes does not match free size {d}. Allocation: {} Free: {}", .{
                        requested_size,
                        memory.len,
                        bucketStackTrace(bucket, slot_count, slot_index, .alloc),
                        free_stack_trace,
                    });
                }
                if (alignment != slot_alignment) {
                    log.err("Allocation alignment {d} does not match free alignment {d}. Allocation: {} Free: {}", .{
                        slot_alignment.toByteUnits(),
                        alignment.toByteUnits(),
                        bucketStackTrace(bucket, slot_count, slot_index, .alloc),
                        free_stack_trace,
                    });
                }
            }

            if (new_size_class_index != size_class_index) return false;

            const prev_req_bytes = self.total_requested_bytes;
            if (config.enable_memory_limit) {
                const new_req_bytes = prev_req_bytes - memory.len + new_len;
                if (new_req_bytes > prev_req_bytes and new_req_bytes > self.requested_memory_limit) {
                    return false;
                }
                self.total_requested_bytes = new_req_bytes;
            }

            if (memory.len > new_len) @memset(memory[new_len..], undefined);
            if (config.verbose_log)
                log.info("small resize {d} bytes at {*} to {d}", .{ memory.len, memory.ptr, new_len });

            if (config.safety)
                bucket.requestedSizes(slot_count)[slot_index] = @intCast(new_len);

            if (config.resize_stack_traces)
                bucket.captureStackTrace(return_address, slot_count, slot_index, .alloc);

            return true;
        }
    };
}

                return @ptrCast(aligns_ptr[0..slot_count]);
            }

            fn stackTracePtr(
                bucket: *BucketHeader,
                slot_count: usize,
                slot_index: SlotIndex,
                trace_kind: TraceKind,
            ) *[stack_n]usize {
                const start_ptr = @as([*]u8, @ptrCast(bucket)) + bucketStackFramesStart(slot_count);
                const addr = start_ptr + one_trace_size * traces_per_slot * slot_index +
                    @intFromEnum(trace_kind) * @as(usize, one_trace_size);
                return @ptrCast(@alignCast(addr));
            }

            fn captureStackTrace(
                bucket: *BucketHeader,
                ret_addr: usize,
                slot_count: usize,
                slot_index: SlotIndex,
                trace_kind: TraceKind,
            ) void {
                // Initialize them to 0. When determining the count we must look
                // for non zero addresses.
                const stack_addresses = bucket.stackTracePtr(slot_count, slot_index, trace_kind);
                collectStackTrace(ret_addr, stack_addresses);
            }
        };

        pub fn allocator(self: *Self) Allocator {
            return .{
                .ptr = self,
                .vtable = &.{
                    .alloc = alloc,
                    .resize = resize,
                    .remap = remap,
                    .free = free,
                },
            };
        }

        fn bucketStackTrace(
            bucket: *BucketHeader,
            slot_count: usize,
            slot_index: SlotIndex,
            trace_kind: TraceKind,
        ) StackTrace {
            const stack_addresses = bucket.stackTracePtr(slot_count, slot_index, trace_kind);
            var len: usize = 0;
            while (len < stack_n and stack_addresses[len] != 0) {
                len += 1;
            }
            return .{
                .instruction_addresses = stack_addresses,
                .index = len,
            };
        }

        fn bucketRequestedSizesStart(slot_count: usize) usize {
            if (!config.safety) @compileError("requested sizes are not stored unless safety is enabled");
            return mem.alignForward(
                usize,
                @sizeOf(BucketHeader) + usedBitsSize(slot_count),
                @alignOf(LargestSizeClassInt),
            );
        }

        fn bucketAlignsStart(slot_count: usize) usize {
            if (!config.safety) @compileError("requested sizes are not stored unless safety is enabled");
            return bucketRequestedSizesStart(slot_count) + (@sizeOf(LargestSizeClassInt) * slot_count);
        }

        fn bucketStackFramesStart(slot_count: usize) usize {
            const unaligned_start = if (config.safety)
                bucketAlignsStart(slot_count) + slot_count
            else
                @sizeOf(BucketHeader) + usedBitsSize(slot_count);
            return mem.alignForward(usize, unaligned_start, @alignOf(usize));
        }

        fn bucketSize(slot_count: usize) usize {
            return bucketStackFramesStart(slot_count) + one_trace_size * traces_per_slot * slot_count;
        }

        /// This is executed only at compile-time to prepopulate a lookup table.
        fn calculateSlotCount(size_class_index: usize) SlotIndex {
            const size_class = @as(usize, 1) << @as(Log2USize, @intCast(size_class_index));
            var lower: usize = 1 << minimum_slots_per_bucket_log2;
            var upper: usize = (page_size - bucketSize(lower)) / size_class;
            while (upper > lower) {
                const proposed: usize = lower + (upper - lower) / 2;
                if (proposed == lower) return lower;
                const slots_end = proposed * size_class;
                const header_begin = mem.alignForward(usize, slots_end, @alignOf(BucketHeader));
                const end = header_begin + bucketSize(proposed);
                if (end > page_size) {
                    upper = proposed - 1;
                } else {
                    lower = proposed;
                }
            }
            const slots_end = lower * size_class;
            const header_begin = mem.alignForward(usize, slots_end, @alignOf(BucketHeader));
            const end = header_begin + bucketSize(lower);
            assert(end <= page_size);
            return lower;
        }

        fn usedBitsCount(slot_count: usize) usize {
            return (slot_count + (@bitSizeOf(usize) - 1)) / @bitSizeOf(usize);
        }

        fn usedBitsSize(slot_count: usize) usize {
            return usedBitsCount(slot_count) * @sizeOf(usize);
        }

        fn detectLeaksInBucket(bucket: *BucketHeader, size_class_index: usize, used_bits_count: usize) bool {
            const size_class = @as(usize, 1) << @as(Log2USize, @intCast(size_class_index));
            const slot_count = slot_counts[size_class_index];
            var leaks = false;
            for (0..used_bits_count) |used_bits_byte| {
                const used_int = bucket.usedBits(used_bits_byte).*;
                if (used_int != 0) {
                    for (0..@bitSizeOf(usize)) |bit_index_usize| {
                        const bit_index: Log2USize = @intCast(bit_index_usize);
                        const is_used = @as(u1, @truncate(used_int >> bit_index)) != 0;
                        if (is_used) {
                            const slot_index: SlotIndex = @intCast(used_bits_byte * @bitSizeOf(usize) + bit_index);
                            const stack_trace = bucketStackTrace(bucket, slot_count, slot_index, .alloc);
                            const page_addr = @intFromPtr(bucket) & ~(page_size - 1);
                            const addr = page_addr + slot_index * size_class;
                            log.err("memory address 0x{x} leaked: {}", .{ addr, stack_trace });
                            leaks = true;
                        }
                    }
                }
            }
            return leaks;
        }

        /// Emits log messages for leaks and then returns whether there were any leaks.
        pub fn detectLeaks(self: *Self) bool {
            var leaks = false;

            for (self.buckets, 0..) |init_optional_bucket, size_class_index| {
                var optional_bucket = init_optional_bucket;
                const slot_count = slot_counts[size_class_index];
                const used_bits_count = usedBitsCount(slot_count);
                while (optional_bucket) |bucket| {
                    leaks = detectLeaksInBucket(bucket, size_class_index, used_bits_count) or leaks;
                    optional_bucket = bucket.prev;
                }
            }

            var it = self.large_allocations.valueIterator();
            while (it.next()) |large_alloc| {
                if (config.retain_metadata and large_alloc.freed) continue;
                const stack_trace = large_alloc.getStackTrace(.alloc);
                log.err("memory address 0x{x} leaked: {}", .{
                    @intFromPtr(large_alloc.bytes.ptr), stack_trace,
                });
                leaks = true;
            }
            return leaks;
        }

        fn freeRetainedMetadata(self: *Self) void {
            comptime assert(config.retain_metadata);
            if (config.never_unmap) {
                // free large allocations that were intentionally leaked by never_unmap
                var it = self.large_allocations.iterator();
                while (it.next()) |large| {
                    if (large.value_ptr.freed) {
                        self.backing_allocator.rawFree(large.value_ptr.bytes, large.value_ptr.alignment, @returnAddress());
                    }
                }
            }
        }

        pub fn flushRetainedMetadata(self: *Self) void {
            comptime assert(config.retain_metadata);
            self.freeRetainedMetadata();
            // also remove entries from large_allocations
            var it = self.large_allocations.iterator();
            while (it.next()) |large| {
                if (large.value_ptr.freed) {
                    _ = self.large_allocations.remove(@intFromPtr(large.value_ptr.bytes.ptr));
                }
            }
        }

        /// Returns `std.heap.Check.leak` if there were leaks; `std.heap.Check.ok` otherwise.
        pub fn deinit(self: *Self) std.heap.Check {
            const leaks = if (config.safety) self.detectLeaks() else false;
            if (config.retain_metadata) self.freeRetainedMetadata();
            self.large_allocations.deinit(self.backing_allocator);
            self.* = undefined;
            return if (leaks) .leak else .ok;
        }

        fn collectStackTrace(first_trace_addr: usize, addresses: *[stack_n]usize) void {
            if (stack_n == 0) return;
            @memset(addresses, 0);
            var stack_trace: StackTrace = .{
                .instruction_addresses = addresses,
                .index = 0,
            };
            std.debug.captureStackTrace(first_trace_addr, &stack_trace);
        }

        fn reportDoubleFree(ret_addr: usize, alloc_stack_trace: StackTrace, free_stack_trace: StackTrace) void {
            var addresses: [stack_n]usize = @splat(0);
            var second_free_stack_trace: StackTrace = .{
                .instruction_addresses = &addresses,
                .index = 0,
            };
            std.debug.captureStackTrace(ret_addr, &second_free_stack_trace);
            log.err("Double free detected. Allocation: {} First free: {} Second free: {}", .{
                alloc_stack_trace, free_stack_trace, second_free_stack_trace,
            });
        }

        /// This function assumes the object is in the large object storage regardless
        /// of the parameters.
        fn resizeLarge(
            self: *Self,
            old_mem: []u8,
            alignment: mem.Alignment,
            new_size: usize,
            ret_addr: usize,
            may_move: bool,
        ) ?[*]u8 {
            if (config.retain_metadata and may_move) {
                // Before looking up the entry (since this could invalidate
                // it), we must reserve space for the new entry in case the
                // allocation is relocated.
                self.large_allocations.ensureUnusedCapacity(self.backing_allocator, 1) catch return null;
            }

            const entry = self.large_allocations.getEntry(@intFromPtr(old_mem.ptr)) orelse {
                if (config.safety) {
                    @panic("Invalid free");
                } else {
                    unreachable;
                }
            };

            if (config.retain_metadata and entry.value_ptr.freed) {
                if (config.safety) {
                    reportDoubleFree(ret_addr, entry.value_ptr.getStackTrace(.alloc), entry.value_ptr.getStackTrace(.free));
                    @panic("Unrecoverable double free");
                } else {
                    unreachable;
                }
            }

            if (config.safety and old_mem.len != entry.value_ptr.bytes.len) {
                var addresses: [stack_n]usize = [1]usize{0} ** stack_n;
                var free_stack_trace: StackTrace = .{
                    .instruction_addresses = &addresses,
                    .index = 0,
                };
                std.debug.captureStackTrace(ret_addr, &free_stack_trace);
                log.err("Allocation size {d} bytes does not match free size {d}. Allocation: {} Free: {}", .{
                    entry.value_ptr.bytes.len,
                    old_mem.len,
                    entry.value_ptr.getStackTrace(.alloc),
                    free_stack_trace,
                });
            }

            // If this would move the allocation into a small size class,
            // refuse the request, because it would require creating small
            // allocation metadata.
            const new_size_class_index: usize = @max(@bitSizeOf(usize) - @clz(new_size - 1), @intFromEnum(alignment));
            if (new_size_class_index < self.buckets.len) return null;

            // Do memory limit accounting with requested sizes rather than what
            // backing_allocator returns because if we want to return
            // error.OutOfMemory, we have to leave allocation untouched, and
            // that is impossible to guarantee after calling
            // backing_allocator.rawResize.
            const prev_req_bytes = self.total_requested_bytes;
            if (config.enable_memory_limit) {
                const new_req_bytes = prev_req_bytes + new_size - entry.value_ptr.requested_size;
                if (new_req_bytes > prev_req_bytes and new_req_bytes > self.requested_memory_limit) {
                    return null;
                }
                self.total_requested_bytes = new_req_bytes;
            }

            const opt_resized_ptr = if (may_move)
                self.backing_allocator.rawRemap(old_mem, alignment, new_size, ret_addr)
            else if (self.backing_allocator.rawResize(old_mem, alignment, new_size, ret_addr))
                old_mem.ptr
            else
                null;

            const resized_ptr = opt_resized_ptr orelse {
                if (config.enable_memory_limit) {
                    self.total_requested_bytes = prev_req_bytes;
                }
                return null;
            };

            if (config.enable_memory_limit) {
                entry.value_ptr.requested_size = new_size;
            }

            if (config.verbose_log) {
                log.info("large resize {d} bytes at {*} to {d} at {*}", .{
                    old_mem.len, old_mem.ptr, new_size, resized_ptr,
                });
            }
            entry.value_ptr.bytes = resized_ptr[0..new_size];
            if (config.resize_stack_traces)
                entry.value_ptr.captureStackTrace(ret_addr, .alloc);

            // Update the key of the hash map if the memory was relocated.
            if (resized_ptr != old_mem.ptr) {
                const large_alloc = entry.value_ptr.*;
                if (config.retain_metadata) {
                    entry.value_ptr.freed = true;
                    entry.value_ptr.captureStackTrace(ret_addr, .free);
                } else {
                    self.large_allocations.removeByPtr(entry.key_ptr);
                }

                const gop = self.large_allocations.getOrPutAssumeCapacity(@intFromPtr(resized_ptr));
                if (config.retain_metadata and !config.never_unmap) {
                    // Backing allocator may be reusing memory that we're retaining metadata for
                    assert(!gop.found_existing or gop.value_ptr.freed);
                } else {
                    assert(!gop.found_existing); // This would mean the kernel double-mapped pages.
                }
                gop.value_ptr.* = large_alloc;
            }

            return resized_ptr;
        }

        /// This function assumes the object is in the large object storage regardless
        /// of the parameters.
        fn freeLarge(
            self: *Self,
            old_mem: []u8,
            alignment: mem.Alignment,
            ret_addr: usize,
        ) void {
            const entry = self.large_allocations.getEntry(@intFromPtr(old_mem.ptr)) orelse {
                if (config.safety) {
                    @panic("Invalid free");
                } else {
                    unreachable;
                }
            };

            if (config.retain_metadata and entry.value_ptr.freed) {
                if (config.safety) {
                    reportDoubleFree(ret_addr, entry.value_ptr.getStackTrace(.alloc), entry.value_ptr.getStackTrace(.free));
                    return;
                } else {
                    unreachable;
                }
            }

            if (config.safety and old_mem.len != entry.value_ptr.bytes.len) {
                var addresses: [stack_n]usize = [1]usize{0} ** stack_n;
                var free_stack_trace = StackTrace{
                    .instruction_addresses = &addresses,
                    .index = 0,
                };
                std.debug.captureStackTrace(ret_addr, &free_stack_trace);
                log.err("Allocation size {d} bytes does not match free size {d}. Allocation: {} Free: {}", .{
                    entry.value_ptr.bytes.len,
                    old_mem.len,
                    entry.value_ptr.getStackTrace(.alloc),
                    free_stack_trace,
                });
            }

            if (!config.never_unmap) {
                self.backing_allocator.rawFree(old_mem, alignment, ret_addr);
            }

            if (config.enable_memory_limit) {
                self.total_requested_bytes -= entry.value_ptr.requested_size;
            }

            if (config.verbose_log) {
                log.info("large free {d} bytes at {*}", .{ old_mem.len, old_mem.ptr });
            }

            if (!config.retain_metadata) {
                assert(self.large_allocations.remove(@intFromPtr(old_mem.ptr)));
            } else {
                entry.value_ptr.freed = true;
                entry.value_ptr.captureStackTrace(ret_addr, .free);
            }
        }

        fn alloc(context: *anyopaque, len: usize, alignment: mem.Alignment, ret_addr: usize) ?[*]u8 {
            const self: *Self = @ptrCast(@alignCast(context));
            self.mutex.lock();
            defer self.mutex.unlock();

            if (config.enable_memory_limit) {
                const new_req_bytes = self.total_requested_bytes + len;
                if (new_req_bytes > self.requested_memory_limit) return null;
                self.total_requested_bytes = new_req_bytes;
            }

            const size_class_index: usize = @max(@bitSizeOf(usize) - @clz(len - 1), @intFromEnum(alignment));
            if (size_class_index >= self.buckets.len) {
                @branchHint(.unlikely);
                self.large_allocations.ensureUnusedCapacity(self.backing_allocator, 1) catch return null;
                const ptr = self.backing_allocator.rawAlloc(len, alignment, ret_addr) orelse return null;
                const slice = ptr[0..len];

                const gop = self.large_allocations.getOrPutAssumeCapacity(@intFromPtr(slice.ptr));
                if (config.retain_metadata and !config.never_unmap) {
                    // Backing allocator may be reusing memory that we're retaining metadata for
                    assert(!gop.found_existing or gop.value_ptr.freed);
                } else {
                    assert(!gop.found_existing); // This would mean the kernel double-mapped pages.
                }
                gop.value_ptr.bytes = slice;
                if (config.enable_memory_limit)
                    gop.value_ptr.requested_size = len;
                gop.value_ptr.captureStackTrace(ret_addr, .alloc);
                if (config.retain_metadata) {
                    gop.value_ptr.freed = false;
                    if (config.never_unmap) {
                        gop.value_ptr.alignment = alignment;
                    }
                }

                if (config.verbose_log) {
                    log.info("large alloc {d} bytes at {*}", .{ slice.len, slice.ptr });
                }
                return slice.ptr;
            }

            const slot_count = slot_counts[size_class_index];

            if (self.buckets[size_class_index]) |bucket| {
                @branchHint(.likely);
                const slot_index = bucket.allocated_count;
                if (slot_index < slot_count) {
                    @branchHint(.likely);
                    bucket.allocated_count = slot_index + 1;
                    const used_bits_byte = bucket.usedBits(slot_index / @bitSizeOf(usize));
                    const used_bit_index: Log2USize = @intCast(slot_index % @bitSizeOf(usize));
                    used_bits_byte.* |= (@as(usize, 1) << used_bit_index);
                    const size_class = @as(usize, 1) << @as(Log2USize, @intCast(size_class_index));
                    if (config.stack_trace_frames > 0) {
                        bucket.captureStackTrace(ret_addr, slot_count, slot_index, .alloc);
                    }
                    if (config.safety) {
                        bucket.requestedSizes(slot_count)[slot_index] = @intCast(len);
                        bucket.log2PtrAligns(slot_count)[slot_index] = alignment;
                    }
                    const page_addr = @intFromPtr(bucket) & ~(page_size - 1);
                    const addr = page_addr + slot_index * size_class;
                    if (config.verbose_log) {
                        log.info("small alloc {d} bytes at 0x{x}", .{ len, addr });
                    }
                    return @ptrFromInt(addr);
                }
            }

            const page = self.backing_allocator.rawAlloc(page_size, page_align, @returnAddress()) orelse
                return null;
            const bucket: *BucketHeader = .fromPage(@intFromPtr(page), slot_count);
            bucket.* = .{
                .allocated_count = 1,
                .freed_count = 0,
                .prev = self.buckets[size_class_index],
                .next = null,
            };
            if (self.buckets[size_class_index]) |old_head| {
                old_head.next = bucket;
            }
            self.buckets[size_class_index] = bucket;

            if (!config.backing_allocator_zeroes) {
                @memset(@as([*]usize, @as(*[1]usize, bucket.usedBits(0)))[0..usedBitsCount(slot_count)], 0);
                if (config.safety) @memset(bucket.requestedSizes(slot_count), 0);
            }

            bucket.usedBits(0).* = 0b1;

            if (config.stack_trace_frames > 0) {
                bucket.captureStackTrace(ret_addr, slot_count, 0, .alloc);
            }

            if (config.safety) {
                bucket.requestedSizes(slot_count)[0] = @intCast(len);
                bucket.log2PtrAligns(slot_count)[0] = alignment;
            }

            if (config.verbose_log) {
                log.info("small alloc {d} bytes at 0x{x}", .{ len, @intFromPtr(page) });
            }

            return page;
        }

        fn resize(
            context: *anyopaque,
            memory: []u8,
            alignment: mem.Alignment,
            new_len: usize,
            return_address: usize,
        ) bool {
            const self: *Self = @ptrCast(@alignCast(context));
            self.mutex.lock();
            defer self.mutex.unlock();

            const size_class_index: usize = @max(@bitSizeOf(usize) - @clz(memory.len - 1), @intFromEnum(alignment));
            if (size_class_index >= self.buckets.len) {
                return self.resizeLarge(memory, alignment, new_len, return_address, false) != null;
            } else {
                return resizeSmall(self, memory, alignment, new_len, return_address, size_class_index);
            }
        }

        fn remap(
            context: *anyopaque,
            memory: []u8,
            alignment: mem.Alignment,
            new_len: usize,
            return_address: usize,
        ) ?[*]u8 {
            const self: *Self = @ptrCast(@alignCast(context));
            self.mutex.lock();
            defer self.mutex.unlock();

            const size_class_index: usize = @max(@bitSizeOf(usize) - @clz(memory.len - 1), @intFromEnum(alignment));
            if (size_class_index >= self.buckets.len) {
                return self.resizeLarge(memory, alignment, new_len, return_address, true);
            } else {
                return if (resizeSmall(self, memory, alignment, new_len, return_address, size_class_index)) memory.ptr else null;
            }
        }

        fn free(
            context: *anyopaque,
            old_memory: []u8,
            alignment: mem.Alignment,
            return_address: usize,
        ) void {
            const self: *Self = @ptrCast(@alignCast(context));
            self.mutex.lock();
            defer self.mutex.unlock();

            const size_class_index: usize = @max(@bitSizeOf(usize) - @clz(old_memory.len - 1), @intFromEnum(alignment));
            if (size_class_index >= self.buckets.len) {
                @branchHint(.unlikely);
                self.freeLarge(old_memory, alignment, return_address);
                return;
            }

            const slot_count = slot_counts[size_class_index];
            const freed_addr = @intFromPtr(old_memory.ptr);
            const page_addr = freed_addr & ~(page_size - 1);
            const bucket: *BucketHeader = .fromPage(page_addr, slot_count);
            if (bucket.canary != config.canary) @panic("Invalid free");
            const page_offset = freed_addr - page_addr;
            const size_class = @as(usize, 1) << @as(Log2USize, @intCast(size_class_index));
            const slot_index: SlotIndex = @intCast(page_offset / size_class);
            const used_byte_index = slot_index / @bitSizeOf(usize);
            const used_bit_index: Log2USize = @intCast(slot_index % @bitSizeOf(usize));
            const used_byte = bucket.usedBits(used_byte_index);
            const is_used = @as(u1, @truncate(used_byte.* >> used_bit_index)) != 0;
            if (!is_used) {
                if (config.safety) {
                    reportDoubleFree(
                        return_address,
                        bucketStackTrace(bucket, slot_count, slot_index, .alloc),
                        bucketStackTrace(bucket, slot_count, slot_index, .free),
                    );
                    // Recoverable since this is a free.
                    return;
                } else {
                    unreachable;
                }
            }

            // Definitely an in-use small alloc now.
            if (config.safety) {
                const requested_size = bucket.requestedSizes(slot_count)[slot_index];
                if (requested_size == 0) @panic("Invalid free");
                const slot_alignment = bucket.log2PtrAligns(slot_count)[slot_index];
                if (old_memory.len != requested_size or alignment != slot_alignment) {
                    var addresses: [stack_n]usize = [1]usize{0} ** stack_n;
                    var free_stack_trace: StackTrace = .{
                        .instruction_addresses = &addresses,
                        .index = 0,
                    };
                    std.debug.captureStackTrace(return_address, &free_stack_trace);
                    if (old_memory.len != requested_size) {
                        log.err("Allocation size {d} bytes does not match free size {d}. Allocation: {} Free: {}", .{
                            requested_size,
                            old_memory.len,
                            bucketStackTrace(bucket, slot_count, slot_index, .alloc),
                            free_stack_trace,
                        });
                    }
                    if (alignment != slot_alignment) {
                        log.err("Allocation alignment {d} does not match free alignment {d}. Allocation: {} Free: {}", .{
                            slot_alignment.toByteUnits(),
                            alignment.toByteUnits(),
                            bucketStackTrace(bucket, slot_count, slot_index, .alloc),
                            free_stack_trace,
                        });
                    }
                }
            }

            if (config.enable_memory_limit) {
                self.total_requested_bytes -= old_memory.len;
            }

            if (config.stack_trace_frames > 0) {
                // Capture stack trace to be the "first free", in case a double free happens.
                bucket.captureStackTrace(return_address, slot_count, slot_index, .free);
            }

            used_byte.* &= ~(@as(usize, 1) << used_bit_index);
            if (config.safety) {
                bucket.requestedSizes(slot_count)[slot_index] = 0;
            }
            bucket.freed_count += 1;
            if (bucket.freed_count == bucket.allocated_count) {
                if (bucket.prev) |prev| {
                    prev.next = bucket.next;
                }

                if (bucket.next) |next| {
                    assert(self.buckets[size_class_index] != bucket);
                    next.prev = bucket.prev;
                } else {
                    assert(self.buckets[size_class_index] == bucket);
                    self.buckets[size_class_index] = bucket.prev;
                }

                if (!config.never_unmap) {
                    const page: [*]align(page_size) u8 = @ptrFromInt(page_addr);
                    self.backing_allocator.rawFree(page[0..page_size], page_align, @returnAddress());
                }
            }
            if (config.verbose_log) {
                log.info("small free {d} bytes at {*}", .{ old_memory.len, old_memory.ptr });
            }
        }

        fn resizeSmall(
            self: *Self,
            memory: []u8,
            alignment: mem.Alignment,
            new_len: usize,
            return_address: usize,
            size_class_index: usize,
        ) bool {
            const new_size_class_index: usize = @max(@bitSizeOf(usize) - @clz(new_len - 1), @intFromEnum(alignment));
            if (!config.safety) return new_size_class_index == size_class_index;
            const slot_count = slot_counts[size_class_index];
            const memory_addr = @intFromPtr(memory.ptr);
            const page_addr = memory_addr & ~(page_size - 1);
            const bucket: *BucketHeader = .fromPage(page_addr, slot_count);
            if (bucket.canary != config.canary) @panic("Invalid free");
            const page_offset = memory_addr - page_addr;
            const size_class = @as(usize, 1) << @as(Log2USize, @intCast(size_class_index));
            const slot_index: SlotIndex = @intCast(page_offset / size_class);
            const used_byte_index = slot_index / @bitSizeOf(usize);
            const used_bit_index: Log2USize = @intCast(slot_index % @bitSizeOf(usize));
            const used_byte = bucket.usedBits(used_byte_index);
            const is_used = @as(u1, @truncate(used_byte.* >> used_bit_index)) != 0;
            if (!is_used) {
                reportDoubleFree(
                    return_address,
                    bucketStackTrace(bucket, slot_count, slot_index, .alloc),
                    bucketStackTrace(bucket, slot_count, slot_index, .free),
                );
                // Recoverable since this is a free.
                return false;
            }

            // Definitely an in-use small alloc now.
            const requested_size = bucket.requestedSizes(slot_count)[slot_index];
            if (requested_size == 0) @panic("Invalid free");
            const slot_alignment = bucket.log2PtrAligns(slot_count)[slot_index];
            if (memory.len != requested_size or alignment != slot_alignment) {
                var addresses: [stack_n]usize = [1]usize{0} ** stack_n;
                var free_stack_trace: StackTrace = .{
                    .instruction_addresses = &addresses,
                    .index = 0,
                };
                std.debug.captureStackTrace(return_address, &free_stack_trace);
                if (memory.len != requested_size) {
                    log.err("Allocation size {d} bytes does not match free size {d}. Allocation: {} Free: {}", .{
                        requested_size,
                        memory.len,
                        bucketStackTrace(bucket, slot_count, slot_index, .alloc),
                        free_stack_trace,
                    });
                }
                if (alignment != slot_alignment) {
                    log.err("Allocation alignment {d} does not match free alignment {d}. Allocation: {} Free: {}", .{
                        slot_alignment.toByteUnits(),
                        alignment.toByteUnits(),
                        bucketStackTrace(bucket, slot_count, slot_index, .alloc),
                        free_stack_trace,
                    });
                }
            }

            if (new_size_class_index != size_class_index) return false;

            const prev_req_bytes = self.total_requested_bytes;
            if (config.enable_memory_limit) {
                const new_req_bytes = prev_req_bytes - memory.len + new_len;
                if (new_req_bytes > prev_req_bytes and new_req_bytes > self.requested_memory_limit) {
                    return false;
                }
                self.total_requested_bytes = new_req_bytes;
            }

            if (memory.len > new_len) @memset(memory[new_len..], undefined);
            if (config.verbose_log)
                log.info("small resize {d} bytes at {*} to {d}", .{ memory.len, memory.ptr, new_len });

            if (config.safety)
                bucket.requestedSizes(slot_count)[slot_index] = @intCast(new_len);

            if (config.resize_stack_traces)
                bucket.captureStackTrace(return_address, slot_count, slot_index, .alloc);

            return true;
        }
    };
}
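
For illustration, a sketch that turns on the optional memory limit from Config (the enable_memory_limit and requested_memory_limit fields shown above); the 1024-byte limit is an arbitrary choice:

const std = @import("std");

test "DebugAllocator with a requested memory limit" {
    var dbg: std.heap.DebugAllocator(.{ .enable_memory_limit = true }) = .init;
    defer _ = dbg.deinit();
    dbg.requested_memory_limit = 1024;

    const gpa = dbg.allocator();
    const small = try gpa.alloc(u8, 512);
    defer gpa.free(small);

    // The total requested bytes would exceed the limit, so the request is
    // refused before the backing allocator is ever consulted.
    try std.testing.expectError(error.OutOfMemory, gpa.alloc(u8, 4096));
}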

Type FunctionMemoryPool[src]

A memory pool that can allocate objects of a single type very quickly. Use this when you need to allocate many objects of the same type, because it outperforms general-purpose allocators.

Parameters

Item: type

Source Code

Source code
pub fn MemoryPool(comptime Item: type) type {
    return MemoryPoolAligned(Item, @alignOf(Item));
}
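
A small usage sketch; the item type u64 and the stored values are arbitrary:

const std = @import("std");

test "MemoryPool create and destroy" {
    var pool = std.heap.MemoryPool(u64).init(std.testing.allocator);
    defer pool.deinit();

    const first = try pool.create();
    first.* = 123;
    pool.destroy(first); // the slot goes back on the pool's free list

    const second = try pool.create(); // pops the slot freed above
    second.* = 456;
}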

Type FunctionMemoryPoolAligned[src]

A memory pool that can allocate objects of a single type very quickly. Use this when you need to allocate many objects of the same type, because it outperforms general-purpose allocators.

Parameters

Item: type
alignment: u29

Source Code

Source code
pub fn MemoryPoolAligned(comptime Item: type, comptime alignment: u29) type {
    if (@alignOf(Item) == alignment) {
        return MemoryPoolExtra(Item, .{});
    } else {
        return MemoryPoolExtra(Item, .{ .alignment = alignment });
    }
}

Type FunctionMemoryPoolExtra[src]

A memory pool that can allocate objects of a single type very quickly. Use this when you need to allocate many objects of the same type, because it outperforms general-purpose allocators.

Parameters

Item: type
pool_options: Options

Fields

free_list: ?NodePtr = null

Values

Constantitem_size[src]

Size of the memory pool items. This is not necessarily the same as @sizeOf(Item) as the pool also uses the items for internal means.

Source Code

Source code
pub const item_size = @max(@sizeOf(Node), @sizeOf(Item))

Constantitem_alignment[src]

Alignment of the memory pool items. This is not necessarily the same as @alignOf(Item) as the pool also uses the items for internal means.

Source Code

Source code
pub const item_alignment = @max(node_alignment, pool_options.alignment orelse @alignOf(Item))

Functions

Functioninit[src]

pub fn init(allocator: std.mem.Allocator) Pool

Creates a new memory pool.

Parameters

allocator: std.mem.Allocator

Source Code

Source code
pub fn init(allocator: std.mem.Allocator) Pool {
    return .{ .arena = std.heap.ArenaAllocator.init(allocator) };
}

FunctioninitPreheated[src]

pub fn initPreheated(allocator: std.mem.Allocator, initial_size: usize) MemoryPoolError!Pool

Creates a new memory pool and pre-allocates initial_size items. This allows up to initial_size active allocations before an OutOfMemory error happens when calling create().

Parameters

allocator: std.mem.Allocator
initial_size: usize

Source Code

Source code
pub fn initPreheated(allocator: std.mem.Allocator, initial_size: usize) MemoryPoolError!Pool {
    var pool = init(allocator);
    errdefer pool.deinit();
    try pool.preheat(initial_size);
    return pool;
}
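
A sketch of preheating, assuming the default (growable) options; the count of 8 is arbitrary:

const std = @import("std");

test "MemoryPool.initPreheated" {
    // Eight slots are allocated up front, so the first eight create()
    // calls are served straight from the free list.
    var pool = try std.heap.MemoryPool(u32).initPreheated(std.testing.allocator, 8);
    defer pool.deinit();

    const item = try pool.create();
    item.* = 42;
}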

Functiondeinit[src]

pub fn deinit(pool: *Pool) void

Destroys the memory pool and frees all allocated memory.

Parameters

pool: *Pool

Source Code

Source code
pub fn deinit(pool: *Pool) void {
    pool.arena.deinit();
    pool.* = undefined;
}

Functionpreheat[src]

pub fn preheat(pool: *Pool, size: usize) MemoryPoolError!void

Preheats the memory pool by pre-allocating size items. This allows up to size active allocations before an OutOfMemory error might happen when calling create().

Parameters

pool: *Pool
size: usize

Source Code

Source code
pub fn preheat(pool: *Pool, size: usize) MemoryPoolError!void {
    var i: usize = 0;
    while (i < size) : (i += 1) {
        const raw_mem = try pool.allocNew();
        const free_node = @as(NodePtr, @ptrCast(raw_mem));
        free_node.* = Node{
            .next = pool.free_list,
        };
        pool.free_list = free_node;
    }
}

Functionreset[src]

pub fn reset(pool: *Pool, mode: ResetMode) bool

Resets the memory pool and destroys all allocated items. This can be used to batch-destroy all objects without invalidating the memory pool.

The function returns whether the reset operation was successful. If the reallocation failed, false is returned; the pool is still fully functional in that case and all memory is released, but future allocations might be slower.

NOTE: If mode is free_all, the function will always return true.

Parameters

pool: *Pool
mode: ResetMode

Source Code

Source code
pub fn reset(pool: *Pool, mode: ResetMode) bool {
    // TODO: Potentially store all allocated objects in a list as well, allowing to
    //       just move them into the free list instead of actually releasing the memory.

    const reset_successful = pool.arena.reset(mode);

    pool.free_list = null;

    return reset_successful;
}
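
A sketch of batch-destroying everything in the pool, assuming the .free_all variant of ArenaAllocator.ResetMode:

const std = @import("std");

test "MemoryPool.reset with free_all" {
    var pool = std.heap.MemoryPool(u32).init(std.testing.allocator);
    defer pool.deinit();

    for (0..10) |i| {
        const item = try pool.create();
        item.* = @intCast(i);
    }

    // Invalidates every item created above in one call; per the note,
    // .free_all always reports success.
    try std.testing.expect(pool.reset(.free_all));
}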

Functioncreate[src]

pub fn create(pool: *Pool) !ItemPtr

Creates a new item and adds it to the memory pool.

Parameters

pool: *Pool

Source Code

Source code
pub fn create(pool: *Pool) !ItemPtr {
    const node = if (pool.free_list) |item| blk: {
        pool.free_list = item.next;
        break :blk item;
    } else if (pool_options.growable)
        @as(NodePtr, @ptrCast(try pool.allocNew()))
    else
        return error.OutOfMemory;

    const ptr = @as(ItemPtr, @ptrCast(node));
    ptr.* = undefined;
    return ptr;
}

Functiondestroy[src]

pub fn destroy(pool: *Pool, ptr: ItemPtr) void

Destroys a previously created item. Only pass pointers to ptr that were obtained from create() on the same memory pool!

Parameters

pool: *Pool
ptr: ItemPtr

Source Code

Source code
pub fn destroy(pool: *Pool, ptr: ItemPtr) void {
    ptr.* = undefined;

    const node = @as(NodePtr, @ptrCast(ptr));
    node.* = Node{
        .next = pool.free_list,
    };
    pool.free_list = node;
}

Source Code

Source code
pub fn MemoryPoolExtra(comptime Item: type, comptime pool_options: Options) type {
    return struct {
        const Pool = @This();

        /// Size of the memory pool items. This is not necessarily the same
        /// as `@sizeOf(Item)` as the pool also uses the items for internal means.
        pub const item_size = @max(@sizeOf(Node), @sizeOf(Item));

        // This needs to be kept in sync with Node.
        const node_alignment = @alignOf(*anyopaque);

        /// Alignment of the memory pool items. This is not necessarily the same
        /// as `@alignOf(Item)` as the pool also uses the items for internal means.
        pub const item_alignment = @max(node_alignment, pool_options.alignment orelse @alignOf(Item));

        const Node = struct {
            next: ?*align(item_alignment) @This(),
        };
        const NodePtr = *align(item_alignment) Node;
        const ItemPtr = *align(item_alignment) Item;

        arena: std.heap.ArenaAllocator,
        free_list: ?NodePtr = null,

        /// Creates a new memory pool.
        pub fn init(allocator: std.mem.Allocator) Pool {
            return .{ .arena = std.heap.ArenaAllocator.init(allocator) };
        }

        /// Creates a new memory pool and pre-allocates `initial_size` items.
        /// This allows the up to `initial_size` active allocations before a
        /// `OutOfMemory` error happens when calling `create()`.
        pub fn initPreheated(allocator: std.mem.Allocator, initial_size: usize) MemoryPoolError!Pool {
            var pool = init(allocator);
            errdefer pool.deinit();
            try pool.preheat(initial_size);
            return pool;
        }

        /// Destroys the memory pool and frees all allocated memory.
        pub fn deinit(pool: *Pool) void {
            pool.arena.deinit();
            pool.* = undefined;
        }

        /// Preheats the memory pool by pre-allocating `size` items.
        /// This allows up to `size` active allocations before an
        /// `OutOfMemory` error might happen when calling `create()`.
        pub fn preheat(pool: *Pool, size: usize) MemoryPoolError!void {
            var i: usize = 0;
            while (i < size) : (i += 1) {
                const raw_mem = try pool.allocNew();
                const free_node = @as(NodePtr, @ptrCast(raw_mem));
                free_node.* = Node{
                    .next = pool.free_list,
                };
                pool.free_list = free_node;
            }
        }

        pub const ResetMode = std.heap.ArenaAllocator.ResetMode;

        /// Resets the memory pool and destroys all allocated items.
        /// This can be used to batch-destroy all objects without invalidating the memory pool.
        ///
        /// The function will return whether the reset operation was successful or not.
        /// If the reallocation  failed `false` is returned. The pool will still be fully
        /// functional in that case, all memory is released. Future allocations just might
        /// be slower.
        ///
        /// NOTE: If `mode` is `free_all`, the function will always return `true`.
        pub fn reset(pool: *Pool, mode: ResetMode) bool {
            // TODO: Potentially store all allocated objects in a list as well, allowing to
            //       just move them into the free list instead of actually releasing the memory.

            const reset_successful = pool.arena.reset(mode);

            pool.free_list = null;

            return reset_successful;
        }

        /// Creates a new item and adds it to the memory pool.
        pub fn create(pool: *Pool) !ItemPtr {
            const node = if (pool.free_list) |item| blk: {
                pool.free_list = item.next;
                break :blk item;
            } else if (pool_options.growable)
                @as(NodePtr, @ptrCast(try pool.allocNew()))
            else
                return error.OutOfMemory;

            const ptr = @as(ItemPtr, @ptrCast(node));
            ptr.* = undefined;
            return ptr;
        }

        /// Destroys a previously created item.
        /// Only pass items to `ptr` that were previously created with `create()` of the same memory pool!
        pub fn destroy(pool: *Pool, ptr: ItemPtr) void {
            ptr.* = undefined;

            const node = @as(NodePtr, @ptrCast(ptr));
            node.* = Node{
                .next = pool.free_list,
            };
            pool.free_list = node;
        }

        fn allocNew(pool: *Pool) MemoryPoolError!*align(item_alignment) [item_size]u8 {
            const mem = try pool.arena.allocator().alignedAlloc(u8, item_alignment, item_size);
            return mem[0..item_size]; // coerce slice to array pointer
        }
    };
}
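
A sketch that passes an explicit Options value; only the growable and alignment fields referenced in the source above are assumed here, and disabling growth makes the preheated capacity a hard limit:

const std = @import("std");

test "MemoryPoolExtra with growth disabled" {
    // Fixed-capacity pool: two preheated slots and no growth afterwards.
    var pool = try std.heap.MemoryPoolExtra(u64, .{ .growable = false })
        .initPreheated(std.testing.allocator, 2);
    defer pool.deinit();

    _ = try pool.create();
    _ = try pool.create();

    // The free list is empty and the pool may not grow, so this fails.
    try std.testing.expectError(error.OutOfMemory, pool.create());
}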

Type FunctionStackFallbackAllocator[src]

An allocator that first attempts to allocate from a FixedBufferAllocator backed by an array of size bytes. If that allocation fails, it falls back to fallback_allocator. Easily created with stackFallback.

Parameters

size: usize

Fields

buffer: [size]u8
fallback_allocator: Allocator
fixed_buffer_allocator: FixedBufferAllocator
get_called: if (std.debug.runtime_safety) bool else void =
    if (std.debug.runtime_safety) false else {}

Values

Constantallocator[src]

Unlike most std allocators, StackFallbackAllocator modifies its internal state before returning an implementation of the Allocator interface and therefore doesn't use the usual .allocator() method.

Source Code

Source code
pub const allocator = @compileError("use 'const allocator = stackFallback(N).get();' instead")

Functions

Functionget[src]

pub fn get(self: *Self) Allocator

This function both fetches an Allocator interface to this allocator and resets the internal buffer allocator.

Parameters

self: *Self

Source Code

Source code
pub fn get(self: *Self) Allocator {
    if (std.debug.runtime_safety) {
        assert(!self.get_called); // `get` called multiple times; instead use `const allocator = stackFallback(N).get();`
        self.get_called = true;
    }
    self.fixed_buffer_allocator = FixedBufferAllocator.init(self.buffer[0..]);
    return .{
        .ptr = self,
        .vtable = &.{
            .alloc = alloc,
            .resize = resize,
            .remap = remap,
            .free = free,
        },
    };
}

Source Code

Source code
pub fn StackFallbackAllocator(comptime size: usize) type {
    return struct {
        const Self = @This();

        buffer: [size]u8,
        fallback_allocator: Allocator,
        fixed_buffer_allocator: FixedBufferAllocator,
        get_called: if (std.debug.runtime_safety) bool else void =
            if (std.debug.runtime_safety) false else {},

        /// This function both fetches a `Allocator` interface to this
        /// allocator *and* resets the internal buffer allocator.
        pub fn get(self: *Self) Allocator {
            if (std.debug.runtime_safety) {
                assert(!self.get_called); // `get` called multiple times; instead use `const allocator = stackFallback(N).get();`
                self.get_called = true;
            }
            self.fixed_buffer_allocator = FixedBufferAllocator.init(self.buffer[0..]);
            return .{
                .ptr = self,
                .vtable = &.{
                    .alloc = alloc,
                    .resize = resize,
                    .remap = remap,
                    .free = free,
                },
            };
        }

        /// Unlike most std allocators `StackFallbackAllocator` modifies
        /// its internal state before returning an implementation of
        /// the`Allocator` interface and therefore also doesn't use
        /// the usual `.allocator()` method.
        pub const allocator = @compileError("use 'const allocator = stackFallback(N).get();' instead");

        fn alloc(
            ctx: *anyopaque,
            len: usize,
            alignment: mem.Alignment,
            ra: usize,
        ) ?[*]u8 {
            const self: *Self = @ptrCast(@alignCast(ctx));
            return FixedBufferAllocator.alloc(&self.fixed_buffer_allocator, len, alignment, ra) orelse
                return self.fallback_allocator.rawAlloc(len, alignment, ra);
        }

        fn resize(
            ctx: *anyopaque,
            buf: []u8,
            alignment: mem.Alignment,
            new_len: usize,
            ra: usize,
        ) bool {
            const self: *Self = @ptrCast(@alignCast(ctx));
            if (self.fixed_buffer_allocator.ownsPtr(buf.ptr)) {
                return FixedBufferAllocator.resize(&self.fixed_buffer_allocator, buf, alignment, new_len, ra);
            } else {
                return self.fallback_allocator.rawResize(buf, alignment, new_len, ra);
            }
        }

        fn remap(
            context: *anyopaque,
            memory: []u8,
            alignment: mem.Alignment,
            new_len: usize,
            return_address: usize,
        ) ?[*]u8 {
            const self: *Self = @ptrCast(@alignCast(context));
            if (self.fixed_buffer_allocator.ownsPtr(memory.ptr)) {
                return FixedBufferAllocator.remap(&self.fixed_buffer_allocator, memory, alignment, new_len, return_address);
            } else {
                return self.fallback_allocator.rawRemap(memory, alignment, new_len, return_address);
            }
        }

        fn free(
            ctx: *anyopaque,
            buf: []u8,
            alignment: mem.Alignment,
            ra: usize,
        ) void {
            const self: *Self = @ptrCast(@alignCast(ctx));
            if (self.fixed_buffer_allocator.ownsPtr(buf.ptr)) {
                return FixedBufferAllocator.free(&self.fixed_buffer_allocator, buf, alignment, ra);
            } else {
                return self.fallback_allocator.rawFree(buf, alignment, ra);
            }
        }
    };
}

Global Variables

Global Variablenext_mmap_addr_hint[src]

TODO Utilize this on Windows.

Source Code

Source code
pub var next_mmap_addr_hint: ?[*]align(page_size_min) u8 = null

Values

Constantpage_size_min[src]

comptime-known minimum page size of the target.

All pointers from mmap or VirtualAlloc are aligned to at least page_size_min, but their actual alignment may be bigger.

This value can be overridden via std.options.page_size_min.

On many systems, the actual page size can only be determined at runtime with pageSize.

Source Code

Source code
pub const page_size_min: usize = std.options.page_size_min orelse (page_size_min_default orelse
    @compileError(@tagName(builtin.cpu.arch) ++ "-" ++ @tagName(builtin.os.tag) ++ " has unknown page_size_min; populate std.options.page_size_min"))

Constantpage_size_max[src]

comptime-known maximum page size of the target.

Targeting a system with a larger page size may require overriding std.options.page_size_max, as well as providing a corresponding linker option.

The actual page size can only be determined at runtime with pageSize.

Source Code

Source code
pub const page_size_max: usize = std.options.page_size_max orelse (page_size_max_default orelse if (builtin.os.tag == .freestanding or builtin.os.tag == .other)
    @compileError("freestanding/other page_size_max must provided with std.options.page_size_max")
else
    @compileError(@tagName(builtin.cpu.arch) ++ "-" ++ @tagName(builtin.os.tag) ++ " has unknown page_size_max; populate std.options.page_size_max"))

Constantc_allocator[src]

Supports the full Allocator interface, including alignment, and exploits malloc_usable_size if available. For an allocator that directly calls malloc/free, see raw_c_allocator.

Source Code

Source code
pub const c_allocator: Allocator = .{
    .ptr = undefined,
    .vtable = &CAllocator.vtable,
}
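
A minimal sketch; it assumes libc is linked (e.g. zig test -lc), since this allocator forwards to malloc/free:

const std = @import("std");

test "c_allocator round trip" {
    const gpa = std.heap.c_allocator;
    const buf = try gpa.alloc(u8, 64);
    defer gpa.free(buf);
    @memset(buf, 0);
}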

Constantraw_c_allocator[src]

Asserts that allocation alignments do not exceed @alignOf(std.c.max_align_t) and directly calls malloc/free. Does not attempt to utilize malloc_usable_size. This allocator is safe to use, for example, as the backing allocator of an ArenaAllocator, and in such a case is more efficient than c_allocator.

Source Code

Source code
pub const raw_c_allocator: Allocator = .{
    .ptr = undefined,
    .vtable = &raw_c_allocator_vtable,
}
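
A sketch of the arena pattern mentioned above, again assuming libc is linked; the arena batches small requests so raw malloc/free only see the arena's larger backing buffers:

const std = @import("std");

test "raw_c_allocator backing an arena" {
    var arena = std.heap.ArenaAllocator.init(std.heap.raw_c_allocator);
    defer arena.deinit();
    const gpa = arena.allocator();

    // Many small allocations, one bulk free via arena.deinit().
    for (0..100) |_| {
        _ = try gpa.alloc(u8, 16);
    }
}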

Constantpage_allocator[src]

On operating systems that support memory mapping, this allocator makes a syscall directly for every allocation and free.

Otherwise, it falls back to the preferred singleton for the target.

Thread-safe.

Source Code

Source code
pub const page_allocator: Allocator = if (@hasDecl(root, "os") and
    @hasDecl(root.os, "heap") and
    @hasDecl(root.os.heap, "page_allocator"))
    root.os.heap.page_allocator
else if (builtin.target.cpu.arch.isWasm()) .{
    .ptr = undefined,
    .vtable = &WasmAllocator.vtable,
} else if (builtin.target.os.tag == .plan9) .{
    .ptr = undefined,
    .vtable = &SbrkAllocator(std.os.plan9.sbrk).vtable,
} else .{
    .ptr = undefined,
    .vtable = &PageAllocator.vtable,
}
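
A sketch of direct use; because every allocation and free is a syscall on mapping-capable targets, this allocator is usually reserved for large blocks or used as the backing allocator of another allocator:

const std = @import("std");

test "page_allocator direct use" {
    const gpa = std.heap.page_allocator;
    // Backed by whole pages on targets with memory mapping.
    const block = try gpa.alloc(u8, 4096);
    defer gpa.free(block);
}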

Constantsmp_allocator[src]

Source Code

Source code
pub const smp_allocator: Allocator = .{
    .ptr = undefined,
    .vtable = &SmpAllocator.vtable,
}

Constantwasm_allocator[src]

This allocator is fast, small, and specific to WebAssembly. In the future, this will be the implementation automatically selected by GeneralPurposeAllocator when compiling in ReleaseSmall mode for wasm32 and wasm64 architectures. Until then, it is available here to play with.

Source Code

Source code
pub const wasm_allocator: Allocator = .{
    .ptr = undefined,
    .vtable = &WasmAllocator.vtable,
}

Functions

FunctionpageSize[src]

pub inline fn pageSize() usize

If the page size is comptime-known, the return value is comptime-known. Otherwise, this calls std.options.queryPageSize, which by default queries the host operating system at runtime.

Example Usage

test pageSize {
    assert(std.math.isPowerOfTwo(pageSize()));
}

Source Code

Source code
pub inline fn pageSize() usize {
    if (page_size_min == page_size_max) return page_size_min;
    return std.options.queryPageSize();
}

FunctiondefaultQueryPageSize[src]

pub fn defaultQueryPageSize() usize

The default implementation of std.options.queryPageSize. Asserts that the page size is within page_size_min and page_size_max.

Example Usage

test defaultQueryPageSize {
    if (builtin.cpu.arch.isWasm()) return error.SkipZigTest;
    assert(std.math.isPowerOfTwo(defaultQueryPageSize()));
}

Source Code

Source code
pub fn defaultQueryPageSize() usize {
    const global = struct {
        var cached_result: std.atomic.Value(usize) = .init(0);
    };
    var size = global.cached_result.load(.unordered);
    if (size > 0) return size;
    size = switch (builtin.os.tag) {
        .linux => if (builtin.link_libc) @intCast(std.c.sysconf(@intFromEnum(std.c._SC.PAGESIZE))) else std.os.linux.getauxval(std.elf.AT_PAGESZ),
        .driverkit, .ios, .macos, .tvos, .visionos, .watchos => blk: {
            const task_port = std.c.mach_task_self();
            // mach_task_self may fail "if there are any resource failures or other errors".
            if (task_port == std.c.TASK_NULL)
                break :blk 0;
            var info_count = std.c.TASK_VM_INFO_COUNT;
            var vm_info: std.c.task_vm_info_data_t = undefined;
            vm_info.page_size = 0;
            _ = std.c.task_info(
                task_port,
                std.c.TASK_VM_INFO,
                @as(std.c.task_info_t, @ptrCast(&vm_info)),
                &info_count,
            );
            assert(vm_info.page_size != 0);
            break :blk @intCast(vm_info.page_size);
        },
        .windows => blk: {
            var info: std.os.windows.SYSTEM_INFO = undefined;
            std.os.windows.kernel32.GetSystemInfo(&info);
            break :blk info.dwPageSize;
        },
        else => if (builtin.link_libc)
            @intCast(std.c.sysconf(@intFromEnum(std.c._SC.PAGESIZE)))
        else if (builtin.os.tag == .freestanding or builtin.os.tag == .other)
            @compileError("unsupported target: freestanding/other")
        else
            @compileError("pageSize on " ++ @tagName(builtin.cpu.arch) ++ "-" ++ @tagName(builtin.os.tag) ++ " is not supported without linking libc, using the default implementation"),
    };

    assert(size >= page_size_min);
    assert(size <= page_size_max);
    global.cached_result.store(size, .unordered);

    return size;
}

FunctionstackFallback[src]

pub fn stackFallback(comptime size: usize, fallback_allocator: Allocator) StackFallbackAllocator(size)

Returns a StackFallbackAllocator that allocates from a FixedBufferAllocator backed by an array of size size, falling back to fallback_allocator if that fails.

Parameters

size: usize
fallback_allocator: Allocator

Source Code

Source code
pub fn stackFallback(comptime size: usize, fallback_allocator: Allocator) StackFallbackAllocator(size) {
    return StackFallbackAllocator(size){
        .buffer = undefined,
        .fallback_allocator = fallback_allocator,
        .fixed_buffer_allocator = undefined,
    };
}
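
An illustrative sketch (not part of this file): a 256-byte stack buffer with std.testing.allocator as the fallback; the first allocation fits in the buffer, the second does not:

test "stackFallback sketch" {
    var sfa = stackFallback(256, std.testing.allocator);
    const allocator = sfa.get();

    // Illustrative only: served by the internal FixedBufferAllocator.
    const small = try allocator.alloc(u8, 64);
    defer allocator.free(small);

    // Too large for the remaining buffer; falls back to std.testing.allocator.
    const large = try allocator.alloc(u8, 1024);
    defer allocator.free(large);
}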

FunctiontestAllocator[src]

pub fn testAllocator(base_allocator: mem.Allocator) !void

This one should not try alignments that exceed what C malloc can handle.

Parameters

base_allocator: mem.Allocator

Source Code

Source code
pub fn testAllocator(base_allocator: mem.Allocator) !void {
    var validationAllocator = mem.validationWrap(base_allocator);
    const allocator = validationAllocator.allocator();

    var slice = try allocator.alloc(*i32, 100);
    try testing.expect(slice.len == 100);
    for (slice, 0..) |*item, i| {
        item.* = try allocator.create(i32);
        item.*.* = @as(i32, @intCast(i));
    }

    slice = try allocator.realloc(slice, 20000);
    try testing.expect(slice.len == 20000);

    for (slice[0..100], 0..) |item, i| {
        try testing.expect(item.* == @as(i32, @intCast(i)));
        allocator.destroy(item);
    }

    if (allocator.resize(slice, 50)) {
        slice = slice[0..50];
        if (allocator.resize(slice, 25)) {
            slice = slice[0..25];
            try testing.expect(allocator.resize(slice, 0));
            slice = slice[0..0];
            slice = try allocator.realloc(slice, 10);
            try testing.expect(slice.len == 10);
        }
    }
    allocator.free(slice);

    // Zero-length allocation
    const empty = try allocator.alloc(u8, 0);
    allocator.free(empty);
    // Allocation with zero-sized types
    const zero_bit_ptr = try allocator.create(u0);
    zero_bit_ptr.* = 0;
    allocator.destroy(zero_bit_ptr);
    const zero_len_array = try allocator.create([0]u64);
    allocator.destroy(zero_len_array);

    const oversize = try allocator.alignedAlloc(u32, null, 5);
    try testing.expect(oversize.len >= 5);
    for (oversize) |*item| {
        item.* = 0xDEADBEEF;
    }
    allocator.free(oversize);
}
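
An illustrative sketch (not part of this file): the helper can be pointed at any Allocator implementation, for example std.testing.allocator:

test "testAllocator sketch" {
    // Illustrative only: run the generic allocator exercise above
    // against the testing allocator.
    try testAllocator(std.testing.allocator);
}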

FunctiontestAllocatorAligned[src]

pub fn testAllocatorAligned(base_allocator: mem.Allocator) !void

Parameters

base_allocator: mem.Allocator

Source Code

Source code
pub fn testAllocatorAligned(base_allocator: mem.Allocator) !void {
    var validationAllocator = mem.validationWrap(base_allocator);
    const allocator = validationAllocator.allocator();

    // Test a few alignment values, smaller and bigger than the type's one
    inline for ([_]u29{ 1, 2, 4, 8, 16, 32, 64 }) |alignment| {
        // initial
        var slice = try allocator.alignedAlloc(u8, alignment, 10);
        try testing.expect(slice.len == 10);
        // grow
        slice = try allocator.realloc(slice, 100);
        try testing.expect(slice.len == 100);
        if (allocator.resize(slice, 10)) {
            slice = slice[0..10];
        }
        try testing.expect(allocator.resize(slice, 0));
        slice = slice[0..0];
        // realloc from zero
        slice = try allocator.realloc(slice, 100);
        try testing.expect(slice.len == 100);
        if (allocator.resize(slice, 10)) {
            slice = slice[0..10];
        }
        try testing.expect(allocator.resize(slice, 0));
    }
}

FunctiontestAllocatorLargeAlignment[src]

pub fn testAllocatorLargeAlignment(base_allocator: mem.Allocator) !void

Parameters

base_allocator: mem.Allocator

Source Code

Source code
pub fn testAllocatorLargeAlignment(base_allocator: mem.Allocator) !void {
    var validationAllocator = mem.validationWrap(base_allocator);
    const allocator = validationAllocator.allocator();

    const large_align: usize = page_size_min / 2;

    var align_mask: usize = undefined;
    align_mask = @shlWithOverflow(~@as(usize, 0), @as(Allocator.Log2Align, @ctz(large_align)))[0];

    var slice = try allocator.alignedAlloc(u8, large_align, 500);
    try testing.expect(@intFromPtr(slice.ptr) & align_mask == @intFromPtr(slice.ptr));

    if (allocator.resize(slice, 100)) {
        slice = slice[0..100];
    }

    slice = try allocator.realloc(slice, 5000);
    try testing.expect(@intFromPtr(slice.ptr) & align_mask == @intFromPtr(slice.ptr));

    if (allocator.resize(slice, 10)) {
        slice = slice[0..10];
    }

    slice = try allocator.realloc(slice, 20000);
    try testing.expect(@intFromPtr(slice.ptr) & align_mask == @intFromPtr(slice.ptr));

    allocator.free(slice);
}

FunctiontestAllocatorAlignedShrink[src]

pub fn testAllocatorAlignedShrink(base_allocator: mem.Allocator) !void

Parameters

base_allocator: mem.Allocator

Source Code

Source code
pub fn testAllocatorAlignedShrink(base_allocator: mem.Allocator) !void {
    var validationAllocator = mem.validationWrap(base_allocator);
    const allocator = validationAllocator.allocator();

    var debug_buffer: [1000]u8 = undefined;
    var fib = FixedBufferAllocator.init(&debug_buffer);
    const debug_allocator = fib.allocator();

    const alloc_size = pageSize() * 2 + 50;
    var slice = try allocator.alignedAlloc(u8, 16, alloc_size);
    defer allocator.free(slice);

    var stuff_to_free = std.ArrayList([]align(16) u8).init(debug_allocator);
    // On Windows, VirtualAlloc returns addresses aligned to a 64K boundary,
    // which is 16 pages, hence the 32. Because of this high over-alignment,
    // this test may require increasing the size of the allocations feeding
    // the `allocator` parameter if they fail.
    while (@intFromPtr(slice.ptr) == mem.alignForward(usize, @intFromPtr(slice.ptr), pageSize() * 32)) {
        try stuff_to_free.append(slice);
        slice = try allocator.alignedAlloc(u8, 16, alloc_size);
    }
    while (stuff_to_free.pop()) |item| {
        allocator.free(item);
    }
    slice[0] = 0x12;
    slice[60] = 0x34;

    slice = try allocator.reallocAdvanced(slice, alloc_size / 2, 0);
    try testing.expect(slice[0] == 0x12);
    try testing.expect(slice[60] == 0x34);
}

Source Code

Source code
const std = @import("std.zig");
const builtin = @import("builtin");
const root = @import("root");
const assert = std.debug.assert;
const testing = std.testing;
const mem = std.mem;
const c = std.c;
const Allocator = std.mem.Allocator;
const windows = std.os.windows;

pub const ArenaAllocator = @import("heap/arena_allocator.zig").ArenaAllocator;
pub const SmpAllocator = @import("heap/SmpAllocator.zig");
pub const FixedBufferAllocator = @import("heap/FixedBufferAllocator.zig");
pub const PageAllocator = @import("heap/PageAllocator.zig");
pub const SbrkAllocator = @import("heap/sbrk_allocator.zig").SbrkAllocator;
pub const ThreadSafeAllocator = @import("heap/ThreadSafeAllocator.zig");
pub const WasmAllocator = @import("heap/WasmAllocator.zig");

pub const DebugAllocatorConfig = @import("heap/debug_allocator.zig").Config;
pub const DebugAllocator = @import("heap/debug_allocator.zig").DebugAllocator;
pub const Check = enum { ok, leak };
/// Deprecated; to be removed after 0.14.0 is tagged.
pub const GeneralPurposeAllocatorConfig = DebugAllocatorConfig;
/// Deprecated; to be removed after 0.14.0 is tagged.
pub const GeneralPurposeAllocator = DebugAllocator;

const memory_pool = @import("heap/memory_pool.zig");
pub const MemoryPool = memory_pool.MemoryPool;
pub const MemoryPoolAligned = memory_pool.MemoryPoolAligned;
pub const MemoryPoolExtra = memory_pool.MemoryPoolExtra;
pub const MemoryPoolOptions = memory_pool.Options;

/// TODO Utilize this on Windows.
pub var next_mmap_addr_hint: ?[*]align(page_size_min) u8 = null;

/// comptime-known minimum page size of the target.
///
/// All pointers from `mmap` or `VirtualAlloc` are aligned to at least
/// `page_size_min`, but their actual alignment may be bigger.
///
/// This value can be overridden via `std.options.page_size_min`.
///
/// On many systems, the actual page size can only be determined at runtime
/// with `pageSize`.
pub const page_size_min: usize = std.options.page_size_min orelse (page_size_min_default orelse
    @compileError(@tagName(builtin.cpu.arch) ++ "-" ++ @tagName(builtin.os.tag) ++ " has unknown page_size_min; populate std.options.page_size_min"));

/// comptime-known maximum page size of the target.
///
/// Targeting a system with a larger page size may require overriding
/// `std.options.page_size_max`, as well as providing a corresponding linker
/// option.
///
/// The actual page size can only be determined at runtime with `pageSize`.
pub const page_size_max: usize = std.options.page_size_max orelse (page_size_max_default orelse if (builtin.os.tag == .freestanding or builtin.os.tag == .other)
    @compileError("freestanding/other page_size_max must provided with std.options.page_size_max")
else
    @compileError(@tagName(builtin.cpu.arch) ++ "-" ++ @tagName(builtin.os.tag) ++ " has unknown page_size_max; populate std.options.page_size_max"));

/// If the page size is comptime-known, the return value is comptime-known.
/// Otherwise, calls `std.options.queryPageSize`, which by default queries the
/// host operating system at runtime.
pub inline fn pageSize() usize {
    if (page_size_min == page_size_max) return page_size_min;
    return std.options.queryPageSize();
}

test pageSize {
    assert(std.math.isPowerOfTwo(pageSize()));
}

/// The default implementation of `std.options.queryPageSize`.
/// Asserts that the page size is between `page_size_min` and `page_size_max`.
pub fn defaultQueryPageSize() usize {
    const global = struct {
        var cached_result: std.atomic.Value(usize) = .init(0);
    };
    var size = global.cached_result.load(.unordered);
    if (size > 0) return size;
    size = switch (builtin.os.tag) {
        .linux => if (builtin.link_libc) @intCast(std.c.sysconf(@intFromEnum(std.c._SC.PAGESIZE))) else std.os.linux.getauxval(std.elf.AT_PAGESZ),
        .driverkit, .ios, .macos, .tvos, .visionos, .watchos => blk: {
            const task_port = std.c.mach_task_self();
            // mach_task_self may fail "if there are any resource failures or other errors".
            if (task_port == std.c.TASK_NULL)
                break :blk 0;
            var info_count = std.c.TASK_VM_INFO_COUNT;
            var vm_info: std.c.task_vm_info_data_t = undefined;
            vm_info.page_size = 0;
            _ = std.c.task_info(
                task_port,
                std.c.TASK_VM_INFO,
                @as(std.c.task_info_t, @ptrCast(&vm_info)),
                &info_count,
            );
            assert(vm_info.page_size != 0);
            break :blk @intCast(vm_info.page_size);
        },
        .windows => blk: {
            var info: std.os.windows.SYSTEM_INFO = undefined;
            std.os.windows.kernel32.GetSystemInfo(&info);
            break :blk info.dwPageSize;
        },
        else => if (builtin.link_libc)
            @intCast(std.c.sysconf(@intFromEnum(std.c._SC.PAGESIZE)))
        else if (builtin.os.tag == .freestanding or builtin.os.tag == .other)
            @compileError("unsupported target: freestanding/other")
        else
            @compileError("pageSize on " ++ @tagName(builtin.cpu.arch) ++ "-" ++ @tagName(builtin.os.tag) ++ " is not supported without linking libc, using the default implementation"),
    };

    assert(size >= page_size_min);
    assert(size <= page_size_max);
    global.cached_result.store(size, .unordered);

    return size;
}

test defaultQueryPageSize {
    if (builtin.cpu.arch.isWasm()) return error.SkipZigTest;
    assert(std.math.isPowerOfTwo(defaultQueryPageSize()));
}

const CAllocator = struct {
    comptime {
        if (!builtin.link_libc) {
            @compileError("C allocator is only available when linking against libc");
        }
    }

    const vtable: Allocator.VTable = .{
        .alloc = alloc,
        .resize = resize,
        .remap = remap,
        .free = free,
    };

    pub const supports_malloc_size = @TypeOf(malloc_size) != void;
    pub const malloc_size = if (@TypeOf(c.malloc_size) != void)
        c.malloc_size
    else if (@TypeOf(c.malloc_usable_size) != void)
        c.malloc_usable_size
    else if (@TypeOf(c._msize) != void)
        c._msize
    else {};

    pub const supports_posix_memalign = switch (builtin.os.tag) {
        .dragonfly, .netbsd, .freebsd, .solaris, .openbsd, .linux, .macos, .ios, .tvos, .watchos, .visionos => true,
        else => false,
    };

    fn getHeader(ptr: [*]u8) *[*]u8 {
        return @alignCast(@ptrCast(ptr - @sizeOf(usize)));
    }

    fn alignedAlloc(len: usize, alignment: mem.Alignment) ?[*]u8 {
        const alignment_bytes = alignment.toByteUnits();
        if (supports_posix_memalign) {
            // posix_memalign only accepts alignment values that are a
            // multiple of the pointer size.
            const effective_alignment = @max(alignment_bytes, @sizeOf(usize));

            var aligned_ptr: ?*anyopaque = undefined;
            if (c.posix_memalign(&aligned_ptr, effective_alignment, len) != 0)
                return null;

            return @ptrCast(aligned_ptr);
        }

        // Thin wrapper around regular malloc, overallocate to account for
        // alignment padding and store the original malloc()'ed pointer before
        // the aligned address.
        const unaligned_ptr = @as([*]u8, @ptrCast(c.malloc(len + alignment_bytes - 1 + @sizeOf(usize)) orelse return null));
        const unaligned_addr = @intFromPtr(unaligned_ptr);
        const aligned_addr = mem.alignForward(usize, unaligned_addr + @sizeOf(usize), alignment_bytes);
        const aligned_ptr = unaligned_ptr + (aligned_addr - unaligned_addr);
        getHeader(aligned_ptr).* = unaligned_ptr;

        return aligned_ptr;
    }

    fn alignedFree(ptr: [*]u8) void {
        if (supports_posix_memalign) {
            return c.free(ptr);
        }

        const unaligned_ptr = getHeader(ptr).*;
        c.free(unaligned_ptr);
    }

    fn alignedAllocSize(ptr: [*]u8) usize {
        if (supports_posix_memalign) {
            return CAllocator.malloc_size(ptr);
        }

        const unaligned_ptr = getHeader(ptr).*;
        const delta = @intFromPtr(ptr) - @intFromPtr(unaligned_ptr);
        return CAllocator.malloc_size(unaligned_ptr) - delta;
    }

    fn alloc(
        _: *anyopaque,
        len: usize,
        alignment: mem.Alignment,
        return_address: usize,
    ) ?[*]u8 {
        _ = return_address;
        assert(len > 0);
        return alignedAlloc(len, alignment);
    }

    fn resize(
        _: *anyopaque,
        buf: []u8,
        alignment: mem.Alignment,
        new_len: usize,
        return_address: usize,
    ) bool {
        _ = alignment;
        _ = return_address;
        if (new_len <= buf.len) {
            return true;
        }
        if (CAllocator.supports_malloc_size) {
            const full_len = alignedAllocSize(buf.ptr);
            if (new_len <= full_len) {
                return true;
            }
        }
        return false;
    }

    fn remap(
        context: *anyopaque,
        memory: []u8,
        alignment: mem.Alignment,
        new_len: usize,
        return_address: usize,
    ) ?[*]u8 {
        // realloc would potentially return a new allocation that does not
        // respect the original alignment.
        return if (resize(context, memory, alignment, new_len, return_address)) memory.ptr else null;
    }

    fn free(
        _: *anyopaque,
        buf: []u8,
        alignment: mem.Alignment,
        return_address: usize,
    ) void {
        _ = alignment;
        _ = return_address;
        alignedFree(buf.ptr);
    }
};

/// Supports the full Allocator interface, including alignment, and exploits
/// `malloc_usable_size` if available. For an allocator that directly calls
/// `malloc`/`free`, see `raw_c_allocator`.
pub const c_allocator: Allocator = .{
    .ptr = undefined,
    .vtable = &CAllocator.vtable,
};

/// Asserts allocations are within `@alignOf(std.c.max_align_t)` and directly
/// calls `malloc`/`free`. Does not attempt to utilize `malloc_usable_size`.
/// This allocator is safe to use as the backing allocator with
/// `ArenaAllocator`, for example, and is more efficient in such a case than
/// `c_allocator`.
pub const raw_c_allocator: Allocator = .{
    .ptr = undefined,
    .vtable = &raw_c_allocator_vtable,
};
const raw_c_allocator_vtable: Allocator.VTable = .{
    .alloc = rawCAlloc,
    .resize = rawCResize,
    .remap = rawCRemap,
    .free = rawCFree,
};

fn rawCAlloc(
    context: *anyopaque,
    len: usize,
    alignment: mem.Alignment,
    return_address: usize,
) ?[*]u8 {
    _ = context;
    _ = return_address;
    assert(alignment.compare(.lte, comptime .fromByteUnits(@alignOf(std.c.max_align_t))));
    // Note that this pointer cannot be aligncasted to max_align_t because if
    // len is < max_align_t then the alignment can be smaller. For example, if
    // max_align_t is 16, but the user requests 8 bytes, there is no built-in
    // type in C that is size 8 and has 16 byte alignment, so the alignment may
    // be 8 bytes rather than 16. Similarly if only 1 byte is requested, malloc
    // is allowed to return a 1-byte aligned pointer.
    return @ptrCast(c.malloc(len));
}

fn rawCResize(
    context: *anyopaque,
    memory: []u8,
    alignment: mem.Alignment,
    new_len: usize,
    return_address: usize,
) bool {
    _ = context;
    _ = memory;
    _ = alignment;
    _ = new_len;
    _ = return_address;
    return false;
}

fn rawCRemap(
    context: *anyopaque,
    memory: []u8,
    alignment: mem.Alignment,
    new_len: usize,
    return_address: usize,
) ?[*]u8 {
    _ = context;
    _ = alignment;
    _ = return_address;
    return @ptrCast(c.realloc(memory.ptr, new_len));
}

fn rawCFree(
    context: *anyopaque,
    memory: []u8,
    alignment: mem.Alignment,
    return_address: usize,
) void {
    _ = context;
    _ = alignment;
    _ = return_address;
    c.free(memory.ptr);
}

/// On operating systems that support memory mapping, this allocator makes a
/// syscall directly for every allocation and free.
///
/// Otherwise, it falls back to the preferred singleton for the target.
///
/// Thread-safe.
pub const page_allocator: Allocator = if (@hasDecl(root, "os") and
    @hasDecl(root.os, "heap") and
    @hasDecl(root.os.heap, "page_allocator"))
    root.os.heap.page_allocator
else if (builtin.target.cpu.arch.isWasm()) .{
    .ptr = undefined,
    .vtable = &WasmAllocator.vtable,
} else if (builtin.target.os.tag == .plan9) .{
    .ptr = undefined,
    .vtable = &SbrkAllocator(std.os.plan9.sbrk).vtable,
} else .{
    .ptr = undefined,
    .vtable = &PageAllocator.vtable,
};

pub const smp_allocator: Allocator = .{
    .ptr = undefined,
    .vtable = &SmpAllocator.vtable,
};

/// This allocator is fast, small, and specific to WebAssembly. In the future,
/// this will be the implementation automatically selected by
/// `GeneralPurposeAllocator` when compiling in `ReleaseSmall` mode for wasm32
/// and wasm64 architectures.
/// Until then, it is available here to play with.
pub const wasm_allocator: Allocator = .{
    .ptr = undefined,
    .vtable = &WasmAllocator.vtable,
};

/// Returns a `StackFallbackAllocator` that allocates from a
/// `FixedBufferAllocator` backed by an array of size `size`, falling back to
/// `fallback_allocator` if that fails.
pub fn stackFallback(comptime size: usize, fallback_allocator: Allocator) StackFallbackAllocator(size) {
    return StackFallbackAllocator(size){
        .buffer = undefined,
        .fallback_allocator = fallback_allocator,
        .fixed_buffer_allocator = undefined,
    };
}

/// An allocator that attempts to allocate using a
/// `FixedBufferAllocator` using an array of size `size`. If the
/// allocation fails, it will fall back to using
/// `fallback_allocator`. Easily created with `stackFallback`.
pub fn StackFallbackAllocator(comptime size: usize) type {
    return struct {
        const Self = @This();

        buffer: [size]u8,
        fallback_allocator: Allocator,
        fixed_buffer_allocator: FixedBufferAllocator,
        get_called: if (std.debug.runtime_safety) bool else void =
            if (std.debug.runtime_safety) false else {},

        /// This function both fetches a `Allocator` interface to this
        /// allocator *and* resets the internal buffer allocator.
        pub fn get(self: *Self) Allocator {
            if (std.debug.runtime_safety) {
                assert(!self.get_called); // `get` called multiple times; instead use `const allocator = stackFallback(N).get();`
                self.get_called = true;
            }
            self.fixed_buffer_allocator = FixedBufferAllocator.init(self.buffer[0..]);
            return .{
                .ptr = self,
                .vtable = &.{
                    .alloc = alloc,
                    .resize = resize,
                    .remap = remap,
                    .free = free,
                },
            };
        }

        /// Unlike most std allocators, `StackFallbackAllocator` modifies
        /// its internal state before returning an implementation of
        /// the `Allocator` interface, and therefore does not use
        /// the usual `.allocator()` method.
        pub const allocator = @compileError("use 'const allocator = stackFallback(N).get();' instead");

        fn alloc(
            ctx: *anyopaque,
            len: usize,
            alignment: mem.Alignment,
            ra: usize,
        ) ?[*]u8 {
            const self: *Self = @ptrCast(@alignCast(ctx));
            return FixedBufferAllocator.alloc(&self.fixed_buffer_allocator, len, alignment, ra) orelse
                return self.fallback_allocator.rawAlloc(len, alignment, ra);
        }

        fn resize(
            ctx: *anyopaque,
            buf: []u8,
            alignment: mem.Alignment,
            new_len: usize,
            ra: usize,
        ) bool {
            const self: *Self = @ptrCast(@alignCast(ctx));
            if (self.fixed_buffer_allocator.ownsPtr(buf.ptr)) {
                return FixedBufferAllocator.resize(&self.fixed_buffer_allocator, buf, alignment, new_len, ra);
            } else {
                return self.fallback_allocator.rawResize(buf, alignment, new_len, ra);
            }
        }

        fn remap(
            context: *anyopaque,
            memory: []u8,
            alignment: mem.Alignment,
            new_len: usize,
            return_address: usize,
        ) ?[*]u8 {
            const self: *Self = @ptrCast(@alignCast(context));
            if (self.fixed_buffer_allocator.ownsPtr(memory.ptr)) {
                return FixedBufferAllocator.remap(&self.fixed_buffer_allocator, memory, alignment, new_len, return_address);
            } else {
                return self.fallback_allocator.rawRemap(memory, alignment, new_len, return_address);
            }
        }

        fn free(
            ctx: *anyopaque,
            buf: []u8,
            alignment: mem.Alignment,
            ra: usize,
        ) void {
            const self: *Self = @ptrCast(@alignCast(ctx));
            if (self.fixed_buffer_allocator.ownsPtr(buf.ptr)) {
                return FixedBufferAllocator.free(&self.fixed_buffer_allocator, buf, alignment, ra);
            } else {
                return self.fallback_allocator.rawFree(buf, alignment, ra);
            }
        }
    };
}

test c_allocator {
    if (builtin.link_libc) {
        try testAllocator(c_allocator);
        try testAllocatorAligned(c_allocator);
        try testAllocatorLargeAlignment(c_allocator);
        try testAllocatorAlignedShrink(c_allocator);
    }
}

test raw_c_allocator {
    if (builtin.link_libc) {
        try testAllocator(raw_c_allocator);
    }
}

test smp_allocator {
    if (builtin.single_threaded) return;
    try testAllocator(smp_allocator);
    try testAllocatorAligned(smp_allocator);
    try testAllocatorLargeAlignment(smp_allocator);
    try testAllocatorAlignedShrink(smp_allocator);
}

test PageAllocator {
    const allocator = page_allocator;
    try testAllocator(allocator);
    try testAllocatorAligned(allocator);
    if (!builtin.target.cpu.arch.isWasm()) {
        try testAllocatorLargeAlignment(allocator);
        try testAllocatorAlignedShrink(allocator);
    }

    if (builtin.os.tag == .windows) {
        const slice = try allocator.alignedAlloc(u8, page_size_min, 128);
        slice[0] = 0x12;
        slice[127] = 0x34;
        allocator.free(slice);
    }
    {
        var buf = try allocator.alloc(u8, pageSize() + 1);
        defer allocator.free(buf);
        buf = try allocator.realloc(buf, 1); // shrink past the page boundary
    }
}

test ArenaAllocator {
    var arena_allocator = ArenaAllocator.init(page_allocator);
    defer arena_allocator.deinit();
    const allocator = arena_allocator.allocator();

    try testAllocator(allocator);
    try testAllocatorAligned(allocator);
    try testAllocatorLargeAlignment(allocator);
    try testAllocatorAlignedShrink(allocator);
}

test "StackFallbackAllocator" {
    {
        var stack_allocator = stackFallback(4096, std.testing.allocator);
        try testAllocator(stack_allocator.get());
    }
    {
        var stack_allocator = stackFallback(4096, std.testing.allocator);
        try testAllocatorAligned(stack_allocator.get());
    }
    {
        var stack_allocator = stackFallback(4096, std.testing.allocator);
        try testAllocatorLargeAlignment(stack_allocator.get());
    }
    {
        var stack_allocator = stackFallback(4096, std.testing.allocator);
        try testAllocatorAlignedShrink(stack_allocator.get());
    }
}

/// This one should not try alignments that exceed what C malloc can handle.
pub fn testAllocator(base_allocator: mem.Allocator) !void {
    var validationAllocator = mem.validationWrap(base_allocator);
    const allocator = validationAllocator.allocator();

    var slice = try allocator.alloc(*i32, 100);
    try testing.expect(slice.len == 100);
    for (slice, 0..) |*item, i| {
        item.* = try allocator.create(i32);
        item.*.* = @as(i32, @intCast(i));
    }

    slice = try allocator.realloc(slice, 20000);
    try testing.expect(slice.len == 20000);

    for (slice[0..100], 0..) |item, i| {
        try testing.expect(item.* == @as(i32, @intCast(i)));
        allocator.destroy(item);
    }

    if (allocator.resize(slice, 50)) {
        slice = slice[0..50];
        if (allocator.resize(slice, 25)) {
            slice = slice[0..25];
            try testing.expect(allocator.resize(slice, 0));
            slice = slice[0..0];
            slice = try allocator.realloc(slice, 10);
            try testing.expect(slice.len == 10);
        }
    }
    allocator.free(slice);

    // Zero-length allocation
    const empty = try allocator.alloc(u8, 0);
    allocator.free(empty);
    // Allocation with zero-sized types
    const zero_bit_ptr = try allocator.create(u0);
    zero_bit_ptr.* = 0;
    allocator.destroy(zero_bit_ptr);
    const zero_len_array = try allocator.create([0]u64);
    allocator.destroy(zero_len_array);

    const oversize = try allocator.alignedAlloc(u32, null, 5);
    try testing.expect(oversize.len >= 5);
    for (oversize) |*item| {
        item.* = 0xDEADBEEF;
    }
    allocator.free(oversize);
}

pub fn testAllocatorAligned(base_allocator: mem.Allocator) !void {
    var validationAllocator = mem.validationWrap(base_allocator);
    const allocator = validationAllocator.allocator();

    // Test a few alignment values, smaller and bigger than the type's one
    inline for ([_]u29{ 1, 2, 4, 8, 16, 32, 64 }) |alignment| {
        // initial
        var slice = try allocator.alignedAlloc(u8, alignment, 10);
        try testing.expect(slice.len == 10);
        // grow
        slice = try allocator.realloc(slice, 100);
        try testing.expect(slice.len == 100);
        if (allocator.resize(slice, 10)) {
            slice = slice[0..10];
        }
        try testing.expect(allocator.resize(slice, 0));
        slice = slice[0..0];
        // realloc from zero
        slice = try allocator.realloc(slice, 100);
        try testing.expect(slice.len == 100);
        if (allocator.resize(slice, 10)) {
            slice = slice[0..10];
        }
        try testing.expect(allocator.resize(slice, 0));
    }
}

pub fn testAllocatorLargeAlignment(base_allocator: mem.Allocator) !void {
    var validationAllocator = mem.validationWrap(base_allocator);
    const allocator = validationAllocator.allocator();

    const large_align: usize = page_size_min / 2;

    var align_mask: usize = undefined;
    align_mask = @shlWithOverflow(~@as(usize, 0), @as(Allocator.Log2Align, @ctz(large_align)))[0];

    var slice = try allocator.alignedAlloc(u8, large_align, 500);
    try testing.expect(@intFromPtr(slice.ptr) & align_mask == @intFromPtr(slice.ptr));

    if (allocator.resize(slice, 100)) {
        slice = slice[0..100];
    }

    slice = try allocator.realloc(slice, 5000);
    try testing.expect(@intFromPtr(slice.ptr) & align_mask == @intFromPtr(slice.ptr));

    if (allocator.resize(slice, 10)) {
        slice = slice[0..10];
    }

    slice = try allocator.realloc(slice, 20000);
    try testing.expect(@intFromPtr(slice.ptr) & align_mask == @intFromPtr(slice.ptr));

    allocator.free(slice);
}

pub fn testAllocatorAlignedShrink(base_allocator: mem.Allocator) !void {
    var validationAllocator = mem.validationWrap(base_allocator);
    const allocator = validationAllocator.allocator();

    var debug_buffer: [1000]u8 = undefined;
    var fib = FixedBufferAllocator.init(&debug_buffer);
    const debug_allocator = fib.allocator();

    const alloc_size = pageSize() * 2 + 50;
    var slice = try allocator.alignedAlloc(u8, 16, alloc_size);
    defer allocator.free(slice);

    var stuff_to_free = std.ArrayList([]align(16) u8).init(debug_allocator);
    // On Windows, VirtualAlloc returns addresses aligned to a 64K boundary,
    // which is 16 pages, hence the 32. Because of this high over-alignment,
    // this test may require increasing the size of the allocations feeding
    // the `allocator` parameter if they fail.
    while (@intFromPtr(slice.ptr) == mem.alignForward(usize, @intFromPtr(slice.ptr), pageSize() * 32)) {
        try stuff_to_free.append(slice);
        slice = try allocator.alignedAlloc(u8, 16, alloc_size);
    }
    while (stuff_to_free.pop()) |item| {
        allocator.free(item);
    }
    slice[0] = 0x12;
    slice[60] = 0x34;

    slice = try allocator.reallocAdvanced(slice, alloc_size / 2, 0);
    try testing.expect(slice[0] == 0x12);
    try testing.expect(slice[60] == 0x34);
}

const page_size_min_default: ?usize = switch (builtin.os.tag) {
    .driverkit, .ios, .macos, .tvos, .visionos, .watchos => switch (builtin.cpu.arch) {
        .x86_64 => 4 << 10,
        .aarch64 => 16 << 10,
        else => null,
    },
    .windows => switch (builtin.cpu.arch) {
        // -- <https://devblogs.microsoft.com/oldnewthing/20210510-00/?p=105200>
        .x86, .x86_64 => 4 << 10,
        // SuperH => 4 << 10,
        .mips, .mipsel, .mips64, .mips64el => 4 << 10,
        .powerpc, .powerpcle, .powerpc64, .powerpc64le => 4 << 10,
        // DEC Alpha => 8 << 10,
        // Itanium => 8 << 10,
        .thumb, .thumbeb, .arm, .armeb, .aarch64, .aarch64_be => 4 << 10,
        else => null,
    },
    .wasi => switch (builtin.cpu.arch) {
        .wasm32, .wasm64 => 64 << 10,
        else => null,
    },
    // https://github.com/tianocore/edk2/blob/b158dad150bf02879668f72ce306445250838201/MdePkg/Include/Uefi/UefiBaseType.h#L180-L187
    .uefi => 4 << 10,
    .freebsd => switch (builtin.cpu.arch) {
        // FreeBSD/sys/*
        .x86, .x86_64 => 4 << 10,
        .thumb, .thumbeb, .arm, .armeb => 4 << 10,
        .aarch64, .aarch64_be => 4 << 10,
        .powerpc, .powerpc64, .powerpc64le, .powerpcle => 4 << 10,
        .riscv32, .riscv64 => 4 << 10,
        else => null,
    },
    .netbsd => switch (builtin.cpu.arch) {
        // NetBSD/sys/arch/*
        .x86, .x86_64 => 4 << 10,
        .thumb, .thumbeb, .arm, .armeb => 4 << 10,
        .aarch64, .aarch64_be => 4 << 10,
        .mips, .mipsel, .mips64, .mips64el => 4 << 10,
        .powerpc, .powerpc64, .powerpc64le, .powerpcle => 4 << 10,
        .sparc => 4 << 10,
        .sparc64 => 8 << 10,
        .riscv32, .riscv64 => 4 << 10,
        // Sun-2
        .m68k => 2 << 10,
        else => null,
    },
    .dragonfly => switch (builtin.cpu.arch) {
        .x86, .x86_64 => 4 << 10,
        else => null,
    },
    .openbsd => switch (builtin.cpu.arch) {
        // OpenBSD/sys/arch/*
        .x86, .x86_64 => 4 << 10,
        .thumb, .thumbeb, .arm, .armeb, .aarch64, .aarch64_be => 4 << 10,
        .mips64, .mips64el => 4 << 10,
        .powerpc, .powerpc64, .powerpc64le, .powerpcle => 4 << 10,
        .riscv64 => 4 << 10,
        .sparc64 => 8 << 10,
        else => null,
    },
    .solaris, .illumos => switch (builtin.cpu.arch) {
        // src/uts/*/sys/machparam.h
        .x86, .x86_64 => 4 << 10,
        .sparc, .sparc64 => 8 << 10,
        else => null,
    },
    .fuchsia => switch (builtin.cpu.arch) {
        // fuchsia/kernel/arch/*/include/arch/defines.h
        .x86_64 => 4 << 10,
        .aarch64, .aarch64_be => 4 << 10,
        .riscv64 => 4 << 10,
        else => null,
    },
    // https://github.com/SerenityOS/serenity/blob/62b938b798dc009605b5df8a71145942fc53808b/Kernel/API/POSIX/sys/limits.h#L11-L13
    .serenity => 4 << 10,
    .haiku => switch (builtin.cpu.arch) {
        // haiku/headers/posix/arch/*/limits.h
        .thumb, .thumbeb, .arm, .armeb => 4 << 10,
        .aarch64, .aarch64_be => 4 << 10,
        .m68k => 4 << 10,
        .mips, .mipsel, .mips64, .mips64el => 4 << 10,
        .powerpc, .powerpc64, .powerpc64le, .powerpcle => 4 << 10,
        .riscv64 => 4 << 10,
        .sparc64 => 8 << 10,
        .x86, .x86_64 => 4 << 10,
        else => null,
    },
    .hurd => switch (builtin.cpu.arch) {
        // gnumach/*/include/mach/*/vm_param.h
        .x86, .x86_64 => 4 << 10,
        .aarch64 => null,
        else => null,
    },
    .plan9 => switch (builtin.cpu.arch) {
        // 9front/sys/src/9/*/mem.h
        .x86, .x86_64 => 4 << 10,
        .thumb, .thumbeb, .arm, .armeb => 4 << 10,
        .aarch64, .aarch64_be => 4 << 10,
        .mips, .mipsel, .mips64, .mips64el => 4 << 10,
        .powerpc, .powerpcle, .powerpc64, .powerpc64le => 4 << 10,
        .sparc => 4 << 10,
        else => null,
    },
    .ps3 => switch (builtin.cpu.arch) {
        // cell/SDK_doc/en/html/C_and_C++_standard_libraries/stdlib.html
        .powerpc64 => 1 << 20, // 1 MiB
        else => null,
    },
    .ps4 => switch (builtin.cpu.arch) {
        // https://github.com/ps4dev/ps4sdk/blob/4df9d001b66ae4ec07d9a51b62d1e4c5e270eecc/include/machine/param.h#L95
        .x86, .x86_64 => 4 << 10,
        else => null,
    },
    .ps5 => switch (builtin.cpu.arch) {
        // https://github.com/PS5Dev/PS5SDK/blob/a2e03a2a0231a3a3397fa6cd087a01ca6d04f273/include/machine/param.h#L95
        .x86, .x86_64 => 16 << 10,
        else => null,
    },
    // system/lib/libc/musl/arch/emscripten/bits/limits.h
    .emscripten => 64 << 10,
    .linux => switch (builtin.cpu.arch) {
        // Linux/arch/*/Kconfig
        .arc => 4 << 10,
        .thumb, .thumbeb, .arm, .armeb => 4 << 10,
        .aarch64, .aarch64_be => 4 << 10,
        .csky => 4 << 10,
        .hexagon => 4 << 10,
        .loongarch32, .loongarch64 => 4 << 10,
        .m68k => 4 << 10,
        .mips, .mipsel, .mips64, .mips64el => 4 << 10,
        .powerpc, .powerpc64, .powerpc64le, .powerpcle => 4 << 10,
        .riscv32, .riscv64 => 4 << 10,
        .s390x => 4 << 10,
        .sparc => 4 << 10,
        .sparc64 => 8 << 10,
        .x86, .x86_64 => 4 << 10,
        .xtensa => 4 << 10,
        else => null,
    },
    .freestanding, .other => switch (builtin.cpu.arch) {
        .wasm32, .wasm64 => 64 << 10,
        .x86, .x86_64 => 4 << 10,
        .aarch64, .aarch64_be => 4 << 10,
        else => null,
    },
    else => null,
};

const page_size_max_default: ?usize = switch (builtin.os.tag) {
    .driverkit, .ios, .macos, .tvos, .visionos, .watchos => switch (builtin.cpu.arch) {
        .x86_64 => 4 << 10,
        .aarch64 => 16 << 10,
        else => null,
    },
    .windows => switch (builtin.cpu.arch) {
        // -- <https://devblogs.microsoft.com/oldnewthing/20210510-00/?p=105200>
        .x86, .x86_64 => 4 << 10,
        // SuperH => 4 << 10,
        .mips, .mipsel, .mips64, .mips64el => 4 << 10,
        .powerpc, .powerpcle, .powerpc64, .powerpc64le => 4 << 10,
        // DEC Alpha => 8 << 10,
        // Itanium => 8 << 10,
        .thumb, .thumbeb, .arm, .armeb, .aarch64, .aarch64_be => 4 << 10,
        else => null,
    },
    .wasi => switch (builtin.cpu.arch) {
        .wasm32, .wasm64 => 64 << 10,
        else => null,
    },
    // https://github.com/tianocore/edk2/blob/b158dad150bf02879668f72ce306445250838201/MdePkg/Include/Uefi/UefiBaseType.h#L180-L187
    .uefi => 4 << 10,
    .freebsd => switch (builtin.cpu.arch) {
        // FreeBSD/sys/*
        .x86, .x86_64 => 4 << 10,
        .thumb, .thumbeb, .arm, .armeb => 4 << 10,
        .aarch64, .aarch64_be => 4 << 10,
        .powerpc, .powerpc64, .powerpc64le, .powerpcle => 4 << 10,
        .riscv32, .riscv64 => 4 << 10,
        else => null,
    },
    .netbsd => switch (builtin.cpu.arch) {
        // NetBSD/sys/arch/*
        .x86, .x86_64 => 4 << 10,
        .thumb, .thumbeb, .arm, .armeb => 4 << 10,
        .aarch64, .aarch64_be => 64 << 10,
        .mips, .mipsel, .mips64, .mips64el => 16 << 10,
        .powerpc, .powerpc64, .powerpc64le, .powerpcle => 16 << 10,
        .sparc => 8 << 10,
        .sparc64 => 8 << 10,
        .riscv32, .riscv64 => 4 << 10,
        .m68k => 8 << 10,
        else => null,
    },
    .dragonfly => switch (builtin.cpu.arch) {
        .x86, .x86_64 => 4 << 10,
        else => null,
    },
    .openbsd => switch (builtin.cpu.arch) {
        // OpenBSD/sys/arch/*
        .x86, .x86_64 => 4 << 10,
        .thumb, .thumbeb, .arm, .armeb, .aarch64, .aarch64_be => 4 << 10,
        .mips64, .mips64el => 16 << 10,
        .powerpc, .powerpc64, .powerpc64le, .powerpcle => 4 << 10,
        .riscv64 => 4 << 10,
        .sparc64 => 8 << 10,
        else => null,
    },
    .solaris, .illumos => switch (builtin.cpu.arch) {
        // src/uts/*/sys/machparam.h
        .x86, .x86_64 => 4 << 10,
        .sparc, .sparc64 => 8 << 10,
        else => null,
    },
    .fuchsia => switch (builtin.cpu.arch) {
        // fuchsia/kernel/arch/*/include/arch/defines.h
        .x86_64 => 4 << 10,
        .aarch64, .aarch64_be => 4 << 10,
        .riscv64 => 4 << 10,
        else => null,
    },
    // https://github.com/SerenityOS/serenity/blob/62b938b798dc009605b5df8a71145942fc53808b/Kernel/API/POSIX/sys/limits.h#L11-L13
    .serenity => 4 << 10,
    .haiku => switch (builtin.cpu.arch) {
        // haiku/headers/posix/arch/*/limits.h
        .thumb, .thumbeb, .arm, .armeb => 4 << 10,
        .aarch64, .aarch64_be => 4 << 10,
        .m68k => 4 << 10,
        .mips, .mipsel, .mips64, .mips64el => 4 << 10,
        .powerpc, .powerpc64, .powerpc64le, .powerpcle => 4 << 10,
        .riscv64 => 4 << 10,
        .sparc64 => 8 << 10,
        .x86, .x86_64 => 4 << 10,
        else => null,
    },
    .hurd => switch (builtin.cpu.arch) {
        // gnumach/*/include/mach/*/vm_param.h
        .x86, .x86_64 => 4 << 10,
        .aarch64 => null,
        else => null,
    },
    .plan9 => switch (builtin.cpu.arch) {
        // 9front/sys/src/9/*/mem.h
        .x86, .x86_64 => 4 << 10,
        .thumb, .thumbeb, .arm, .armeb => 4 << 10,
        .aarch64, .aarch64_be => 64 << 10,
        .mips, .mipsel, .mips64, .mips64el => 16 << 10,
        .powerpc, .powerpcle, .powerpc64, .powerpc64le => 4 << 10,
        .sparc => 4 << 10,
        else => null,
    },
    .ps3 => switch (builtin.cpu.arch) {
        // cell/SDK_doc/en/html/C_and_C++_standard_libraries/stdlib.html
        .powerpc64 => 1 << 20, // 1 MiB
        else => null,
    },
    .ps4 => switch (builtin.cpu.arch) {
        // https://github.com/ps4dev/ps4sdk/blob/4df9d001b66ae4ec07d9a51b62d1e4c5e270eecc/include/machine/param.h#L95
        .x86, .x86_64 => 4 << 10,
        else => null,
    },
    .ps5 => switch (builtin.cpu.arch) {
        // https://github.com/PS5Dev/PS5SDK/blob/a2e03a2a0231a3a3397fa6cd087a01ca6d04f273/include/machine/param.h#L95
        .x86, .x86_64 => 16 << 10,
        else => null,
    },
    // system/lib/libc/musl/arch/emscripten/bits/limits.h
    .emscripten => 64 << 10,
    .linux => switch (builtin.cpu.arch) {
        // Linux/arch/*/Kconfig
        .arc => 16 << 10,
        .thumb, .thumbeb, .arm, .armeb => 4 << 10,
        .aarch64, .aarch64_be => 64 << 10,
        .csky => 4 << 10,
        .hexagon => 256 << 10,
        .loongarch32, .loongarch64 => 64 << 10,
        .m68k => 8 << 10,
        .mips, .mipsel, .mips64, .mips64el => 64 << 10,
        .powerpc, .powerpc64, .powerpc64le, .powerpcle => 256 << 10,
        .riscv32, .riscv64 => 4 << 10,
        .s390x => 4 << 10,
        .sparc => 4 << 10,
        .sparc64 => 8 << 10,
        .x86, .x86_64 => 4 << 10,
        .xtensa => 4 << 10,
        else => null,
    },
    .freestanding => switch (builtin.cpu.arch) {
        .wasm32, .wasm64 => 64 << 10,
        else => null,
    },
    else => null,
};

test {
    _ = @import("heap/memory_pool.zig");
    _ = ArenaAllocator;
    _ = GeneralPurposeAllocator;
    _ = FixedBufferAllocator;
    _ = ThreadSafeAllocator;
    _ = SbrkAllocator;
    if (builtin.target.cpu.arch.isWasm()) {
        _ = WasmAllocator;
    }
    if (!builtin.single_threaded) _ = smp_allocator;
}