struct std.Progress.Node [src]

Represents one unit of progress. Each node can have child nodes, or one can track integer counts on a single node with completeOne, setCompletedItems, and setEstimatedTotalItems.
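
For example, a typical lifecycle looks like this (a minimal sketch; the root name and item count passed to std.Progress.start are illustrative):

const std = @import("std");

pub fn main() void {
    // Acquire the root node for this process.
    const root = std.Progress.start(.{ .root_name = "demo", .estimated_total_items = 3 });
    defer root.end();

    for (0..3) |_| {
        const step = root.start("step", 0);
        defer step.end(); // runs at the end of each loop iteration
        // ... one unit of work ...
    }
}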

Fields

index: OptionalIndex

Values

Constant none [src]

Source code
pub const none: Node = .{ .index = .none }

Constant max_name_len [src]

Source code
pub const max_name_len = 40

Functions

Function start [src]

pub fn start(node: Node, name: []const u8, estimated_total_items: usize) Node

Create a new child progress node. Thread-safe.

Passing 0 for estimated_total_items means unknown.
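
A sketch of fanning out one child per work item; scanOne is hypothetical. Note that names longer than max_name_len (40 bytes) are truncated in the node's shared storage:

const std = @import("std");

fn scanAll(parent: std.Progress.Node, paths: []const []const u8) !void {
    const node = parent.start("scanning", paths.len);
    defer node.end();
    for (paths) |path| {
        const child = node.start(path, 0); // 0: sub-item count unknown
        defer child.end(); // ends this child at the end of each iteration
        try scanOne(path); // hypothetical
    }
}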

Parameters

node: Node
name: []const u8
estimated_total_items: usize

Source code
pub fn start(node: Node, name: []const u8, estimated_total_items: usize) Node {
    if (noop_impl) {
        assert(node.index == .none);
        return Node.none;
    }
    const node_index = node.index.unwrap() orelse return Node.none;
    const parent = node_index.toParent();

    const freelist = &global_progress.node_freelist;
    var old_freelist = @atomicLoad(Freelist, freelist, .acquire); // acquire to ensure we have the correct "next" entry
    while (old_freelist.head.unwrap()) |free_index| {
        const next_ptr = freelistNextByIndex(free_index);
        const new_freelist: Freelist = .{
            .head = @atomicLoad(Node.OptionalIndex, next_ptr, .monotonic),
            // We don't need to increment the generation when removing nodes from the free list,
            // only when adding them. (This choice is arbitrary; the opposite would also work.)
            .generation = old_freelist.generation,
        };
        old_freelist = @cmpxchgWeak(
            Freelist,
            freelist,
            old_freelist,
            new_freelist,
            .acquire, // not theoretically necessary, but not allowed to be weaker than the failure order
            .acquire, // ensure we have the correct `node_freelist_next` entry on the next iteration
        ) orelse {
            // We won the allocation race.
            return init(free_index, parent, name, estimated_total_items);
        };
    }

    const free_index = @atomicRmw(u32, &global_progress.node_end_index, .Add, 1, .monotonic);
    if (free_index >= global_progress.node_storage.len) {
        // Ran out of node storage memory. Progress for this node will not be tracked.
        _ = @atomicRmw(u32, &global_progress.node_end_index, .Sub, 1, .monotonic);
        return Node.none;
    }

    return init(@enumFromInt(free_index), parent, name, estimated_total_items);
}

Function completeOne [src]

pub fn completeOne(n: Node) void

This is the same as calling start and then end on the returned Node. Thread-safe.
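
For example, counting finished iterations directly on the node rather than creating a child per item:

const std = @import("std");

fn sumAll(parent: std.Progress.Node, items: []const u64) u64 {
    const node = parent.start("summing", items.len);
    defer node.end();
    var total: u64 = 0;
    for (items) |item| {
        total += item;
        node.completeOne(); // one more item done
    }
    return total;
}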

Parameters

n: Node

Source code
pub fn completeOne(n: Node) void {
    const index = n.index.unwrap() orelse return;
    const storage = storageByIndex(index);
    _ = @atomicRmw(u32, &storage.completed_count, .Add, 1, .monotonic);
}

Function setCompletedItems [src]

pub fn setCompletedItems(n: Node, completed_items: usize) void

Thread-safe. Sets the absolute number of completed items (not an increment).
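
A sketch of reporting an absolute position rather than incrementing, e.g. bytes copied (assumes dst is at least as long as src):

const std = @import("std");

fn copyWithProgress(parent: std.Progress.Node, dst: []u8, src: []const u8) void {
    const node = parent.start("copying", src.len);
    defer node.end();
    var copied: usize = 0;
    while (copied < src.len) {
        const n = @min(4096, src.len - copied);
        @memcpy(dst[copied..][0..n], src[copied..][0..n]);
        copied += n;
        node.setCompletedItems(copied); // absolute count, not a delta
    }
}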

Parameters

n: Node
completed_items: usize

Source code
pub fn setCompletedItems(n: Node, completed_items: usize) void {
    const index = n.index.unwrap() orelse return;
    const storage = storageByIndex(index);
    @atomicStore(u32, &storage.completed_count, std.math.lossyCast(u32, completed_items), .monotonic);
}

Function setEstimatedTotalItems [src]

pub fn setEstimatedTotalItems(n: Node, count: usize) void

Thread-safe. 0 means unknown.
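
For example, starting with an unknown total and filling it in once the work has been enumerated; discoverEntries and indexOne are hypothetical:

const std = @import("std");

fn indexAll(parent: std.Progress.Node) !void {
    const node = parent.start("indexing", 0); // total unknown at first
    defer node.end();
    const entries = try discoverEntries(); // hypothetical
    node.setEstimatedTotalItems(entries.len);
    for (entries) |entry| {
        try indexOne(entry); // hypothetical
        node.completeOne();
    }
}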

Parameters

n: Node
count: usize

Source code
pub fn setEstimatedTotalItems(n: Node, count: usize) void {
    const index = n.index.unwrap() orelse return;
    const storage = storageByIndex(index);
    // Avoid u32 max int which is used to indicate a special state.
    const saturated = @min(std.math.maxInt(u32) - 1, count);
    @atomicStore(u32, &storage.estimated_total_count, saturated, .monotonic);
}

Function increaseEstimatedTotalItems [src]

pub fn increaseEstimatedTotalItems(n: Node, count: usize) void

Thread-safe. Atomically adds count to the node's estimated total item count.
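
A sketch of growing the estimate as new work is discovered, without resetting what has already been counted; nextBatch and dispatch are hypothetical:

const std = @import("std");

fn produce(node: std.Progress.Node) !void {
    while (try nextBatch()) |batch| { // hypothetical: returns null when done
        node.increaseEstimatedTotalItems(batch.len);
        dispatch(batch); // hypothetical: workers call node.completeOne()
    }
}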

Parameters

n: Node
count: usize

Source code
pub fn increaseEstimatedTotalItems(n: Node, count: usize) void {
    const index = n.index.unwrap() orelse return;
    const storage = storageByIndex(index);
    _ = @atomicRmw(u32, &storage.estimated_total_count, .Add, std.math.lossyCast(u32, count), .monotonic);
}

Function end [src]

pub fn end(n: Node) void

Finish a started Node. Thread-safe.
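
Pairing end with defer keeps every start balanced even on an error path; fallibleWork is hypothetical. Ending the root node (the one returned by std.Progress.start) additionally signals the update thread to shut down, as the source below shows:

const std = @import("std");

fn doTask(parent: std.Progress.Node) !void {
    const node = parent.start("task", 1);
    defer node.end(); // guaranteed to run, even on error return
    try fallibleWork(); // hypothetical
    node.completeOne();
}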

Parameters

n: Node

Source code
pub fn end(n: Node) void {
    if (noop_impl) {
        assert(n.index == .none);
        return;
    }
    const index = n.index.unwrap() orelse return;
    const parent_ptr = parentByIndex(index);
    if (@atomicLoad(Node.Parent, parent_ptr, .monotonic).unwrap()) |parent_index| {
        _ = @atomicRmw(u32, &storageByIndex(parent_index).completed_count, .Add, 1, .monotonic);
        @atomicStore(Node.Parent, parent_ptr, .unused, .monotonic);

        const freelist = &global_progress.node_freelist;
        var old_freelist = @atomicLoad(Freelist, freelist, .monotonic);
        while (true) {
            @atomicStore(Node.OptionalIndex, freelistNextByIndex(index), old_freelist.head, .monotonic);
            old_freelist = @cmpxchgWeak(
                Freelist,
                freelist,
                old_freelist,
                .{ .head = index.toOptional(), .generation = old_freelist.generation +% 1 },
                .release, // ensure a matching `start` sees the freelist link written above
                .monotonic, // our write above is irrelevant if we need to retry
            ) orelse {
                // We won the race.
                return;
            };
        }
    } else {
        @atomicStore(bool, &global_progress.done, true, .monotonic);
        global_progress.redraw_event.set();
        if (global_progress.update_thread) |thread| thread.join();
    }
}

Function setIpcFd [src]

pub fn setIpcFd(node: Node, fd: posix.fd_t) void

Posix-only. Used by std.process.Child. Thread-safe.

Parameters

node: Node
fd: posix.fd_t

Source code
pub fn setIpcFd(node: Node, fd: posix.fd_t) void {
    const index = node.index.unwrap() orelse return;
    assert(fd >= 0);
    assert(fd != posix.STDOUT_FILENO);
    assert(fd != posix.STDIN_FILENO);
    assert(fd != posix.STDERR_FILENO);
    storageByIndex(index).setIpcFd(fd);
}

Function getIpcFd [src]

pub fn getIpcFd(node: Node) ?posix.fd_t

Posix-only. Thread-safe. Assumes the node is storing an IPC file descriptor.

Parameters

node: Node

Source code
pub fn getIpcFd(node: Node) ?posix.fd_t {
    const index = node.index.unwrap() orelse return null;
    const storage = storageByIndex(index);
    const int = @atomicLoad(u32, &storage.completed_count, .monotonic);
    return switch (@typeInfo(posix.fd_t)) {
        .int => @bitCast(int),
        .pointer => @ptrFromInt(int),
        else => @compileError("unsupported fd_t of " ++ @typeName(posix.fd_t)),
    };
}

Source code
pub const Node = struct {
    index: OptionalIndex,

    pub const none: Node = .{ .index = .none };

    pub const max_name_len = 40;

    const Storage = extern struct {
        /// Little endian.
        completed_count: u32,
        /// 0 means unknown.
        /// Little endian.
        estimated_total_count: u32,
        name: [max_name_len]u8 align(@alignOf(usize)),

        /// Not thread-safe.
        fn getIpcFd(s: Storage) ?posix.fd_t {
            return if (s.estimated_total_count == std.math.maxInt(u32)) switch (@typeInfo(posix.fd_t)) {
                .int => @bitCast(s.completed_count),
                .pointer => @ptrFromInt(s.completed_count),
                else => @compileError("unsupported fd_t of " ++ @typeName(posix.fd_t)),
            } else null;
        }

        /// Thread-safe.
        fn setIpcFd(s: *Storage, fd: posix.fd_t) void {
            const integer: u32 = switch (@typeInfo(posix.fd_t)) {
                .int => @bitCast(fd),
                .pointer => @intFromPtr(fd),
                else => @compileError("unsupported fd_t of " ++ @typeName(posix.fd_t)),
            };
            // `estimated_total_count` max int indicates the special state that
            // causes `completed_count` to be treated as a file descriptor, so
            // the order here matters.
            @atomicStore(u32, &s.completed_count, integer, .monotonic);
            @atomicStore(u32, &s.estimated_total_count, std.math.maxInt(u32), .release); // synchronizes with acquire in `serialize`
        }

        /// Not thread-safe.
        fn byteSwap(s: *Storage) void {
            s.completed_count = @byteSwap(s.completed_count);
            s.estimated_total_count = @byteSwap(s.estimated_total_count);
        }

        comptime {
            assert((@sizeOf(Storage) % 4) == 0);
        }
    };

    const Parent = enum(u8) {
        /// Unallocated storage.
        unused = std.math.maxInt(u8) - 1,
        /// Indicates root node.
        none = std.math.maxInt(u8),
        /// Index into `node_storage`.
        _,

        fn unwrap(i: @This()) ?Index {
            return switch (i) {
                .unused, .none => return null,
                else => @enumFromInt(@intFromEnum(i)),
            };
        }
    };

    pub const OptionalIndex = enum(u8) {
        none = std.math.maxInt(u8),
        /// Index into `node_storage`.
        _,

        pub fn unwrap(i: @This()) ?Index {
            if (i == .none) return null;
            return @enumFromInt(@intFromEnum(i));
        }

        fn toParent(i: @This()) Parent {
            assert(@intFromEnum(i) != @intFromEnum(Parent.unused));
            return @enumFromInt(@intFromEnum(i));
        }
    };

    /// Index into `node_storage`.
    pub const Index = enum(u8) {
        _,

        fn toParent(i: @This()) Parent {
            assert(@intFromEnum(i) != @intFromEnum(Parent.unused));
            assert(@intFromEnum(i) != @intFromEnum(Parent.none));
            return @enumFromInt(@intFromEnum(i));
        }

        pub fn toOptional(i: @This()) OptionalIndex {
            return @enumFromInt(@intFromEnum(i));
        }
    };

    /// Create a new child progress node. Thread-safe.
    ///
    /// Passing 0 for `estimated_total_items` means unknown.
    pub fn start(node: Node, name: []const u8, estimated_total_items: usize) Node {
        if (noop_impl) {
            assert(node.index == .none);
            return Node.none;
        }
        const node_index = node.index.unwrap() orelse return Node.none;
        const parent = node_index.toParent();

        const freelist = &global_progress.node_freelist;
        var old_freelist = @atomicLoad(Freelist, freelist, .acquire); // acquire to ensure we have the correct "next" entry
        while (old_freelist.head.unwrap()) |free_index| {
            const next_ptr = freelistNextByIndex(free_index);
            const new_freelist: Freelist = .{
                .head = @atomicLoad(Node.OptionalIndex, next_ptr, .monotonic),
                // We don't need to increment the generation when removing nodes from the free list,
                // only when adding them. (This choice is arbitrary; the opposite would also work.)
                .generation = old_freelist.generation,
            };
            old_freelist = @cmpxchgWeak(
                Freelist,
                freelist,
                old_freelist,
                new_freelist,
                .acquire, // not theoretically necessary, but not allowed to be weaker than the failure order
                .acquire, // ensure we have the correct `node_freelist_next` entry on the next iteration
            ) orelse {
                // We won the allocation race.
                return init(free_index, parent, name, estimated_total_items);
            };
        }

        const free_index = @atomicRmw(u32, &global_progress.node_end_index, .Add, 1, .monotonic);
        if (free_index >= global_progress.node_storage.len) {
            // Ran out of node storage memory. Progress for this node will not be tracked.
            _ = @atomicRmw(u32, &global_progress.node_end_index, .Sub, 1, .monotonic);
            return Node.none;
        }

        return init(@enumFromInt(free_index), parent, name, estimated_total_items);
    }

    /// This is the same as calling `start` and then `end` on the returned `Node`. Thread-safe.
    pub fn completeOne(n: Node) void {
        const index = n.index.unwrap() orelse return;
        const storage = storageByIndex(index);
        _ = @atomicRmw(u32, &storage.completed_count, .Add, 1, .monotonic);
    }

    /// Thread-safe.
    pub fn setCompletedItems(n: Node, completed_items: usize) void {
        const index = n.index.unwrap() orelse return;
        const storage = storageByIndex(index);
        @atomicStore(u32, &storage.completed_count, std.math.lossyCast(u32, completed_items), .monotonic);
    }

    /// Thread-safe. 0 means unknown.
    pub fn setEstimatedTotalItems(n: Node, count: usize) void {
        const index = n.index.unwrap() orelse return;
        const storage = storageByIndex(index);
        // Avoid u32 max int which is used to indicate a special state.
        const saturated = @min(std.math.maxInt(u32) - 1, count);
        @atomicStore(u32, &storage.estimated_total_count, saturated, .monotonic);
    }

    /// Thread-safe.
    pub fn increaseEstimatedTotalItems(n: Node, count: usize) void {
        const index = n.index.unwrap() orelse return;
        const storage = storageByIndex(index);
        _ = @atomicRmw(u32, &storage.estimated_total_count, .Add, std.math.lossyCast(u32, count), .monotonic);
    }

    /// Finish a started `Node`. Thread-safe.
    pub fn end(n: Node) void {
        if (noop_impl) {
            assert(n.index == .none);
            return;
        }
        const index = n.index.unwrap() orelse return;
        const parent_ptr = parentByIndex(index);
        if (@atomicLoad(Node.Parent, parent_ptr, .monotonic).unwrap()) |parent_index| {
            _ = @atomicRmw(u32, &storageByIndex(parent_index).completed_count, .Add, 1, .monotonic);
            @atomicStore(Node.Parent, parent_ptr, .unused, .monotonic);

            const freelist = &global_progress.node_freelist;
            var old_freelist = @atomicLoad(Freelist, freelist, .monotonic);
            while (true) {
                @atomicStore(Node.OptionalIndex, freelistNextByIndex(index), old_freelist.head, .monotonic);
                old_freelist = @cmpxchgWeak(
                    Freelist,
                    freelist,
                    old_freelist,
                    .{ .head = index.toOptional(), .generation = old_freelist.generation +% 1 },
                    .release, // ensure a matching `start` sees the freelist link written above
                    .monotonic, // our write above is irrelevant if we need to retry
                ) orelse {
                    // We won the race.
                    return;
                };
            }
        } else {
            @atomicStore(bool, &global_progress.done, true, .monotonic);
            global_progress.redraw_event.set();
            if (global_progress.update_thread) |thread| thread.join();
        }
    }

    /// Posix-only. Used by `std.process.Child`. Thread-safe.
    pub fn setIpcFd(node: Node, fd: posix.fd_t) void {
        const index = node.index.unwrap() orelse return;
        assert(fd >= 0);
        assert(fd != posix.STDOUT_FILENO);
        assert(fd != posix.STDIN_FILENO);
        assert(fd != posix.STDERR_FILENO);
        storageByIndex(index).setIpcFd(fd);
    }

    /// Posix-only. Thread-safe. Assumes the node is storing an IPC file
    /// descriptor.
    pub fn getIpcFd(node: Node) ?posix.fd_t {
        const index = node.index.unwrap() orelse return null;
        const storage = storageByIndex(index);
        const int = @atomicLoad(u32, &storage.completed_count, .monotonic);
        return switch (@typeInfo(posix.fd_t)) {
            .int => @bitCast(int),
            .pointer => @ptrFromInt(int),
            else => @compileError("unsupported fd_t of " ++ @typeName(posix.fd_t)),
        };
    }

    fn storageByIndex(index: Node.Index) *Node.Storage {
        return &global_progress.node_storage[@intFromEnum(index)];
    }

    fn parentByIndex(index: Node.Index) *Node.Parent {
        return &global_progress.node_parents[@intFromEnum(index)];
    }

    fn freelistNextByIndex(index: Node.Index) *Node.OptionalIndex {
        return &global_progress.node_freelist_next[@intFromEnum(index)];
    }

    fn init(free_index: Index, parent: Parent, name: []const u8, estimated_total_items: usize) Node {
        assert(parent == .none or @intFromEnum(parent) < node_storage_buffer_len);

        const storage = storageByIndex(free_index);
        @atomicStore(u32, &storage.completed_count, 0, .monotonic);
        @atomicStore(u32, &storage.estimated_total_count, std.math.lossyCast(u32, estimated_total_items), .monotonic);
        const name_len = @min(max_name_len, name.len);
        copyAtomicStore(storage.name[0..name_len], name[0..name_len]);
        if (name_len < storage.name.len)
            @atomicStore(u8, &storage.name[name_len], 0, .monotonic);

        const parent_ptr = parentByIndex(free_index);
        if (std.debug.runtime_safety) {
            assert(@atomicLoad(Node.Parent, parent_ptr, .monotonic) == .unused);
        }
        @atomicStore(Node.Parent, parent_ptr, parent, .monotonic);

        return .{ .index = free_index.toOptional() };
    }
};