struct std.debug.StackIterator [src]

Fields

first_address: ?usize
fp: usize
ma: MemoryAccessor = MemoryAccessor.init
unwind_state: if (have_ucontext) ?struct {
    debug_info: *SelfInfo,
    dwarf_context: SelfInfo.UnwindContext,
    last_error: ?UnwindError = null,
    failed: bool = false,
} else void = if (have_ucontext) null else {}

Functions

Function init [src]

pub fn init(first_address: ?usize, fp: ?usize) StackIterator

Parameters

first_address: ?usize
fp: ?usize

Source Code

Source code
/// Initializes an fp-walking iterator.
/// `first_address`: if non-null, `next` discards frames until this return
/// address is seen.
/// `fp`: frame pointer to start from; null means the caller's own frame.
pub fn init(first_address: ?usize, fp: ?usize) StackIterator {
    if (native_arch.isSPARC()) {
        // Flush all the register windows on stack.
        asm volatile (if (std.Target.sparc.featureSetHas(builtin.cpu.features, .v9))
                "flushw"
            else
                "ta 3" // ST_FLUSH_WINDOWS
            ::: "memory");
    }

    return StackIterator{
        .first_address = first_address,
        // TODO: this is a workaround for #16876
        //.fp = fp orelse @frameAddress(),
        .fp = fp orelse blk: {
            const fa = @frameAddress();
            break :blk fa;
        },
    };
}

Function initWithContext [src]

pub fn initWithContext(first_address: ?usize, debug_info: *SelfInfo, context: *posix.ucontext_t) !StackIterator

Parameters

first_address: ?usize
debug_info: *SelfInfo
context: *posix.ucontext_t

Source Code

Source code
/// Initializes an iterator that unwinds using the register state in
/// `context` plus the unwind tables in `debug_info`, when supported;
/// otherwise falls back to plain fp-walking via `init`.
pub fn initWithContext(first_address: ?usize, debug_info: *SelfInfo, context: *posix.ucontext_t) !StackIterator {
    // The implementation of DWARF unwinding on aarch64-macos is not complete. However, Apple mandates that
    // the frame pointer register is always used, so on this platform we can safely use the FP-based unwinder.
    if (builtin.target.os.tag.isDarwin() and native_arch == .aarch64)
        return init(first_address, @truncate(context.mcontext.ss.fp));

    if (SelfInfo.supports_unwinding) {
        var iterator = init(first_address, null);
        iterator.unwind_state = .{
            .debug_info = debug_info,
            .dwarf_context = try SelfInfo.UnwindContext.init(debug_info.allocator, context),
        };
        return iterator;
    }

    return init(first_address, null);
}

Function deinit [src]

pub fn deinit(it: *StackIterator) void

Parameters

Source Code

Source code
/// Releases the memory accessor and, if present, the DWARF unwind context.
pub fn deinit(it: *StackIterator) void {
    it.ma.deinit();
    if (have_ucontext and it.unwind_state != null) it.unwind_state.?.dwarf_context.deinit();
}

Function getLastError [src]

pub fn getLastError(it: *StackIterator) ?struct { err: UnwindError, address: usize, }

Parameters

Source Code

Source code
/// Returns (and clears) the error that caused unwind-info-based unwinding
/// to fail, together with the pc at which it occurred. Returns null when
/// no error is pending or ucontext support is unavailable.
pub fn getLastError(it: *StackIterator) ?struct {
    err: UnwindError,
    address: usize,
} {
    if (!have_ucontext) return null;
    if (it.unwind_state) |*unwind_state| {
        if (unwind_state.last_error) |err| {
            // Clear the pending error so it is only reported once.
            unwind_state.last_error = null;
            return .{
                .err = err,
                .address = unwind_state.dwarf_context.pc,
            };
        }
    }

    return null;
}

Function next [src]

pub fn next(it: *StackIterator) ?usize

Parameters

Source Code

Source code
/// Returns the next return address, or null when the walk is exhausted.
/// If `first_address` is set, earlier frames are silently discarded until
/// it is found; it is then cleared so subsequent calls yield every frame.
pub fn next(it: *StackIterator) ?usize {
    var address = it.next_internal() orelse return null;

    if (it.first_address) |first_address| {
        while (address != first_address) {
            address = it.next_internal() orelse return null;
        }
        it.first_address = null;
    }

    return address;
}

Source Code

Source code
/// Iterates the return addresses of a call stack. By default it walks the
/// chain of saved frame pointers; when ucontext support is available and a
/// register context is supplied (see `initWithContext`), it first tries
/// unwinding via DWARF / MachO unwind info and falls back to fp-walking on
/// the first failure.
pub const StackIterator = struct {
    // Skip every frame before this address is found.
    first_address: ?usize,
    // Last known value of the frame pointer register.
    fp: usize,
    // Performs all loads from (possibly invalid) stack memory.
    ma: MemoryAccessor = MemoryAccessor.init,

    // When SelfInfo and a register context is available, this iterator can unwind
    // stacks with frames that don't use a frame pointer (ie. -fomit-frame-pointer),
    // using DWARF and MachO unwind info.
    unwind_state: if (have_ucontext) ?struct {
        debug_info: *SelfInfo,
        dwarf_context: SelfInfo.UnwindContext,
        // Stashed here so `getLastError` can report why unwinding stopped.
        last_error: ?UnwindError = null,
        // Set after the first unwind failure; fp-based walking takes over.
        failed: bool = false,
    } else void = if (have_ucontext) null else {},

    /// Initializes an fp-walking iterator.
    /// `first_address`: if non-null, `next` discards frames until this return
    /// address is seen.
    /// `fp`: frame pointer to start from; null means the caller's own frame.
    pub fn init(first_address: ?usize, fp: ?usize) StackIterator {
        if (native_arch.isSPARC()) {
            // Flush all the register windows on stack.
            asm volatile (if (std.Target.sparc.featureSetHas(builtin.cpu.features, .v9))
                    "flushw"
                else
                    "ta 3" // ST_FLUSH_WINDOWS
                ::: "memory");
        }

        return StackIterator{
            .first_address = first_address,
            // TODO: this is a workaround for #16876
            //.fp = fp orelse @frameAddress(),
            .fp = fp orelse blk: {
                const fa = @frameAddress();
                break :blk fa;
            },
        };
    }

    /// Initializes an iterator that unwinds using the register state in
    /// `context` plus the unwind tables in `debug_info`, when supported;
    /// otherwise falls back to plain fp-walking via `init`.
    pub fn initWithContext(first_address: ?usize, debug_info: *SelfInfo, context: *posix.ucontext_t) !StackIterator {
        // The implementation of DWARF unwinding on aarch64-macos is not complete. However, Apple mandates that
        // the frame pointer register is always used, so on this platform we can safely use the FP-based unwinder.
        if (builtin.target.os.tag.isDarwin() and native_arch == .aarch64)
            return init(first_address, @truncate(context.mcontext.ss.fp));

        if (SelfInfo.supports_unwinding) {
            var iterator = init(first_address, null);
            iterator.unwind_state = .{
                .debug_info = debug_info,
                .dwarf_context = try SelfInfo.UnwindContext.init(debug_info.allocator, context),
            };
            return iterator;
        }

        return init(first_address, null);
    }

    /// Releases the memory accessor and, if present, the DWARF unwind context.
    pub fn deinit(it: *StackIterator) void {
        it.ma.deinit();
        if (have_ucontext and it.unwind_state != null) it.unwind_state.?.dwarf_context.deinit();
    }

    /// Returns (and clears) the error that caused unwind-info-based unwinding
    /// to fail, together with the pc at which it occurred. Returns null when
    /// no error is pending or ucontext support is unavailable.
    pub fn getLastError(it: *StackIterator) ?struct {
        err: UnwindError,
        address: usize,
    } {
        if (!have_ucontext) return null;
        if (it.unwind_state) |*unwind_state| {
            if (unwind_state.last_error) |err| {
                // Clear the pending error so it is only reported once.
                unwind_state.last_error = null;
                return .{
                    .err = err,
                    .address = unwind_state.dwarf_context.pc,
                };
            }
        }

        return null;
    }

    // Offset of the saved BP wrt the frame pointer.
    const fp_offset = if (native_arch.isRISCV())
        // On RISC-V the frame pointer points to the top of the saved register
        // area, on pretty much every other architecture it points to the stack
        // slot where the previous frame pointer is saved.
        2 * @sizeOf(usize)
    else if (native_arch.isSPARC())
        // On SPARC the previous frame pointer is stored at 14 slots past %fp+BIAS.
        14 * @sizeOf(usize)
    else
        0;

    const fp_bias = if (native_arch.isSPARC())
        // On SPARC frame pointers are biased by a constant.
        2047
    else
        0;

    // Positive offset of the saved PC wrt the frame pointer.
    const pc_offset = if (native_arch == .powerpc64le)
        2 * @sizeOf(usize)
    else
        @sizeOf(usize);

    /// Returns the next return address, or null when the walk is exhausted.
    /// If `first_address` is set, earlier frames are silently discarded until
    /// it is found; it is then cleared so subsequent calls yield every frame.
    pub fn next(it: *StackIterator) ?usize {
        var address = it.next_internal() orelse return null;

        if (it.first_address) |first_address| {
            while (address != first_address) {
                address = it.next_internal() orelse return null;
            }
            it.first_address = null;
        }

        return address;
    }

    // Unwinds a single frame using platform unwind info; only called when
    // `unwind_state` is populated. Errors are surfaced to next_internal,
    // which records them and switches to fp-based walking.
    fn next_unwind(it: *StackIterator) !usize {
        const unwind_state = &it.unwind_state.?;
        const module = try unwind_state.debug_info.getModuleForAddress(unwind_state.dwarf_context.pc);
        switch (native_os) {
            .macos, .ios, .watchos, .tvos, .visionos => {
                // __unwind_info is a requirement for unwinding on Darwin. It may fall back to DWARF, but unwinding
                // via DWARF before attempting to use the compact unwind info will produce incorrect results.
                if (module.unwind_info) |unwind_info| {
                    if (SelfInfo.unwindFrameMachO(
                        unwind_state.debug_info.allocator,
                        module.base_address,
                        &unwind_state.dwarf_context,
                        &it.ma,
                        unwind_info,
                        module.eh_frame,
                    )) |return_address| {
                        return return_address;
                    } else |err| {
                        // RequiresDWARFUnwind means the compact entry defers to
                        // the DWARF path below; any other error fails the frame.
                        if (err != error.RequiresDWARFUnwind) return err;
                    }
                } else return error.MissingUnwindInfo;
            },
            else => {},
        }

        if (try module.getDwarfInfoForAddress(unwind_state.debug_info.allocator, unwind_state.dwarf_context.pc)) |di| {
            return SelfInfo.unwindFrameDwarf(
                unwind_state.debug_info.allocator,
                di,
                module.base_address,
                &unwind_state.dwarf_context,
                &it.ma,
                null,
            );
        } else return error.MissingDebugInfo;
    }

    // Produces the next return address, preferring unwind-info-based
    // unwinding and falling back to walking the frame-pointer chain.
    fn next_internal(it: *StackIterator) ?usize {
        if (have_ucontext) {
            if (it.unwind_state) |*unwind_state| {
                if (!unwind_state.failed) {
                    // A pc of 0 means there is nothing left to unwind.
                    if (unwind_state.dwarf_context.pc == 0) return null;
                    // Keep `fp` in sync so the fp-based path below can resume
                    // from wherever unwind-info-based unwinding stopped.
                    defer it.fp = unwind_state.dwarf_context.getFp() catch 0;
                    if (it.next_unwind()) |return_address| {
                        return return_address;
                    } else |err| {
                        unwind_state.last_error = err;
                        unwind_state.failed = true;

                        // Fall back to fp-based unwinding on the first failure.
                        // We can't attempt it again for other modules higher in the
                        // stack because the full register state won't have been unwound.
                    }
                }
            }
        }

        // Fp-based walking: load the saved fp and pc out of the current frame.
        const fp = if (comptime native_arch.isSPARC())
            // On SPARC the offset is positive. (!)
            math.add(usize, it.fp, fp_offset) catch return null
        else
            math.sub(usize, it.fp, fp_offset) catch return null;

        // Sanity check.
        if (fp == 0 or !mem.isAligned(fp, @alignOf(usize))) return null;
        const new_fp = math.add(usize, it.ma.load(usize, fp) orelse return null, fp_bias) catch
            return null;

        // Sanity check: the stack grows down thus all the parent frames must be
        // be at addresses that are greater (or equal) than the previous one.
        // A zero frame pointer often signals this is the last frame, that case
        // is gracefully handled by the next call to next_internal.
        if (new_fp != 0 and new_fp < it.fp) return null;
        const new_pc = it.ma.load(usize, math.add(usize, fp, pc_offset) catch return null) orelse
            return null;

        it.fp = new_fp;

        return new_pc;
    }
}