struct std.debug.SelfInfo.VirtualMachine

This is a virtual machine that runs DWARF call frame instructions.
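
For example, the unwind rules for a single frame can be computed as follows. This is a minimal sketch, assuming allocator, pc, cie, and fde are provided by the surrounding unwinder:

var vm: std.debug.SelfInfo.VirtualMachine = .{};
defer vm.deinit(allocator);

// Compute the row of unwind rules in effect at `pc`.
const row = try vm.runToNative(allocator, pc, cie, fde);

// `row.cfa` describes how to compute the Canonical Frame Address;
// `vm.rowColumns(row)` holds the per-register rules.
_ = vm.rowColumns(row);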

Fields

columns: std.ArrayListUnmanaged(Column) = .empty
stack: std.ArrayListUnmanaged(ColumnRange) = .empty
current_row: Row = .{}
cie_row: ?Row = null

The result of executing the CIE's initial_instructions

Functions

Function deinit

pub fn deinit(self: *VirtualMachine, allocator: std.mem.Allocator) void

Parameters

allocator: std.mem.Allocator

Source Code
pub fn deinit(self: *VirtualMachine, allocator: std.mem.Allocator) void {
    self.stack.deinit(allocator);
    self.columns.deinit(allocator);
    self.* = undefined;
}

Function reset

pub fn reset(self: *VirtualMachine) void

Source Code
pub fn reset(self: *VirtualMachine) void {
    self.stack.clearRetainingCapacity();
    self.columns.clearRetainingCapacity();
    self.current_row = .{};
    self.cie_row = null;
}
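
Because reset retains the backing allocations, one VirtualMachine can be reused across frames. A hedged sketch, where frames is a hypothetical slice of per-frame (pc, cie, fde) data from the surrounding unwinder:

for (frames) |frame| {
    vm.reset();
    const row = try vm.runToNative(allocator, frame.pc, frame.cie, frame.fde);
    _ = row; // consult this frame's row here
}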

Function rowColumns

pub fn rowColumns(self: VirtualMachine, row: Row) []Column

Return a slice backed by the row's non-CFA columns

Source Code
pub fn rowColumns(self: VirtualMachine, row: Row) []Column {
    if (row.columns.len == 0) return &.{};
    return self.columns.items[row.columns.start..][0..row.columns.len];
}
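
After a row has been computed (e.g. by runTo), its non-CFA rules can be inspected like this. A sketch, with vm and row as in the example above:

for (vm.rowColumns(row)) |column| {
    // `column.register` names the register the rule applies to;
    // `column.rule` is the RegisterRule used to recover its value.
    std.debug.print("reg {?d}: {s}\n", .{ column.register, @tagName(column.rule) });
}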

Function runTo

pub fn runTo(
    self: *VirtualMachine,
    allocator: std.mem.Allocator,
    pc: u64,
    cie: std.debug.Dwarf.CommonInformationEntry,
    fde: std.debug.Dwarf.FrameDescriptionEntry,
    addr_size_bytes: u8,
    endian: std.builtin.Endian,
) !Row

Runs the CIE instructions, then the FDE instructions. Execution halts once the row that corresponds to pc is known, and the row is returned.

Source Code
pub fn runTo(
    self: *VirtualMachine,
    allocator: std.mem.Allocator,
    pc: u64,
    cie: std.debug.Dwarf.CommonInformationEntry,
    fde: std.debug.Dwarf.FrameDescriptionEntry,
    addr_size_bytes: u8,
    endian: std.builtin.Endian,
) !Row {
    assert(self.cie_row == null);
    if (pc < fde.pc_begin or pc >= fde.pc_begin + fde.pc_range) return error.AddressOutOfRange;

    var prev_row: Row = self.current_row;

    var cie_stream = std.io.fixedBufferStream(cie.initial_instructions);
    var fde_stream = std.io.fixedBufferStream(fde.instructions);
    var streams = [_]*std.io.FixedBufferStream([]const u8){
        &cie_stream,
        &fde_stream,
    };

    for (&streams, 0..) |stream, i| {
        while (stream.pos < stream.buffer.len) {
            const instruction = try std.debug.Dwarf.call_frame.Instruction.read(stream, addr_size_bytes, endian);
            prev_row = try self.step(allocator, cie, i == 0, instruction);
            if (pc < fde.pc_begin + self.current_row.offset) return prev_row;
        }
    }

    return self.current_row;
}
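
Unlike runToNative, runTo takes the target address size and endianness explicitly, so it can interpret call frame instructions for a non-native target. A sketch for a 32-bit little-endian target, with the same hypothetical inputs as above:

// Interpret the frame's instructions for a 32-bit little-endian target,
// regardless of the host's pointer size or byte order.
const row = try vm.runTo(allocator, pc, cie, fde, 4, .little);
_ = row;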

Function runToNative

pub fn runToNative(
    self: *VirtualMachine,
    allocator: std.mem.Allocator,
    pc: u64,
    cie: std.debug.Dwarf.CommonInformationEntry,
    fde: std.debug.Dwarf.FrameDescriptionEntry,
) !Row

Source Code
pub fn runToNative(
    self: *VirtualMachine,
    allocator: std.mem.Allocator,
    pc: u64,
    cie: std.debug.Dwarf.CommonInformationEntry,
    fde: std.debug.Dwarf.FrameDescriptionEntry,
) !Row {
    return self.runTo(allocator, pc, cie, fde, @sizeOf(usize), native_endian);
}

Function step

pub fn step(
    self: *VirtualMachine,
    allocator: std.mem.Allocator,
    cie: std.debug.Dwarf.CommonInformationEntry,
    is_initial: bool,
    instruction: Dwarf.call_frame.Instruction,
) !Row

Executes a single instruction. If this instruction is from the CIE, is_initial should be set. Returns the value of current_row before executing this instruction.

Source Code
pub fn step(
    self: *VirtualMachine,
    allocator: std.mem.Allocator,
    cie: std.debug.Dwarf.CommonInformationEntry,
    is_initial: bool,
    instruction: Dwarf.call_frame.Instruction,
) !Row {
    // CIE instructions must be run before FDE instructions
    assert(!is_initial or self.cie_row == null);
    if (!is_initial and self.cie_row == null) {
        self.cie_row = self.current_row;
        self.current_row.copy_on_write = true;
    }

    const prev_row = self.current_row;
    switch (instruction) {
        .set_loc => |i| {
            if (i.address <= self.current_row.offset) return error.InvalidOperation;
            // TODO: Check cie.segment_selector_size != 0 for DWARFV4
            self.current_row.offset = i.address;
        },
        inline .advance_loc,
        .advance_loc1,
        .advance_loc2,
        .advance_loc4,
        => |i| {
            self.current_row.offset += i.delta * cie.code_alignment_factor;
            self.current_row.copy_on_write = true;
        },
        inline .offset,
        .offset_extended,
        .offset_extended_sf,
        => |i| {
            try self.resolveCopyOnWrite(allocator);
            const column = try self.getOrAddColumn(allocator, i.register);
            column.rule = .{ .offset = @as(i64, @intCast(i.offset)) * cie.data_alignment_factor };
        },
        inline .restore,
        .restore_extended,
        => |i| {
            try self.resolveCopyOnWrite(allocator);
            if (self.cie_row) |cie_row| {
                const column = try self.getOrAddColumn(allocator, i.register);
                column.rule = for (self.rowColumns(cie_row)) |cie_column| {
                    if (cie_column.register == i.register) break cie_column.rule;
                } else .{ .default = {} };
            } else return error.InvalidOperation;
        },
        .nop => {},
        .undefined => |i| {
            try self.resolveCopyOnWrite(allocator);
            const column = try self.getOrAddColumn(allocator, i.register);
            column.rule = .{ .undefined = {} };
        },
        .same_value => |i| {
            try self.resolveCopyOnWrite(allocator);
            const column = try self.getOrAddColumn(allocator, i.register);
            column.rule = .{ .same_value = {} };
        },
        .register => |i| {
            try self.resolveCopyOnWrite(allocator);
            const column = try self.getOrAddColumn(allocator, i.register);
            column.rule = .{ .register = i.target_register };
        },
        .remember_state => {
            try self.stack.append(allocator, self.current_row.columns);
            self.current_row.copy_on_write = true;
        },
        .restore_state => {
            const restored_columns = self.stack.pop() orelse return error.InvalidOperation;
            self.columns.shrinkRetainingCapacity(self.columns.items.len - self.current_row.columns.len);
            try self.columns.ensureUnusedCapacity(allocator, restored_columns.len);

            self.current_row.columns.start = self.columns.items.len;
            self.current_row.columns.len = restored_columns.len;
            self.columns.appendSliceAssumeCapacity(self.columns.items[restored_columns.start..][0..restored_columns.len]);
        },
        .def_cfa => |i| {
            try self.resolveCopyOnWrite(allocator);
            self.current_row.cfa = .{
                .register = i.register,
                .rule = .{ .val_offset = @intCast(i.offset) },
            };
        },
        .def_cfa_sf => |i| {
            try self.resolveCopyOnWrite(allocator);
            self.current_row.cfa = .{
                .register = i.register,
                .rule = .{ .val_offset = i.offset * cie.data_alignment_factor },
            };
        },
        .def_cfa_register => |i| {
            try self.resolveCopyOnWrite(allocator);
            if (self.current_row.cfa.register == null or self.current_row.cfa.rule != .val_offset) return error.InvalidOperation;
            self.current_row.cfa.register = i.register;
        },
        .def_cfa_offset => |i| {
            try self.resolveCopyOnWrite(allocator);
            if (self.current_row.cfa.register == null or self.current_row.cfa.rule != .val_offset) return error.InvalidOperation;
            self.current_row.cfa.rule = .{
                .val_offset = @intCast(i.offset),
            };
        },
        .def_cfa_offset_sf => |i| {
            try self.resolveCopyOnWrite(allocator);
            if (self.current_row.cfa.register == null or self.current_row.cfa.rule != .val_offset) return error.InvalidOperation;
            self.current_row.cfa.rule = .{
                .val_offset = i.offset * cie.data_alignment_factor,
            };
        },
        .def_cfa_expression => |i| {
            try self.resolveCopyOnWrite(allocator);
            self.current_row.cfa.register = undefined;
            self.current_row.cfa.rule = .{
                .expression = i.block,
            };
        },
        .expression => |i| {
            try self.resolveCopyOnWrite(allocator);
            const column = try self.getOrAddColumn(allocator, i.register);
            column.rule = .{
                .expression = i.block,
            };
        },
        .val_offset => |i| {
            try self.resolveCopyOnWrite(allocator);
            const column = try self.getOrAddColumn(allocator, i.register);
            column.rule = .{
                .val_offset = @as(i64, @intCast(i.offset)) * cie.data_alignment_factor,
            };
        },
        .val_offset_sf => |i| {
            try self.resolveCopyOnWrite(allocator);
            const column = try self.getOrAddColumn(allocator, i.register);
            column.rule = .{
                .val_offset = i.offset * cie.data_alignment_factor,
            };
        },
        .val_expression => |i| {
            try self.resolveCopyOnWrite(allocator);
            const column = try self.getOrAddColumn(allocator, i.register);
            column.rule = .{
                .val_expression = i.block,
            };
        },
    }

    return prev_row;
}
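
step can also be driven directly, mirroring the inner loop of runTo. A sketch that executes only the CIE's initial instructions, assuming vm, allocator, and cie as above:

var stream = std.io.fixedBufferStream(cie.initial_instructions);
while (stream.pos < stream.buffer.len) {
    const instruction = try std.debug.Dwarf.call_frame.Instruction.read(
        &stream,
        @sizeOf(usize),
        native_endian,
    );
    // `is_initial` is true because these instructions come from the CIE.
    _ = try vm.step(allocator, cie, true, instruction);
}
// `vm.current_row` now holds the result of the CIE's initial_instructions.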

Source Code
pub const VirtualMachine = struct {
    /// See section 6.4.1 of the DWARF5 specification for details on each rule.
    const RegisterRule = union(enum) {
        // The spec says that the default rule for each column is the undefined rule.
        // However, it also allows ABI / compiler authors to specify alternate defaults, so
        // there is a distinction made here.
        default: void,
        undefined: void,
        same_value: void,
        // offset(N)
        offset: i64,
        // val_offset(N)
        val_offset: i64,
        // register(R)
        register: u8,
        // expression(E)
        expression: []const u8,
        // val_expression(E)
        val_expression: []const u8,
        // Augmenter-defined rule
        architectural: void,
    };

    /// Each row contains unwinding rules for a set of registers.
    pub const Row = struct {
        /// Offset from `FrameDescriptionEntry.pc_begin`
        offset: u64 = 0,
        /// Special-case column that defines the CFA (Canonical Frame Address) rule.
        /// The register field of this column defines the register that CFA is derived from.
        cfa: Column = .{},
        /// The register fields in these columns define the register the rule applies to.
        columns: ColumnRange = .{},
        /// Indicates that the next write to any column in this row needs to copy
        /// the backing column storage first, as it may be referenced by previous rows.
        copy_on_write: bool = false,
    };

    pub const Column = struct {
        register: ?u8 = null,
        rule: RegisterRule = .{ .default = {} },

        /// Resolves the register rule and places the result into `out` (see regBytes)
        pub fn resolveValue(
            self: Column,
            context: *SelfInfo.UnwindContext,
            expression_context: std.debug.Dwarf.expression.Context,
            ma: *std.debug.MemoryAccessor,
            out: []u8,
        ) !void {
            switch (self.rule) {
                .default => {
                    const register = self.register orelse return error.InvalidRegister;
                    try getRegDefaultValue(register, context, out);
                },
                .undefined => {
                    @memset(out, undefined);
                },
                .same_value => {
                    // TODO: This copy could be eliminated if callers always copy the state then call this function to update it
                    const register = self.register orelse return error.InvalidRegister;
                    const src = try regBytes(context.thread_context, register, context.reg_context);
                    if (src.len != out.len) return error.RegisterSizeMismatch;
                    @memcpy(out, src);
                },
                .offset => |offset| {
                    if (context.cfa) |cfa| {
                        const addr = try applyOffset(cfa, offset);
                        if (ma.load(usize, addr) == null) return error.InvalidAddress;
                        const ptr: *const usize = @ptrFromInt(addr);
                        mem.writeInt(usize, out[0..@sizeOf(usize)], ptr.*, native_endian);
                    } else return error.InvalidCFA;
                },
                .val_offset => |offset| {
                    if (context.cfa) |cfa| {
                        mem.writeInt(usize, out[0..@sizeOf(usize)], try applyOffset(cfa, offset), native_endian);
                    } else return error.InvalidCFA;
                },
                .register => |register| {
                    const src = try regBytes(context.thread_context, register, context.reg_context);
                    if (src.len != out.len) return error.RegisterSizeMismatch;
                    @memcpy(out, try regBytes(context.thread_context, register, context.reg_context));
                },
                .expression => |expression| {
                    context.stack_machine.reset();
                    const value = try context.stack_machine.run(expression, context.allocator, expression_context, context.cfa.?);
                    const addr = if (value) |v| blk: {
                        if (v != .generic) return error.InvalidExpressionValue;
                        break :blk v.generic;
                    } else return error.NoExpressionValue;

                    if (ma.load(usize, addr) == null) return error.InvalidExpressionAddress;
                    const ptr: *usize = @ptrFromInt(addr);
                    mem.writeInt(usize, out[0..@sizeOf(usize)], ptr.*, native_endian);
                },
                .val_expression => |expression| {
                    context.stack_machine.reset();
                    const value = try context.stack_machine.run(expression, context.allocator, expression_context, context.cfa.?);
                    if (value) |v| {
                        if (v != .generic) return error.InvalidExpressionValue;
                        mem.writeInt(usize, out[0..@sizeOf(usize)], v.generic, native_endian);
                    } else return error.NoExpressionValue;
                },
                .architectural => return error.UnimplementedRegisterRule,
            }
        }
    };

    const ColumnRange = struct {
        /// Index into `columns` of the first column in this row.
        start: usize = undefined,
        len: u8 = 0,
    };

    columns: std.ArrayListUnmanaged(Column) = .empty,
    stack: std.ArrayListUnmanaged(ColumnRange) = .empty,
    current_row: Row = .{},

    /// The result of executing the CIE's initial_instructions
    cie_row: ?Row = null,

    pub fn deinit(self: *VirtualMachine, allocator: std.mem.Allocator) void {
        self.stack.deinit(allocator);
        self.columns.deinit(allocator);
        self.* = undefined;
    }

    pub fn reset(self: *VirtualMachine) void {
        self.stack.clearRetainingCapacity();
        self.columns.clearRetainingCapacity();
        self.current_row = .{};
        self.cie_row = null;
    }

    /// Return a slice backed by the row's non-CFA columns
    pub fn rowColumns(self: VirtualMachine, row: Row) []Column {
        if (row.columns.len == 0) return &.{};
        return self.columns.items[row.columns.start..][0..row.columns.len];
    }

    /// Either retrieves or adds a column for `register` (non-CFA) in the current row.
    fn getOrAddColumn(self: *VirtualMachine, allocator: std.mem.Allocator, register: u8) !*Column {
        for (self.rowColumns(self.current_row)) |*c| {
            if (c.register == register) return c;
        }

        if (self.current_row.columns.len == 0) {
            self.current_row.columns.start = self.columns.items.len;
        }
        self.current_row.columns.len += 1;

        const column = try self.columns.addOne(allocator);
        column.* = .{
            .register = register,
        };

        return column;
    }

    /// Runs the CIE instructions, then the FDE instructions. Execution halts
    /// once the row that corresponds to `pc` is known, and the row is returned.
    pub fn runTo(
        self: *VirtualMachine,
        allocator: std.mem.Allocator,
        pc: u64,
        cie: std.debug.Dwarf.CommonInformationEntry,
        fde: std.debug.Dwarf.FrameDescriptionEntry,
        addr_size_bytes: u8,
        endian: std.builtin.Endian,
    ) !Row {
        assert(self.cie_row == null);
        if (pc < fde.pc_begin or pc >= fde.pc_begin + fde.pc_range) return error.AddressOutOfRange;

        var prev_row: Row = self.current_row;

        var cie_stream = std.io.fixedBufferStream(cie.initial_instructions);
        var fde_stream = std.io.fixedBufferStream(fde.instructions);
        var streams = [_]*std.io.FixedBufferStream([]const u8){
            &cie_stream,
            &fde_stream,
        };

        for (&streams, 0..) |stream, i| {
            while (stream.pos < stream.buffer.len) {
                const instruction = try std.debug.Dwarf.call_frame.Instruction.read(stream, addr_size_bytes, endian);
                prev_row = try self.step(allocator, cie, i == 0, instruction);
                if (pc < fde.pc_begin + self.current_row.offset) return prev_row;
            }
        }

        return self.current_row;
    }

    pub fn runToNative(
        self: *VirtualMachine,
        allocator: std.mem.Allocator,
        pc: u64,
        cie: std.debug.Dwarf.CommonInformationEntry,
        fde: std.debug.Dwarf.FrameDescriptionEntry,
    ) !Row {
        return self.runTo(allocator, pc, cie, fde, @sizeOf(usize), native_endian);
    }

    fn resolveCopyOnWrite(self: *VirtualMachine, allocator: std.mem.Allocator) !void {
        if (!self.current_row.copy_on_write) return;

        const new_start = self.columns.items.len;
        if (self.current_row.columns.len > 0) {
            try self.columns.ensureUnusedCapacity(allocator, self.current_row.columns.len);
            self.columns.appendSliceAssumeCapacity(self.rowColumns(self.current_row));
            self.current_row.columns.start = new_start;
        }
    }

    /// Executes a single instruction.
    /// If this instruction is from the CIE, `is_initial` should be set.
    /// Returns the value of `current_row` before executing this instruction.
    pub fn step(
        self: *VirtualMachine,
        allocator: std.mem.Allocator,
        cie: std.debug.Dwarf.CommonInformationEntry,
        is_initial: bool,
        instruction: Dwarf.call_frame.Instruction,
    ) !Row {
        // CIE instructions must be run before FDE instructions
        assert(!is_initial or self.cie_row == null);
        if (!is_initial and self.cie_row == null) {
            self.cie_row = self.current_row;
            self.current_row.copy_on_write = true;
        }

        const prev_row = self.current_row;
        switch (instruction) {
            .set_loc => |i| {
                if (i.address <= self.current_row.offset) return error.InvalidOperation;
                // TODO: Check cie.segment_selector_size != 0 for DWARFV4
                self.current_row.offset = i.address;
            },
            inline .advance_loc,
            .advance_loc1,
            .advance_loc2,
            .advance_loc4,
            => |i| {
                self.current_row.offset += i.delta * cie.code_alignment_factor;
                self.current_row.copy_on_write = true;
            },
            inline .offset,
            .offset_extended,
            .offset_extended_sf,
            => |i| {
                try self.resolveCopyOnWrite(allocator);
                const column = try self.getOrAddColumn(allocator, i.register);
                column.rule = .{ .offset = @as(i64, @intCast(i.offset)) * cie.data_alignment_factor };
            },
            inline .restore,
            .restore_extended,
            => |i| {
                try self.resolveCopyOnWrite(allocator);
                if (self.cie_row) |cie_row| {
                    const column = try self.getOrAddColumn(allocator, i.register);
                    column.rule = for (self.rowColumns(cie_row)) |cie_column| {
                        if (cie_column.register == i.register) break cie_column.rule;
                    } else .{ .default = {} };
                } else return error.InvalidOperation;
            },
            .nop => {},
            .undefined => |i| {
                try self.resolveCopyOnWrite(allocator);
                const column = try self.getOrAddColumn(allocator, i.register);
                column.rule = .{ .undefined = {} };
            },
            .same_value => |i| {
                try self.resolveCopyOnWrite(allocator);
                const column = try self.getOrAddColumn(allocator, i.register);
                column.rule = .{ .same_value = {} };
            },
            .register => |i| {
                try self.resolveCopyOnWrite(allocator);
                const column = try self.getOrAddColumn(allocator, i.register);
                column.rule = .{ .register = i.target_register };
            },
            .remember_state => {
                try self.stack.append(allocator, self.current_row.columns);
                self.current_row.copy_on_write = true;
            },
            .restore_state => {
                const restored_columns = self.stack.pop() orelse return error.InvalidOperation;
                self.columns.shrinkRetainingCapacity(self.columns.items.len - self.current_row.columns.len);
                try self.columns.ensureUnusedCapacity(allocator, restored_columns.len);

                self.current_row.columns.start = self.columns.items.len;
                self.current_row.columns.len = restored_columns.len;
                self.columns.appendSliceAssumeCapacity(self.columns.items[restored_columns.start..][0..restored_columns.len]);
            },
            .def_cfa => |i| {
                try self.resolveCopyOnWrite(allocator);
                self.current_row.cfa = .{
                    .register = i.register,
                    .rule = .{ .val_offset = @intCast(i.offset) },
                };
            },
            .def_cfa_sf => |i| {
                try self.resolveCopyOnWrite(allocator);
                self.current_row.cfa = .{
                    .register = i.register,
                    .rule = .{ .val_offset = i.offset * cie.data_alignment_factor },
                };
            },
            .def_cfa_register => |i| {
                try self.resolveCopyOnWrite(allocator);
                if (self.current_row.cfa.register == null or self.current_row.cfa.rule != .val_offset) return error.InvalidOperation;
                self.current_row.cfa.register = i.register;
            },
            .def_cfa_offset => |i| {
                try self.resolveCopyOnWrite(allocator);
                if (self.current_row.cfa.register == null or self.current_row.cfa.rule != .val_offset) return error.InvalidOperation;
                self.current_row.cfa.rule = .{
                    .val_offset = @intCast(i.offset),
                };
            },
            .def_cfa_offset_sf => |i| {
                try self.resolveCopyOnWrite(allocator);
                if (self.current_row.cfa.register == null or self.current_row.cfa.rule != .val_offset) return error.InvalidOperation;
                self.current_row.cfa.rule = .{
                    .val_offset = i.offset * cie.data_alignment_factor,
                };
            },
            .def_cfa_expression => |i| {
                try self.resolveCopyOnWrite(allocator);
                self.current_row.cfa.register = undefined;
                self.current_row.cfa.rule = .{
                    .expression = i.block,
                };
            },
            .expression => |i| {
                try self.resolveCopyOnWrite(allocator);
                const column = try self.getOrAddColumn(allocator, i.register);
                column.rule = .{
                    .expression = i.block,
                };
            },
            .val_offset => |i| {
                try self.resolveCopyOnWrite(allocator);
                const column = try self.getOrAddColumn(allocator, i.register);
                column.rule = .{
                    .val_offset = @as(i64, @intCast(i.offset)) * cie.data_alignment_factor,
                };
            },
            .val_offset_sf => |i| {
                try self.resolveCopyOnWrite(allocator);
                const column = try self.getOrAddColumn(allocator, i.register);
                column.rule = .{
                    .val_offset = i.offset * cie.data_alignment_factor,
                };
            },
            .val_expression => |i| {
                try self.resolveCopyOnWrite(allocator);
                const column = try self.getOrAddColumn(allocator, i.register);
                column.rule = .{
                    .val_expression = i.block,
                };
            },
        }

        return prev_row;
    }
};