Merge pull request #21170 from jacobly0/more-dwarf-cleanup

Dwarf: more cleanup
Jacob Young
2024-08-22 23:20:53 -04:00
committed by GitHub
10 changed files with 212 additions and 77 deletions

View File

@@ -64,7 +64,7 @@ stage3-debug/bin/zig build \
stage3-debug/bin/zig build test docs \
--maxrss 21000000000 \
-Dlldb=$HOME/deps/lldb-zig/Debug-f96d3e6fc/bin/lldb \
-Dlldb=$HOME/deps/lldb-zig/Debug-62538077d/bin/lldb \
-fqemu \
-fwasmtime \
-Dstatic-llvm \

View File

@@ -64,7 +64,7 @@ stage3-release/bin/zig build \
stage3-release/bin/zig build test docs \
--maxrss 21000000000 \
-Dlldb=$HOME/deps/lldb-zig/Release-f96d3e6fc/bin/lldb \
-Dlldb=$HOME/deps/lldb-zig/Release-62538077d/bin/lldb \
-fqemu \
-fwasmtime \
-Dstatic-llvm \

View File

@@ -202,6 +202,7 @@ pub fn print(ty: Type, writer: anytype, pt: Zcu.PerThread) @TypeOf(writer).Error
.C => try writer.writeAll("[*c]"),
.Slice => try writer.writeAll("[]"),
}
if (info.flags.is_allowzero and info.flags.size != .C) try writer.writeAll("allowzero ");
if (info.flags.alignment != .none or
info.packed_offset.host_size != 0 or
info.flags.vector_index != .none)
@@ -229,7 +230,6 @@ pub fn print(ty: Type, writer: anytype, pt: Zcu.PerThread) @TypeOf(writer).Error
}
if (info.flags.is_const) try writer.writeAll("const ");
if (info.flags.is_volatile) try writer.writeAll("volatile ");
if (info.flags.is_allowzero and info.flags.size != .C) try writer.writeAll("allowzero ");
try print(Type.fromInterned(info.child), writer, pt);
return;

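A minimal illustration of the Type.print reordering above (declaration names here are made up): `allowzero` is now printed immediately after the size prefix instead of after `const`/`volatile`, matching how these types are written in source.

const SinglePtr = *allowzero const volatile u32; // was printed "*const volatile allowzero u32"
const ManyPtr = [*]allowzero const u32; // was printed "[*]const allowzero u32"
const SlicePtr = []allowzero volatile u32; // was printed "[]volatile allowzero u32"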
View File

@@ -59,6 +59,7 @@ owner: Owner,
inline_func: InternPool.Index,
mod: *Package.Module,
err_msg: ?*ErrorMsg,
arg_index: u32,
args: []MCValue,
va_info: union {
sysv: struct {
@@ -71,7 +72,6 @@ va_info: union {
},
ret_mcv: InstTracking,
fn_type: Type,
arg_index: u32,
src_loc: Zcu.LazySrcLoc,
eflags_inst: ?Air.Inst.Index = null,
@@ -802,11 +802,11 @@ pub fn generate(
.owner = .{ .nav_index = func.owner_nav },
.inline_func = func_index,
.err_msg = null,
.arg_index = undefined,
.args = undefined, // populated after `resolveCallingConventionValues`
.va_info = undefined, // populated after `resolveCallingConventionValues`
.ret_mcv = undefined, // populated after `resolveCallingConventionValues`
.fn_type = fn_type,
.arg_index = 0,
.src_loc = src_loc,
.end_di_line = func.rbrace_line,
.end_di_column = func.rbrace_column,
@@ -877,6 +877,7 @@ pub fn generate(
}),
);
function.va_info = switch (cc) {
else => undefined,
.SysV => .{ .sysv = .{
.gp_count = call_info.gp_count,
.fp_count = call_info.fp_count,
@@ -884,7 +885,6 @@ pub fn generate(
.reg_save_area = undefined,
} },
.Win64 => .{ .win64 = .{} },
else => undefined,
};
function.gen() catch |err| switch (err) {
@@ -978,11 +978,11 @@ pub fn generateLazy(
.owner = .{ .lazy_sym = lazy_sym },
.inline_func = undefined,
.err_msg = null,
.arg_index = undefined,
.args = undefined,
.va_info = undefined,
.ret_mcv = undefined,
.fn_type = undefined,
.arg_index = undefined,
.src_loc = src_loc,
.end_di_line = undefined, // no debug info yet
.end_di_column = undefined, // no debug info yet
@@ -1482,6 +1482,8 @@ fn asmOpOnly(self: *Self, tag: Mir.Inst.FixedTag) !void {
}
fn asmPseudo(self: *Self, ops: Mir.Inst.Ops) !void {
assert(std.mem.startsWith(u8, @tagName(ops), "pseudo_") and
std.mem.endsWith(u8, @tagName(ops), "_none"));
_ = try self.addInst(.{
.tag = .pseudo,
.ops = ops,
@@ -2101,6 +2103,7 @@ fn genBody(self: *Self, body: []const Air.Inst.Index) InnerError!void {
const ip = &mod.intern_pool;
const air_tags = self.air.instructions.items(.tag);
self.arg_index = 0;
for (body) |inst| {
wip_mir_log.debug("{}", .{self.fmtAir(inst)});
verbose_tracking_log.debug("{}", .{self.fmtTracking()});
@@ -2114,6 +2117,8 @@ fn genBody(self: *Self, body: []const Air.Inst.Index) InnerError!void {
self.checkInvariantsAfterAirInst(inst, old_air_bookkeeping);
}
if (self.arg_index == 0) try self.airDbgVarArgs();
self.arg_index = 0;
for (body) |inst| {
if (self.liveness.isUnused(inst) and !self.air.mustLower(inst, ip)) continue;
wip_mir_log.debug("{}", .{self.fmtAir(inst)});
@@ -12055,11 +12060,25 @@ fn airArg(self: *Self, inst: Air.Inst.Index) !void {
}
fn airDbgArg(self: *Self, inst: Air.Inst.Index) !void {
defer self.finishAirBookkeeping();
if (self.debug_output == .none) return;
const name = self.air.instructions.items(.data)[@intFromEnum(inst)].arg.name;
if (name != .none) try self.genLocalDebugInfo(inst, self.getResolvedInstValue(inst).short);
if (self.liveness.isUnused(inst)) try self.processDeath(inst);
// skip zero-bit arguments as they don't have a corresponding arg instruction
var arg_index = self.arg_index;
while (self.args[arg_index] == .none) arg_index += 1;
self.arg_index = arg_index + 1;
if (self.debug_output != .none) {
const name = self.air.instructions.items(.data)[@intFromEnum(inst)].arg.name;
if (name != .none) try self.genLocalDebugInfo(inst, self.getResolvedInstValue(inst).short);
if (self.liveness.isUnused(inst)) try self.processDeath(inst);
}
for (self.args[self.arg_index..]) |arg| {
if (arg != .none) break;
} else try self.airDbgVarArgs();
self.finishAirBookkeeping();
}
fn airDbgVarArgs(self: *Self) !void {
if (self.pt.zcu.typeToFunc(self.fn_type).?.is_var_args)
try self.asmPseudo(.pseudo_dbg_var_args_none);
}
fn genLocalDebugInfo(

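A minimal, standalone sketch of the `for ... else` pattern used in airDbgArg above (the MCValue type here is illustrative, not the backend's): the `else` arm runs only when the loop finishes without a `break`, i.e. every remaining argument slot is a zero-bit `.none`, so the varargs marker is emitted right after the last named argument.

const std = @import("std");

test "for-else fires when only zero-bit args remain" {
    const MCValue = union(enum) { none, register: u8 };
    const remaining = [_]MCValue{ .none, .none };
    var emit_var_args_marker = false;
    for (remaining) |arg| {
        if (arg != .none) break;
    } else {
        emit_var_args_marker = true;
    }
    try std.testing.expect(emit_var_args_marker);
}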
View File

@@ -384,6 +384,13 @@ pub fn emitMir(emit: *Emit) Error!void {
.none => {},
}
},
.pseudo_dbg_var_args_none => {
switch (emit.debug_output) {
.dwarf => |dw| try dw.genVarArgsDebugInfo(),
.plan9 => {},
.none => {},
}
},
.pseudo_dead_none => {},
},
}

View File

@@ -279,6 +279,7 @@ pub fn lowerMir(lower: *Lower, index: Mir.Inst.Index) Error!struct {
.pseudo_dbg_local_aro,
.pseudo_dbg_local_af,
.pseudo_dbg_local_am,
.pseudo_dbg_var_args_none,
.pseudo_dead_none,
=> {},
else => unreachable,

View File

@@ -924,6 +924,8 @@ pub const Inst = struct {
/// Local argument or variable.
/// Uses `ax` payload with extra data of type `Memory`.
pseudo_dbg_local_am,
/// Remaining arguments are varargs.
pseudo_dbg_var_args_none,
/// Tombstone
/// Emitter should skip this instruction.

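A standalone sketch (hypothetical enum and function names) of how a debug-only pseudo instruction like pseudo_dbg_var_args_none flows through the backend: lowering produces no machine code for it, and the emitter reacts only when DWARF output is enabled, mirroring the Lower.zig and Emit.zig hunks above.

const std = @import("std");

const PseudoOp = enum { dbg_var_args_none, dead_none };

fn machineBytes(op: PseudoOp) usize {
    return switch (op) {
        // pseudo instructions encode to nothing; they only carry metadata
        .dbg_var_args_none, .dead_none => 0,
    };
}

fn emitsDebugRecord(op: PseudoOp, have_dwarf: bool) bool {
    return switch (op) {
        .dbg_var_args_none => have_dwarf, // forwarded to the DWARF writer when enabled
        .dead_none => false, // tombstone: skipped entirely
    };
}

test "pseudo instructions are metadata-only" {
    try std.testing.expectEqual(@as(usize, 0), machineBytes(.dbg_var_args_none));
    try std.testing.expect(emitsDebugRecord(.dbg_var_args_none, true));
    try std.testing.expect(!emitsDebugRecord(.dbg_var_args_none, false));
}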
View File

@@ -1108,6 +1108,12 @@ pub const WipNav = struct {
wip_nav.any_children = true;
}
pub fn genVarArgsDebugInfo(wip_nav: *WipNav) UpdateError!void {
assert(wip_nav.func != .none);
try wip_nav.abbrevCode(.is_var_args);
wip_nav.any_children = true;
}
pub fn advancePCAndLine(
wip_nav: *WipNav,
delta_line: i33,
@@ -1380,7 +1386,11 @@ pub const WipNav = struct {
fn enumConstValue(
wip_nav: *WipNav,
loaded_enum: InternPool.LoadedEnumType,
abbrev_code: std.enums.EnumFieldStruct(std.builtin.Signedness, AbbrevCode, null),
abbrev_code: struct {
sdata: AbbrevCode,
udata: AbbrevCode,
block: AbbrevCode,
},
field_index: usize,
) UpdateError!void {
const zcu = wip_nav.pt.zcu;
@@ -1390,20 +1400,15 @@ pub const WipNav = struct {
.comptime_int_type => .signed,
else => Type.fromInterned(loaded_enum.tag_ty).intInfo(zcu).signedness,
};
try wip_nav.abbrevCode(switch (signedness) {
inline .signed, .unsigned => |ct_signedness| @field(abbrev_code, @tagName(ct_signedness)),
});
if (loaded_enum.values.len > 0) switch (ip.indexToKey(loaded_enum.values.get(ip)[field_index]).int.storage) {
.u64 => |value| switch (signedness) {
.signed => try sleb128(diw, value),
.unsigned => try uleb128(diw, value),
},
.i64 => |value| switch (signedness) {
.signed => try sleb128(diw, value),
.unsigned => unreachable,
},
.big_int => |big_int| {
const bits = big_int.bitCountTwosCompForSignedness(signedness);
if (loaded_enum.values.len > 0) {
var big_int_space: InternPool.Key.Int.Storage.BigIntSpace = undefined;
const big_int = ip.indexToKey(loaded_enum.values.get(ip)[field_index]).int.storage.toBigInt(&big_int_space);
const bits = @max(1, big_int.bitCountTwosCompForSignedness(signedness));
if (bits <= 64) {
try wip_nav.abbrevCode(switch (signedness) {
.signed => abbrev_code.sdata,
.unsigned => abbrev_code.udata,
});
try wip_nav.debug_info.ensureUnusedCapacity(wip_nav.dwarf.gpa, std.math.divCeil(usize, bits, 7) catch unreachable);
var bit: usize = 0;
var carry: u1 = 1;
@@ -1412,11 +1417,8 @@ pub const WipNav = struct {
const limb_index = bit / limb_bits;
const limb_shift: std.math.Log2Int(std.math.big.Limb) = @intCast(bit % limb_bits);
const low_abs_part: u7 = @truncate(big_int.limbs[limb_index] >> limb_shift);
const abs_part = if (limb_shift > limb_bits - 7) abs_part: {
const next_limb: std.math.big.Limb = if (limb_index + 1 < big_int.limbs.len)
big_int.limbs[limb_index + 1]
else if (big_int.positive) 0 else std.math.maxInt(std.math.big.Limb);
const high_abs_part: u7 = @truncate(next_limb << -%limb_shift);
const abs_part = if (limb_shift > limb_bits - 7 and limb_index + 1 < big_int.limbs.len) abs_part: {
const high_abs_part: u7 = @truncate(big_int.limbs[limb_index + 1] << -%limb_shift);
break :abs_part high_abs_part | low_abs_part;
} else low_abs_part;
const twos_comp_part = if (big_int.positive) abs_part else twos_comp_part: {
@@ -1425,11 +1427,21 @@ pub const WipNav = struct {
};
wip_nav.debug_info.appendAssumeCapacity(@as(u8, if (bit + 7 < bits) 0x80 else 0x00) | twos_comp_part);
}
},
.lazy_align, .lazy_size => unreachable,
} else {
try wip_nav.abbrevCode(abbrev_code.block);
const bytes = Type.fromInterned(loaded_enum.tag_ty).abiSize(wip_nav.pt);
try uleb128(diw, bytes);
big_int.writeTwosComplement(try wip_nav.debug_info.addManyAsSlice(wip_nav.dwarf.gpa, @intCast(bytes)), wip_nav.dwarf.endian);
}
} else switch (signedness) {
.signed => try sleb128(diw, field_index),
.unsigned => try uleb128(diw, field_index),
.signed => {
try wip_nav.abbrevCode(abbrev_code.sdata);
try sleb128(diw, field_index);
},
.unsigned => {
try wip_nav.abbrevCode(abbrev_code.udata);
try uleb128(diw, field_index);
},
}
}
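A minimal sketch of the ULEB128 framing the udata path above relies on: 7 payload bits per byte, continuation bit 0x80 on every byte except the last (compare the `bit + 7 < bits` check). Values whose two's-complement representation does not fit in 64 bits bypass this and are written as a raw block instead. The encoder below is standalone and illustrative, not the writer used by WipNav.

const std = @import("std");

fn uleb128Encode(value: u64, buf: []u8) usize {
    var v = value;
    var len: usize = 0;
    while (true) {
        const low7: u8 = @truncate(v & 0x7f);
        v >>= 7;
        // set the continuation bit on every byte except the last
        buf[len] = if (v != 0) low7 | 0x80 else low7;
        len += 1;
        if (v == 0) return len;
    }
}

test "uleb128 framing" {
    var buf: [10]u8 = undefined;
    try std.testing.expectEqual(@as(usize, 1), uleb128Encode(0x7f, &buf));
    try std.testing.expectEqual(@as(usize, 2), uleb128Encode(0x80, &buf));
    try std.testing.expectEqualSlices(u8, &.{ 0x80, 0x01 }, buf[0..2]);
}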
@@ -2267,8 +2279,9 @@ pub fn updateComptimeNav(dwarf: *Dwarf, pt: Zcu.PerThread, nav_index: InternPool
try wip_nav.refType(Type.fromInterned(loaded_enum.tag_ty));
for (0..loaded_enum.names.len) |field_index| {
try wip_nav.enumConstValue(loaded_enum, .{
.signed = .signed_enum_field,
.unsigned = .unsigned_enum_field,
.sdata = .signed_enum_field,
.udata = .unsigned_enum_field,
.block = .big_enum_field,
}, field_index);
try wip_nav.strp(loaded_enum.names.get(ip)[field_index].toSlice(ip));
}
@@ -2367,8 +2380,9 @@ pub fn updateComptimeNav(dwarf: *Dwarf, pt: Zcu.PerThread, nav_index: InternPool
for (0..loaded_union.field_types.len) |field_index| {
try wip_nav.enumConstValue(loaded_tag, .{
.signed = .signed_tagged_union_field,
.unsigned = .unsigned_tagged_union_field,
.sdata = .signed_tagged_union_field,
.udata = .unsigned_tagged_union_field,
.block = .big_tagged_union_field,
}, field_index);
{
try wip_nav.abbrevCode(.struct_field);
@@ -2831,17 +2845,18 @@ fn updateType(
},
.enum_type => {
const loaded_enum = ip.loadEnumType(type_index);
try wip_nav.abbrevCode(.enum_type);
try wip_nav.abbrevCode(if (loaded_enum.names.len > 0) .enum_type else .empty_enum_type);
try wip_nav.strp(name);
try wip_nav.refType(Type.fromInterned(loaded_enum.tag_ty));
for (0..loaded_enum.names.len) |field_index| {
try wip_nav.enumConstValue(loaded_enum, .{
.signed = .signed_enum_field,
.unsigned = .unsigned_enum_field,
.sdata = .signed_enum_field,
.udata = .unsigned_enum_field,
.block = .big_enum_field,
}, field_index);
try wip_nav.strp(loaded_enum.names.get(ip)[field_index].toSlice(ip));
}
try uleb128(diw, @intFromEnum(AbbrevCode.null));
if (loaded_enum.names.len > 0) try uleb128(diw, @intFromEnum(AbbrevCode.null));
},
.func_type => |func_type| {
const is_nullary = func_type.param_types.len == 0 and !func_type.is_var_args;
@@ -3049,7 +3064,7 @@ pub fn updateContainerType(dwarf: *Dwarf, pt: Zcu.PerThread, type_index: InternP
}
},
.@"packed" => {
try wip_nav.abbrevCode(.packed_struct_type);
try wip_nav.abbrevCode(if (loaded_struct.field_types.len > 0) .packed_struct_type else .empty_packed_struct_type);
try wip_nav.strp(name);
try wip_nav.refType(Type.fromInterned(loaded_struct.backingIntTypeUnordered(ip)));
var field_bit_offset: u16 = 0;
@@ -3061,27 +3076,28 @@ pub fn updateContainerType(dwarf: *Dwarf, pt: Zcu.PerThread, type_index: InternP
try uleb128(diw, field_bit_offset);
field_bit_offset += @intCast(field_type.bitSize(pt));
}
try uleb128(diw, @intFromEnum(AbbrevCode.null));
if (loaded_struct.field_types.len > 0) try uleb128(diw, @intFromEnum(AbbrevCode.null));
},
}
},
.enum_type => {
const loaded_enum = ip.loadEnumType(type_index);
try wip_nav.abbrevCode(.enum_type);
try wip_nav.abbrevCode(if (loaded_enum.names.len > 0) .enum_type else .empty_enum_type);
try wip_nav.strp(name);
try wip_nav.refType(Type.fromInterned(loaded_enum.tag_ty));
for (0..loaded_enum.names.len) |field_index| {
try wip_nav.enumConstValue(loaded_enum, .{
.signed = .signed_enum_field,
.unsigned = .unsigned_enum_field,
.sdata = .signed_enum_field,
.udata = .unsigned_enum_field,
.block = .big_enum_field,
}, field_index);
try wip_nav.strp(loaded_enum.names.get(ip)[field_index].toSlice(ip));
}
try uleb128(diw, @intFromEnum(AbbrevCode.null));
if (loaded_enum.names.len > 0) try uleb128(diw, @intFromEnum(AbbrevCode.null));
},
.union_type => {
const loaded_union = ip.loadUnionType(type_index);
try wip_nav.abbrevCode(.union_type);
try wip_nav.abbrevCode(if (loaded_union.field_types.len > 0) .union_type else .empty_union_type);
try wip_nav.strp(name);
const union_layout = pt.getUnionLayout(loaded_union);
try uleb128(diw, union_layout.abi_size);
@@ -3103,8 +3119,9 @@ pub fn updateContainerType(dwarf: *Dwarf, pt: Zcu.PerThread, type_index: InternP
for (0..loaded_union.field_types.len) |field_index| {
try wip_nav.enumConstValue(loaded_tag, .{
.signed = .signed_tagged_union_field,
.unsigned = .unsigned_tagged_union_field,
.sdata = .signed_tagged_union_field,
.udata = .unsigned_tagged_union_field,
.block = .big_tagged_union_field,
}, field_index);
{
try wip_nav.abbrevCode(.struct_field);
@@ -3130,7 +3147,7 @@ pub fn updateContainerType(dwarf: *Dwarf, pt: Zcu.PerThread, type_index: InternP
try uleb128(diw, loaded_union.fieldAlign(ip, field_index).toByteUnits() orelse
field_type.abiAlignment(pt).toByteUnits().?);
}
try uleb128(diw, @intFromEnum(AbbrevCode.null));
if (loaded_union.field_types.len > 0) try uleb128(diw, @intFromEnum(AbbrevCode.null));
},
.opaque_type => {
try wip_nav.abbrevCode(.namespace_struct_type);
@@ -3570,6 +3587,7 @@ const AbbrevCode = enum {
file,
signed_enum_field,
unsigned_enum_field,
big_enum_field,
generated_field,
struct_field,
struct_field_comptime,
@@ -3578,6 +3596,7 @@ const AbbrevCode = enum {
tagged_union,
signed_tagged_union_field,
unsigned_tagged_union_field,
big_tagged_union_field,
tagged_union_default_field,
void_type,
numeric_type,
@@ -3596,7 +3615,9 @@ const AbbrevCode = enum {
namespace_struct_type,
struct_type,
packed_struct_type,
empty_packed_struct_type,
union_type,
empty_union_type,
empty_inlined_func,
inlined_func,
local_arg,
@@ -3778,6 +3799,13 @@ const AbbrevCode = enum {
.{ .name, .strp },
},
},
.big_enum_field = .{
.tag = .enumerator,
.attrs = &.{
.{ .const_value, .block },
.{ .name, .strp },
},
},
.generated_field = .{
.tag = .member,
.attrs = &.{
@@ -3841,6 +3869,13 @@ const AbbrevCode = enum {
.{ .discr_value, .udata },
},
},
.big_tagged_union_field = .{
.tag = .variant,
.children = true,
.attrs = &.{
.{ .discr_value, .block },
},
},
.tagged_union_default_field = .{
.tag = .variant,
.children = true,
@@ -3971,6 +4006,13 @@ const AbbrevCode = enum {
.{ .type, .ref_addr },
},
},
.empty_packed_struct_type = .{
.tag = .structure_type,
.attrs = &.{
.{ .name, .strp },
.{ .type, .ref_addr },
},
},
.union_type = .{
.tag = .union_type,
.children = true,
@@ -3980,6 +4022,14 @@ const AbbrevCode = enum {
.{ .alignment, .udata },
},
},
.empty_union_type = .{
.tag = .union_type,
.attrs = &.{
.{ .name, .strp },
.{ .byte_size, .udata },
.{ .alignment, .udata },
},
},
.empty_inlined_func = .{
.tag = .inlined_subroutine,
.attrs = &.{

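A standalone sketch (hypothetical names) of the invariant behind the new empty_* abbrevs: a DIE whose abbreviation declares children must be followed by a null entry terminating its child list, while a childless abbreviation must not emit one. Empty enums, unions, and packed structs therefore get their own child-less abbreviation codes, and the null terminator is emitted only when fields exist.

const std = @import("std");

const Abbrev = enum { enum_type, empty_enum_type };

fn pickAbbrev(field_count: usize) Abbrev {
    return if (field_count > 0) .enum_type else .empty_enum_type;
}

fn needsNullTerminator(abbrev: Abbrev) bool {
    return switch (abbrev) {
        .enum_type => true, // declares children: terminate the child list
        .empty_enum_type => false, // no children, no terminator
    };
}

test "empty containers skip the child terminator" {
    try std.testing.expect(needsNullTerminator(pickAbbrev(3)));
    try std.testing.expect(!needsNullTerminator(pickAbbrev(0)));
}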
View File

@@ -169,7 +169,6 @@ pub fn flushModule(self: *ZigObject, elf_file: *Elf, tid: Zcu.PerThread.Id) !voi
if (self.dwarf) |*dwarf| {
const pt: Zcu.PerThread = .{ .zcu = elf_file.base.comp.module.?, .tid = tid };
try dwarf.flushModule(pt);
try dwarf.resolveRelocs();
const gpa = elf_file.base.comp.gpa;
const cpu_arch = elf_file.getTarget().cpu.arch;
@@ -209,7 +208,28 @@ pub fn flushModule(self: *ZigObject, elf_file: *Elf, tid: Zcu.PerThread.Id) !voi
const relocs = &self.relocs.items[atom_ptr.relocsShndx().?];
for (sect.units.items) |*unit| {
try relocs.ensureUnusedCapacity(gpa, unit.cross_section_relocs.items.len);
try relocs.ensureUnusedCapacity(gpa, unit.cross_unit_relocs.items.len +
unit.cross_section_relocs.items.len);
for (unit.cross_unit_relocs.items) |reloc| {
const target_unit = sect.getUnit(reloc.target_unit);
const r_offset = unit.off + reloc.source_off;
const r_addend: i64 = @intCast(target_unit.off + reloc.target_off + (if (reloc.target_entry.unwrap()) |target_entry|
target_unit.header_len + target_unit.getEntry(target_entry).assertNonEmpty(unit, sect, dwarf).off
else
0));
const r_type = relocation.dwarf.crossSectionRelocType(dwarf.format, cpu_arch);
log.debug(" {s} <- r_off={x}, r_add={x}, r_type={}", .{
self.symbol(sym_index).name(elf_file),
r_offset,
r_addend,
relocation.fmtRelocType(r_type, cpu_arch),
});
atom_ptr.addRelocAssumeCapacity(.{
.r_offset = r_offset,
.r_addend = r_addend,
.r_info = (@as(u64, @intCast(sym_index)) << 32) | r_type,
}, self);
}
for (unit.cross_section_relocs.items) |reloc| {
const target_sym_index = switch (reloc.target_sec) {
.debug_abbrev => self.debug_abbrev_index.?,
@@ -246,7 +266,45 @@ pub fn flushModule(self: *ZigObject, elf_file: *Elf, tid: Zcu.PerThread.Id) !voi
for (unit.entries.items) |*entry| {
const entry_off = unit.off + unit.header_len + entry.off;
try relocs.ensureUnusedCapacity(gpa, entry.cross_section_relocs.items.len);
try relocs.ensureUnusedCapacity(gpa, entry.cross_entry_relocs.items.len +
entry.cross_unit_relocs.items.len + entry.cross_section_relocs.items.len +
entry.external_relocs.items.len);
for (entry.cross_entry_relocs.items) |reloc| {
const r_offset = entry_off + reloc.source_off;
const r_addend: i64 = @intCast(unit.off + reloc.target_off + unit.header_len + unit.getEntry(reloc.target_entry).assertNonEmpty(unit, sect, dwarf).off);
const r_type = relocation.dwarf.crossSectionRelocType(dwarf.format, cpu_arch);
log.debug(" {s} <- r_off={x}, r_add={x}, r_type={}", .{
self.symbol(sym_index).name(elf_file),
r_offset,
r_addend,
relocation.fmtRelocType(r_type, cpu_arch),
});
atom_ptr.addRelocAssumeCapacity(.{
.r_offset = r_offset,
.r_addend = r_addend,
.r_info = (@as(u64, @intCast(sym_index)) << 32) | r_type,
}, self);
}
for (entry.cross_unit_relocs.items) |reloc| {
const target_unit = sect.getUnit(reloc.target_unit);
const r_offset = entry_off + reloc.source_off;
const r_addend: i64 = @intCast(target_unit.off + reloc.target_off + (if (reloc.target_entry.unwrap()) |target_entry|
target_unit.header_len + target_unit.getEntry(target_entry).assertNonEmpty(unit, sect, dwarf).off
else
0));
const r_type = relocation.dwarf.crossSectionRelocType(dwarf.format, cpu_arch);
log.debug(" {s} <- r_off={x}, r_add={x}, r_type={}", .{
self.symbol(sym_index).name(elf_file),
r_offset,
r_addend,
relocation.fmtRelocType(r_type, cpu_arch),
});
atom_ptr.addRelocAssumeCapacity(.{
.r_offset = r_offset,
.r_addend = r_addend,
.r_info = (@as(u64, @intCast(sym_index)) << 32) | r_type,
}, self);
}
for (entry.cross_section_relocs.items) |reloc| {
const target_sym_index = switch (reloc.target_sec) {
.debug_abbrev => self.debug_abbrev_index.?,
@@ -279,8 +337,6 @@ pub fn flushModule(self: *ZigObject, elf_file: *Elf, tid: Zcu.PerThread.Id) !voi
.r_info = (@as(u64, @intCast(target_sym_index)) << 32) | r_type,
}, self);
}
try relocs.ensureUnusedCapacity(gpa, entry.external_relocs.items.len);
for (entry.external_relocs.items) |reloc| {
const target_sym = self.symbol(reloc.target_sym);
const r_offset = entry_off + reloc.source_off;

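A minimal sketch of the reserve-then-append pattern used in the relocation loops above: capacity for all relocation categories is reserved in a single ensureUnusedCapacity call, so the per-relocation appends cannot fail partway through the loops. The list and element type here are illustrative, not the linker's actual relocation records.

const std = @import("std");

test "reserve once, then append without allocation failure" {
    const gpa = std.testing.allocator;
    var relocs: std.ArrayListUnmanaged(u64) = .{};
    defer relocs.deinit(gpa);

    const cross_unit = [_]u64{ 1, 2 };
    const cross_section = [_]u64{3};
    try relocs.ensureUnusedCapacity(gpa, cross_unit.len + cross_section.len);
    for (cross_unit) |r| relocs.appendAssumeCapacity(r);
    for (cross_section) |r| relocs.appendAssumeCapacity(r);
    try std.testing.expectEqual(@as(usize, 3), relocs.items.len);
}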
View File

@@ -205,26 +205,26 @@ pub fn addTestsForTarget(db: *Debugger, target: Target) void {
\\ single_volatile: *volatile u32 = @ptrFromInt(0x1018),
\\ single_const_volatile: *const volatile u32 = @ptrFromInt(0x101c),
\\ single_allowzero: *allowzero u32 = @ptrFromInt(0x1020),
\\ single_const_allowzero: *const allowzero u32 = @ptrFromInt(0x1024),
\\ single_volatile_allowzero: *volatile allowzero u32 = @ptrFromInt(0x1028),
\\ single_const_volatile_allowzero: *const volatile allowzero u32 = @ptrFromInt(0x102c),
\\ single_allowzero_const: *allowzero const u32 = @ptrFromInt(0x1024),
\\ single_allowzero_volatile: *allowzero volatile u32 = @ptrFromInt(0x1028),
\\ single_allowzero_const_volatile: *allowzero const volatile u32 = @ptrFromInt(0x102c),
\\
\\ many: [*]u32 = @ptrFromInt(0x2010),
\\ many_const: [*]const u32 = @ptrFromInt(0x2014),
\\ many_volatile: [*]volatile u32 = @ptrFromInt(0x2018),
\\ many_const_volatile: [*]const volatile u32 = @ptrFromInt(0x201c),
\\ many_allowzero: [*]allowzero u32 = @ptrFromInt(0x2020),
\\ many_const_allowzero: [*]const allowzero u32 = @ptrFromInt(0x2024),
\\ many_volatile_allowzero: [*]volatile allowzero u32 = @ptrFromInt(0x2028),
\\ many_const_volatile_allowzero: [*]const volatile allowzero u32 = @ptrFromInt(0x202c),
\\ many_allowzero_const: [*]allowzero const u32 = @ptrFromInt(0x2024),
\\ many_allowzero_volatile: [*]allowzero volatile u32 = @ptrFromInt(0x2028),
\\ many_allowzero_const_volatile: [*]allowzero const volatile u32 = @ptrFromInt(0x202c),
\\ slice: []u32 = array[0..1],
\\ slice_const: []const u32 = array[0..2],
\\ slice_volatile: []volatile u32 = array[0..3],
\\ slice_const_volatile: []const volatile u32 = array[0..4],
\\ slice_allowzero: []allowzero u32 = array[4..4],
\\ slice_const_allowzero: []const allowzero u32 = array[4..5],
\\ slice_volatile_allowzero: []volatile allowzero u32 = array[4..6],
\\ slice_const_volatile_allowzero: []const volatile allowzero u32 = array[4..7],
\\ slice_allowzero_const: []allowzero const u32 = array[4..5],
\\ slice_allowzero_volatile: []allowzero volatile u32 = array[4..6],
\\ slice_allowzero_const_volatile: []allowzero const volatile u32 = array[4..7],
\\
\\ c: [*c]u32 = @ptrFromInt(0x4010),
\\ c_const: [*c]const u32 = @ptrFromInt(0x4014),
@@ -254,17 +254,17 @@ pub fn addTestsForTarget(db: *Debugger, target: Target) void {
\\ (*volatile u32) single_volatile = 0x0000000000001018
\\ (*const volatile u32) single_const_volatile = 0x000000000000101c
\\ (*allowzero u32) single_allowzero = 0x0000000000001020
\\ (*const allowzero u32) single_const_allowzero = 0x0000000000001024
\\ (*volatile allowzero u32) single_volatile_allowzero = 0x0000000000001028
\\ (*const volatile allowzero u32) single_const_volatile_allowzero = 0x000000000000102c
\\ (*allowzero const u32) single_allowzero_const = 0x0000000000001024
\\ (*allowzero volatile u32) single_allowzero_volatile = 0x0000000000001028
\\ (*allowzero const volatile u32) single_allowzero_const_volatile = 0x000000000000102c
\\ ([*]u32) many = 0x0000000000002010
\\ ([*]const u32) many_const = 0x0000000000002014
\\ ([*]volatile u32) many_volatile = 0x0000000000002018
\\ ([*]const volatile u32) many_const_volatile = 0x000000000000201c
\\ ([*]allowzero u32) many_allowzero = 0x0000000000002020
\\ ([*]const allowzero u32) many_const_allowzero = 0x0000000000002024
\\ ([*]volatile allowzero u32) many_volatile_allowzero = 0x0000000000002028
\\ ([*]const volatile allowzero u32) many_const_volatile_allowzero = 0x000000000000202c
\\ ([*]allowzero const u32) many_allowzero_const = 0x0000000000002024
\\ ([*]allowzero volatile u32) many_allowzero_volatile = 0x0000000000002028
\\ ([*]allowzero const volatile u32) many_allowzero_const_volatile = 0x000000000000202c
\\ ([]u32) slice = len=1 {
\\ (u32) [0] = 3010
\\ }
@@ -284,14 +284,14 @@ pub fn addTestsForTarget(db: *Debugger, target: Target) void {
\\ (u32) [3] = 3022
\\ }
\\ ([]allowzero u32) slice_allowzero = len=0 {}
\\ ([]const allowzero u32) slice_const_allowzero = len=1 {
\\ ([]allowzero const u32) slice_allowzero_const = len=1 {
\\ (u32) [0] = 3026
\\ }
\\ ([]volatile allowzero u32) slice_volatile_allowzero = len=2 {
\\ ([]allowzero volatile u32) slice_allowzero_volatile = len=2 {
\\ (u32) [0] = 3026
\\ (u32) [1] = 3030
\\ }
\\ ([]const volatile allowzero u32) slice_const_volatile_allowzero = len=3 {
\\ ([]allowzero const volatile u32) slice_allowzero_const_volatile = len=3 {
\\ (u32) [0] = 3026
\\ (u32) [1] = 3030
\\ (u32) [2] = 3034