elf: split Atom.allocate into Atom-independent parts
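
The section-level placement logic moves out of Atom.allocate into a new Elf.allocateChunk, which needs only a section index, a size, and an alignment: it scans the section's free list for a suitable hole, picks a start offset, and grows the section when the chunk lands past the last atom. The atom-specific bookkeeping (recording the atom's value, splicing it into the section's atom list, and dirtying the ZigObject DWARF sections) moves into new ZigObject.allocateAtom and ZigObject.growAtom helpers; call sites in updateNavCode, updateLazySymbol, and lowerConst switch from atom_ptr.allocate/atom_ptr.grow to self.allocateAtom/self.growAtom, and the old Atom.allocate is removed. A rough caller-side sketch of the resulting split, restating names from the diff below:

    // Section-level: find room for `size` bytes at `alignment` in section `shndx`,
    // growing the section if necessary.
    const res = try elf_file.allocateChunk(atom_ptr.output_section_index, atom_ptr.size, atom_ptr.alignment);
    // Atom-level: record the section-relative offset and splice the atom in
    // after the placement atom identified by `res.placement`.
    atom_ptr.value = @intCast(res.value);
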
@@ -675,6 +675,83 @@ pub fn markDirty(self: *Elf, shdr_index: u32) void {
    }
}

const AllocateChunkResult = struct {
    value: u64,
    placement: Ref,
};

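/// Reserves `size` bytes at the given `alignment` in output section `shndx`,
/// growing the section if the chunk lands past the section's last atom.
/// The returned `value` is an offset relative to the start of the section;
/// `placement` is the existing atom the chunk was placed after, if any.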
pub fn allocateChunk(self: *Elf, shndx: u32, size: u64, alignment: Atom.Alignment) !AllocateChunkResult {
    const slice = self.sections.slice();
    const shdr = &slice.items(.shdr)[shndx];
    const free_list = &slice.items(.free_list)[shndx];
    const last_atom_ref = &slice.items(.last_atom)[shndx];
    const new_atom_ideal_capacity = padToIdeal(size);

    // First we look for an appropriately sized free list node.
    // The list is unordered. We'll just take the first thing that works.
    const res: AllocateChunkResult = blk: {
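        // When a child process is running the output binary (hot code swapping),
        // skip the free list and only ever place new chunks at the end of the section.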
        var i: usize = if (self.base.child_pid == null) 0 else free_list.items.len;
        while (i < free_list.items.len) {
            const big_atom_ref = free_list.items[i];
            const big_atom = self.atom(big_atom_ref).?;
            // We now have a pointer to a live atom that has too much capacity.
            // Is it enough that we could fit this new atom?
            const cap = big_atom.capacity(self);
            const ideal_capacity = padToIdeal(cap);
            const ideal_capacity_end_vaddr = std.math.add(u64, @intCast(big_atom.value), ideal_capacity) catch ideal_capacity;
            const capacity_end_vaddr = @as(u64, @intCast(big_atom.value)) + cap;
            const new_start_vaddr_unaligned = capacity_end_vaddr - new_atom_ideal_capacity;
            const new_start_vaddr = alignment.backward(new_start_vaddr_unaligned);
            if (new_start_vaddr < ideal_capacity_end_vaddr) {
                // Additional bookkeeping here to notice if this free list node
                // should be deleted because the block that it points to has grown to take up
                // more of the extra capacity.
                if (!big_atom.freeListEligible(self)) {
                    _ = free_list.swapRemove(i);
                } else {
                    i += 1;
                }
                continue;
            }
            // At this point we know that we will place the new block here. But the
            // remaining question is whether there is still enough capacity left
            // over to keep a free list node.
            const remaining_capacity = new_start_vaddr - ideal_capacity_end_vaddr;
            const keep_free_list_node = remaining_capacity >= min_text_capacity;

            if (!keep_free_list_node) {
                _ = free_list.swapRemove(i);
            }
            break :blk .{ .value = new_start_vaddr, .placement = big_atom_ref };
        } else if (self.atom(last_atom_ref.*)) |last_atom| {
            const ideal_capacity = padToIdeal(last_atom.size);
            const ideal_capacity_end_vaddr = @as(u64, @intCast(last_atom.value)) + ideal_capacity;
            const new_start_vaddr = alignment.forward(ideal_capacity_end_vaddr);
            break :blk .{ .value = new_start_vaddr, .placement = last_atom.ref() };
        } else {
            break :blk .{ .value = 0, .placement = .{} };
        }
    };

    log.debug("allocated chunk (size({x}),align({x})) at 0x{x} (file(0x{x}))", .{
        size,
        alignment.toByteUnits().?,
        shdr.sh_addr + res.value,
        shdr.sh_offset + res.value,
    });

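    // Grow the section only when the chunk was placed past the current last atom
    // (or the section has no atoms yet).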
    const expand_section = if (self.atom(res.placement)) |placement_atom|
        placement_atom.nextAtom(self) == null
    else
        true;
    if (expand_section) {
        const needed_size = res.value + size;
        try self.growAllocSection(shndx, needed_size);
    }

    return res;
}

pub fn flush(self: *Elf, arena: Allocator, tid: Zcu.PerThread.Id, prog_node: std.Progress.Node) link.File.FlushError!void {
    const use_lld = build_options.have_llvm and self.base.comp.config.use_lld;
    if (use_lld) {

@@ -123,140 +123,6 @@ pub fn freeListEligible(self: Atom, elf_file: *Elf) bool {
    return surplus >= Elf.min_text_capacity;
}

pub fn allocate(self: *Atom, elf_file: *Elf) !void {
    const slice = elf_file.sections.slice();
    const shdr = &slice.items(.shdr)[self.output_section_index];
    const free_list = &slice.items(.free_list)[self.output_section_index];
    const last_atom_ref = &slice.items(.last_atom)[self.output_section_index];
    const new_atom_ideal_capacity = Elf.padToIdeal(self.size);

    // We use these to indicate our intention to update metadata, placing the new atom,
    // and possibly removing a free list node.
    // It would be simpler to do it inside the for loop below, but that would cause a
    // problem if an error was returned later in the function. So this action
    // is actually carried out at the end of the function, when errors are no longer possible.
    var atom_placement: ?Elf.Ref = null;
    var free_list_removal: ?usize = null;

    // First we look for an appropriately sized free list node.
    // The list is unordered. We'll just take the first thing that works.
    self.value = blk: {
        var i: usize = if (elf_file.base.child_pid == null) 0 else free_list.items.len;
        while (i < free_list.items.len) {
            const big_atom_ref = free_list.items[i];
            const big_atom = elf_file.atom(big_atom_ref).?;
            // We now have a pointer to a live atom that has too much capacity.
            // Is it enough that we could fit this new atom?
            const cap = big_atom.capacity(elf_file);
            const ideal_capacity = Elf.padToIdeal(cap);
            const ideal_capacity_end_vaddr = std.math.add(u64, @intCast(big_atom.value), ideal_capacity) catch ideal_capacity;
            const capacity_end_vaddr = @as(u64, @intCast(big_atom.value)) + cap;
            const new_start_vaddr_unaligned = capacity_end_vaddr - new_atom_ideal_capacity;
            const new_start_vaddr = self.alignment.backward(new_start_vaddr_unaligned);
            if (new_start_vaddr < ideal_capacity_end_vaddr) {
                // Additional bookkeeping here to notice if this free list node
                // should be deleted because the block that it points to has grown to take up
                // more of the extra capacity.
                if (!big_atom.freeListEligible(elf_file)) {
                    _ = free_list.swapRemove(i);
                } else {
                    i += 1;
                }
                continue;
            }
            // At this point we know that we will place the new block here. But the
            // remaining question is whether there is still enough capacity left
            // over to keep a free list node.
            const remaining_capacity = new_start_vaddr - ideal_capacity_end_vaddr;
            const keep_free_list_node = remaining_capacity >= Elf.min_text_capacity;

            // Set up the metadata to be updated, after errors are no longer possible.
            atom_placement = big_atom_ref;
            if (!keep_free_list_node) {
                free_list_removal = i;
            }
            break :blk @intCast(new_start_vaddr);
        } else if (elf_file.atom(last_atom_ref.*)) |last_atom| {
            const ideal_capacity = Elf.padToIdeal(last_atom.size);
            const ideal_capacity_end_vaddr = @as(u64, @intCast(last_atom.value)) + ideal_capacity;
            const new_start_vaddr = self.alignment.forward(ideal_capacity_end_vaddr);
            // Set up the metadata to be updated, after errors are no longer possible.
            atom_placement = last_atom.ref();
            break :blk @intCast(new_start_vaddr);
        } else {
            break :blk 0;
        }
    };

    log.debug("allocated atom({}) : '{s}' at 0x{x} to 0x{x}", .{
        self.ref(),
        self.name(elf_file),
        self.address(elf_file),
        self.address(elf_file) + @as(i64, @intCast(self.size)),
    });

    const expand_section = if (atom_placement) |placement_ref|
        elf_file.atom(placement_ref).?.nextAtom(elf_file) == null
    else
        true;
    if (expand_section) {
        const needed_size: u64 = @intCast(self.value + @as(i64, @intCast(self.size)));
        try elf_file.growAllocSection(self.output_section_index, needed_size);
        last_atom_ref.* = self.ref();

        switch (self.file(elf_file).?) {
            .zig_object => |zo| if (zo.dwarf) |_| {
                // The .debug_info section has `low_pc` and `high_pc` values which is the virtual address
                // range of the compilation unit. When we expand the text section, this range changes,
                // so the DW_TAG.compile_unit tag of the .debug_info section becomes dirty.
                zo.debug_info_section_dirty = true;
                // This becomes dirty for the same reason. We could potentially make this more
                // fine-grained with the addition of support for more compilation units. It is planned to
                // model each package as a different compilation unit.
                zo.debug_aranges_section_dirty = true;
                zo.debug_rnglists_section_dirty = true;
            },
            else => {},
        }
    }
    shdr.sh_addralign = @max(shdr.sh_addralign, self.alignment.toByteUnits().?);

    // This function can also reallocate an atom.
    // In this case we need to "unplug" it from its previous location before
    // plugging it in to its new location.
    if (self.prevAtom(elf_file)) |prev| {
        prev.next_atom_ref = self.next_atom_ref;
    }
    if (self.nextAtom(elf_file)) |next| {
        next.prev_atom_ref = self.prev_atom_ref;
    }

    if (atom_placement) |big_atom_ref| {
        const big_atom = elf_file.atom(big_atom_ref).?;
        self.prev_atom_ref = big_atom_ref;
        self.next_atom_ref = big_atom.next_atom_ref;
        big_atom.next_atom_ref = self.ref();
    } else {
        self.prev_atom_ref = .{ .index = 0, .file = 0 };
        self.next_atom_ref = .{ .index = 0, .file = 0 };
    }
    if (free_list_removal) |i| {
        _ = free_list.swapRemove(i);
    }

    self.alive = true;
}

pub fn shrink(self: *Atom, elf_file: *Elf) void {
    _ = self;
    _ = elf_file;
}

pub fn grow(self: *Atom, elf_file: *Elf) !void {
    if (!self.alignment.check(@intCast(self.value)) or self.size > self.capacity(elf_file))
        try self.allocate(elf_file);
}

pub fn free(self: *Atom, elf_file: *Elf) void {
    log.debug("freeAtom atom({}) ({s})", .{ self.ref(), self.name(elf_file) });

@@ -1362,19 +1362,18 @@ fn updateNavCode(
        const capacity = atom_ptr.capacity(elf_file);
        const need_realloc = code.len > capacity or !required_alignment.check(@intCast(atom_ptr.value));
        if (need_realloc) {
            try atom_ptr.grow(elf_file);
            try self.growAtom(atom_ptr, elf_file);
            log.debug("growing {} from 0x{x} to 0x{x}", .{ nav.fqn.fmt(ip), old_vaddr, atom_ptr.value });
            if (old_vaddr != atom_ptr.value) {
                sym.value = 0;
                esym.st_value = 0;
            }
        } else if (code.len < old_size) {
            atom_ptr.shrink(elf_file);
            // TODO shrink section size
        }
    } else {
        try atom_ptr.allocate(elf_file);
        try self.allocateAtom(atom_ptr, elf_file);
        errdefer self.freeNavMetadata(elf_file, sym_index);

        sym.value = 0;
        esym.st_value = 0;
    }

@@ -1739,7 +1738,7 @@ fn updateLazySymbol(
    atom_ptr.size = code.len;
    atom_ptr.output_section_index = output_section_index;

    try atom_ptr.allocate(elf_file);
    try self.allocateAtom(atom_ptr, elf_file);
    errdefer self.freeNavMetadata(elf_file, symbol_index);

    local_sym.value = 0;

@@ -1797,8 +1796,7 @@ fn lowerConst(
    atom_ptr.size = code.len;
    atom_ptr.output_section_index = output_section_index;

    try atom_ptr.allocate(elf_file);
    // TODO rename and re-audit this method
    try self.allocateAtom(atom_ptr, elf_file);
    errdefer self.freeNavMetadata(elf_file, sym_index);

    const shdr = elf_file.sections.items(.shdr)[output_section_index];

@@ -1998,6 +1996,60 @@ fn writeTrampoline(tr_sym: Symbol, target: Symbol, elf_file: *Elf) !void {
    }
}

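/// Reserves space for `atom_ptr` in its output section via `allocateChunk` and
/// splices the atom into the section's atom list at the chosen placement.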
fn allocateAtom(self: *ZigObject, atom_ptr: *Atom, elf_file: *Elf) !void {
    const alloc_res = try elf_file.allocateChunk(atom_ptr.output_section_index, atom_ptr.size, atom_ptr.alignment);
    atom_ptr.value = @intCast(alloc_res.value);

    const slice = elf_file.sections.slice();
    const shdr = &slice.items(.shdr)[atom_ptr.output_section_index];
    const last_atom_ref = &slice.items(.last_atom)[atom_ptr.output_section_index];

    const expand_section = if (elf_file.atom(alloc_res.placement)) |placement_atom|
        placement_atom.nextAtom(elf_file) == null
    else
        true;
    if (expand_section) {
        last_atom_ref.* = atom_ptr.ref();
        if (self.dwarf) |_| {
            // The .debug_info section has `low_pc` and `high_pc` values which is the virtual address
            // range of the compilation unit. When we expand the text section, this range changes,
            // so the DW_TAG.compile_unit tag of the .debug_info section becomes dirty.
            self.debug_info_section_dirty = true;
            // This becomes dirty for the same reason. We could potentially make this more
            // fine-grained with the addition of support for more compilation units. It is planned to
            // model each package as a different compilation unit.
            self.debug_aranges_section_dirty = true;
            self.debug_rnglists_section_dirty = true;
        }
    }
    shdr.sh_addralign = @max(shdr.sh_addralign, atom_ptr.alignment.toByteUnits().?);

    // This function can also reallocate an atom.
    // In this case we need to "unplug" it from its previous location before
    // plugging it in to its new location.
    if (atom_ptr.prevAtom(elf_file)) |prev| {
        prev.next_atom_ref = atom_ptr.next_atom_ref;
    }
    if (atom_ptr.nextAtom(elf_file)) |next| {
        next.prev_atom_ref = atom_ptr.prev_atom_ref;
    }

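    // Splice the atom into the section's atom list right after the placement atom
    // returned by allocateChunk, if any.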
    if (elf_file.atom(alloc_res.placement)) |big_atom| {
        atom_ptr.prev_atom_ref = alloc_res.placement;
        atom_ptr.next_atom_ref = big_atom.next_atom_ref;
        big_atom.next_atom_ref = atom_ptr.ref();
    } else {
        atom_ptr.prev_atom_ref = .{ .index = 0, .file = 0 };
        atom_ptr.next_atom_ref = .{ .index = 0, .file = 0 };
    }
}

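/// Reallocates the atom if its current placement no longer satisfies its alignment
/// or its size has outgrown its capacity.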
fn growAtom(self: *ZigObject, atom_ptr: *Atom, elf_file: *Elf) !void {
    if (!atom_ptr.alignment.check(@intCast(atom_ptr.value)) or atom_ptr.size > atom_ptr.capacity(elf_file)) {
        try self.allocateAtom(atom_ptr, elf_file);
    }
}

pub fn asFile(self: *ZigObject) File {
    return .{ .zig_object = self };
}