wasm linker: always passive when importing memory
Also detect passive inits from Zcu. Don't forget to intern the function type for __wasm_init_memory, make that function the start function if it is present, and don't skip emitting passive data segment data to the binary.
This commit is contained in:
@@ -1968,7 +1968,7 @@ pub const DataSegmentId = enum(u32) {
|
||||
|
||||
pub fn isPassive(id: DataSegmentId, wasm: *const Wasm) bool {
|
||||
const comp = wasm.base.comp;
|
||||
if (comp.config.import_memory and !id.isBss(wasm)) return true;
|
||||
if (comp.config.import_memory) return true;
|
||||
return switch (unpack(id, wasm)) {
|
||||
.__zig_error_names,
|
||||
.__zig_error_name_table,
|
||||
@@ -4614,7 +4614,10 @@ fn lowerZcuData(wasm: *Wasm, pt: Zcu.PerThread, ip_index: InternPool.Index) !Zcu
|
||||
.off = .none,
|
||||
.len = naive_code.len,
|
||||
};
|
||||
} else naive_code;
|
||||
} else c: {
|
||||
wasm.any_passive_inits = wasm.any_passive_inits or wasm.base.comp.config.import_memory;
|
||||
break :c naive_code;
|
||||
};
|
||||
|
||||
return .{
|
||||
.code = code,
|
||||
|
||||
@@ -282,6 +282,8 @@ pub fn finish(f: *Flush, wasm: *Wasm) !void {
|
||||
// function.
|
||||
if (wasm.any_passive_inits) {
|
||||
wasm.functions.putAssumeCapacity(.__wasm_init_memory, {});
|
||||
const empty = try wasm.internValtypeList(&.{});
|
||||
_ = try wasm.addFuncType(.{ .params = empty, .returns = empty });
|
||||
}
|
||||
|
||||
// When we have TLS GOT entries and shared memory is enabled,
|
||||
@@ -711,9 +713,10 @@ pub fn finish(f: *Flush, wasm: *Wasm) !void {
|
||||
}
|
||||
|
||||
// start section
|
||||
if (Wasm.OutputFunctionIndex.fromResolution(wasm, wasm.entry_resolution)) |func_index| {
|
||||
const header_offset = try reserveVecSectionHeader(gpa, binary_bytes);
|
||||
replaceVecSectionHeader(binary_bytes, header_offset, .start, @intFromEnum(func_index));
|
||||
if (wasm.functions.getIndex(.__wasm_init_memory)) |func_index| {
|
||||
try emitStartSection(gpa, binary_bytes, .fromFunctionIndex(wasm, @enumFromInt(func_index)));
|
||||
} else if (Wasm.OutputFunctionIndex.fromResolution(wasm, wasm.entry_resolution)) |func_index| {
|
||||
try emitStartSection(gpa, binary_bytes, func_index);
|
||||
}
|
||||
|
||||
// element section
|
||||
@@ -846,10 +849,10 @@ pub fn finish(f: *Flush, wasm: *Wasm) !void {
|
||||
group_end_addr = f.data_segment_groups.items[group_index].end_addr;
|
||||
segment_offset = 0;
|
||||
}
|
||||
const flags: Object.DataSegmentFlags = if (segment_id.isPassive(wasm)) .passive else .active;
|
||||
if (segment_offset == 0) {
|
||||
const group_size = group_end_addr - group_start_addr;
|
||||
log.debug("emit data section group, {d} bytes", .{group_size});
|
||||
const flags: Object.DataSegmentFlags = if (segment_id.isPassive(wasm)) .passive else .active;
|
||||
try leb.writeUleb128(binary_writer, @intFromEnum(flags));
|
||||
// Passive segments are initialized at runtime.
|
||||
if (flags != .passive) {
|
||||
@@ -857,7 +860,7 @@ pub fn finish(f: *Flush, wasm: *Wasm) !void {
|
||||
}
|
||||
try leb.writeUleb128(binary_writer, group_size);
|
||||
}
|
||||
if (flags == .passive or segment_id.isEmpty(wasm)) {
|
||||
if (segment_id.isEmpty(wasm)) {
|
||||
// It counted for virtual memory but it does not go into the binary.
|
||||
continue;
|
||||
}
|
||||
@@ -1900,6 +1903,11 @@ fn emitInitMemoryFunction(
|
||||
binary_bytes.appendAssumeCapacity(@intFromEnum(std.wasm.Opcode.end));
|
||||
}
|
||||
|
||||
/// Emits a complete "start" section into `bytes`: reserves a section header,
/// then patches it in place with the `.start` section id and the function
/// index `i` encoded as its payload.
/// NOTE(review): relies on the project-local `reserveVecSectionHeader` /
/// `replaceVecSectionHeader` pair — presumably the reserve call appends
/// placeholder bytes and returns their offset, and the replace call back-fills
/// the section id, size, and payload; confirm against their definitions.
/// Errors: only allocation failure from growing `bytes` can propagate.
fn emitStartSection(gpa: Allocator, bytes: *std.ArrayListUnmanaged(u8), i: Wasm.OutputFunctionIndex) !void {
|
||||
const header_offset = try reserveVecSectionHeader(gpa, bytes);
|
||||
replaceVecSectionHeader(bytes, header_offset, .start, @intFromEnum(i));
|
||||
}
|
||||
|
||||
fn emitTagNameFunction(
|
||||
wasm: *Wasm,
|
||||
code: *std.ArrayListUnmanaged(u8),
|
||||
|
||||
Reference in New Issue
Block a user