Merge pull request #6056 from ifreund/wasm-backend

stage2: add a wasm backend
This commit is contained in:
Andrew Kelley
2020-08-18 00:28:05 -04:00
committed by GitHub
7 changed files with 647 additions and 18 deletions

View File

@@ -974,7 +974,7 @@ pub fn update(self: *Module) !void {
}
// This is needed before reading the error flags.
try self.bin_file.flush();
try self.bin_file.flush(self);
self.link_error_flags = self.bin_file.errorFlags();
@@ -1571,7 +1571,7 @@ fn analyzeRootSrcFile(self: *Module, root_scope: *Scope.File) !void {
.macho => {
// TODO Implement for MachO
},
.c => {},
.c, .wasm => {},
}
}
} else {
@@ -1781,11 +1781,13 @@ fn allocateNewDecl(
.elf => .{ .elf = link.File.Elf.TextBlock.empty },
.macho => .{ .macho = link.File.MachO.TextBlock.empty },
.c => .{ .c = {} },
.wasm => .{ .wasm = {} },
},
.fn_link = switch (self.bin_file.tag) {
.elf => .{ .elf = link.File.Elf.SrcFn.empty },
.macho => .{ .macho = link.File.MachO.SrcFn.empty },
.c => .{ .c = {} },
.wasm => .{ .wasm = null },
},
.generation = 0,
};

View File

@@ -0,0 +1,119 @@
const std = @import("std");
const Allocator = std.mem.Allocator;
const ArrayList = std.ArrayList;
const assert = std.debug.assert;
const leb = std.debug.leb;
const mem = std.mem;
const Decl = @import("../Module.zig").Decl;
const Inst = @import("../ir.zig").Inst;
const Type = @import("../type.zig").Type;
const Value = @import("../value.zig").Value;
/// Map a Zig type onto its wasm value-type encoding byte.
/// Panics for types the wasm backend cannot represent yet.
fn genValtype(ty: Type) u8 {
    switch (ty.tag()) {
        .i32, .u32 => return 0x7F, // wasm i32
        .i64, .u64 => return 0x7E, // wasm i64
        .f32 => return 0x7D, // wasm f32
        .f64 => return 0x7C, // wasm f64
        else => @panic("TODO: Implement more types for wasm."),
    }
}
/// Append the wasm functype encoding for `decl`'s function type to `buf`:
/// the 0x60 functype magic, the param valtype vector, then the result
/// valtype vector (empty for void/noreturn, otherwise a single entry).
/// Scratch memory is taken from `buf.allocator` and freed before returning.
pub fn genFunctype(buf: *ArrayList(u8), decl: *Decl) !void {
    const fn_ty = decl.typed_value.most_recent.typed_value.ty;
    const writer = buf.writer();

    // functype magic
    try writer.writeByte(0x60);

    // Parameter types.
    const param_count = fn_ty.fnParamLen();
    try leb.writeULEB128(writer, @intCast(u32, param_count));
    if (param_count != 0) {
        const param_types = try buf.allocator.alloc(Type, param_count);
        defer buf.allocator.free(param_types);
        fn_ty.fnParamTypes(param_types);
        for (param_types) |param_ty| {
            try writer.writeByte(genValtype(param_ty));
        }
    }

    // Result types: wasm encodes these as a vector, but we emit at
    // most one result.
    const ret_ty = fn_ty.fnReturnType();
    switch (ret_ty.tag()) {
        .void, .noreturn => try leb.writeULEB128(writer, @as(u32, 0)),
        else => {
            try leb.writeULEB128(writer, @as(u32, 1));
            try writer.writeByte(genValtype(ret_ty));
        },
    }
}
/// Generate the code-section entry for `decl` into `buf`: a 5-byte
/// fixed-width ULEB128 size prefix, the locals vector, the function body
/// instructions, and the terminating 'end' opcode. `buf` must be empty.
pub fn genCode(buf: *ArrayList(u8), decl: *Decl) !void {
    assert(buf.items.len == 0);
    const writer = buf.writer();

    // Reserve space for the size prefix; it is backfilled once the body
    // has been generated and its length is known.
    try buf.resize(5);

    // Size of the locals vec.
    // TODO: implement locals
    try leb.writeULEB128(writer, @as(u32, 0));

    // Lower each IR instruction of the function body.
    // TODO: check for and handle death of instructions
    const typed_value = decl.typed_value.most_recent.typed_value;
    const func = typed_value.val.cast(Value.Payload.Function).?.func;
    for (func.analysis.success.instructions) |inst| {
        try genInst(writer, inst);
    }

    // Terminating 'end' opcode.
    try writer.writeByte(0x0B);

    // Backfill the body size (everything after the 5-byte prefix) into
    // the space reserved at the start of the buffer.
    leb.writeUnsignedFixed(5, buf.items[0..5], @intCast(u32, buf.items.len - 5));
}
/// Lower a single IR instruction to wasm bytecode. Debug statements
/// are a no-op for now; unhandled instructions produce an error.
fn genInst(writer: ArrayList(u8).Writer, inst: *Inst) !void {
    switch (inst.tag) {
        .dbg_stmt => return,
        .ret => return genRet(writer, inst.castTag(.ret).?),
        else => return error.TODOImplementMoreWasmCodegen,
    }
}
/// Lower a return instruction. Only returning a constant is supported:
/// the constant is pushed with the matching `.const` opcode and left on
/// the stack (no explicit return opcode is emitted; the function's
/// closing 'end' written by genCode follows it).
fn genRet(writer: ArrayList(u8).Writer, inst: *Inst.UnOp) !void {
    const operand = inst.operand;
    if (operand.tag != .constant) return error.TODOImplementMoreWasmCodegen;
    const const_inst = operand.castTag(.constant).?;

    switch (operand.ty.tag()) {
        // NOTE(review): unsigned values are emitted with writeILEB128;
        // values with the top bit set may not encode as intended — confirm.
        .u32 => {
            try writer.writeByte(0x41); // i32.const
            try leb.writeILEB128(writer, const_inst.val.toUnsignedInt());
        },
        .i32 => {
            try writer.writeByte(0x41); // i32.const
            try leb.writeILEB128(writer, const_inst.val.toSignedInt());
        },
        .u64 => {
            try writer.writeByte(0x42); // i64.const
            try leb.writeILEB128(writer, const_inst.val.toUnsignedInt());
        },
        .i64 => {
            try writer.writeByte(0x42); // i64.const
            try leb.writeILEB128(writer, const_inst.val.toSignedInt());
        },
        .f32 => {
            try writer.writeByte(0x43); // f32.const
            // TODO: enforce LE byte order
            try writer.writeAll(mem.asBytes(&const_inst.val.toFloat(f32)));
        },
        .f64 => {
            try writer.writeByte(0x44); // f64.const
            // TODO: enforce LE byte order
            try writer.writeAll(mem.asBytes(&const_inst.val.toFloat(f64)));
        },
        else => return error.TODOImplementMoreWasmCodegen,
    }
}

View File

@@ -46,12 +46,14 @@ pub const File = struct {
elf: Elf.TextBlock,
macho: MachO.TextBlock,
c: void,
wasm: void,
};
pub const LinkFn = union {
elf: Elf.SrcFn,
macho: MachO.SrcFn,
c: void,
wasm: ?Wasm.FnData,
};
tag: Tag,
@@ -69,7 +71,7 @@ pub const File = struct {
.coff => return error.TODOImplementCoff,
.elf => return Elf.openPath(allocator, dir, sub_path, options),
.macho => return MachO.openPath(allocator, dir, sub_path, options),
.wasm => return error.TODOImplementWasm,
.wasm => return Wasm.openPath(allocator, dir, sub_path, options),
.c => return C.openPath(allocator, dir, sub_path, options),
.hex => return error.TODOImplementHex,
.raw => return error.TODOImplementRaw,
@@ -93,15 +95,18 @@ pub const File = struct {
.mode = determineMode(base.options),
});
},
.c => {},
.c, .wasm => {},
}
}
pub fn makeExecutable(base: *File) !void {
std.debug.assert(base.tag != .c);
if (base.file) |f| {
f.close();
base.file = null;
switch (base.tag) {
.c => unreachable,
.wasm => {},
else => if (base.file) |f| {
f.close();
base.file = null;
},
}
}
@@ -110,6 +115,7 @@ pub const File = struct {
.elf => return @fieldParentPtr(Elf, "base", base).updateDecl(module, decl),
.macho => return @fieldParentPtr(MachO, "base", base).updateDecl(module, decl),
.c => return @fieldParentPtr(C, "base", base).updateDecl(module, decl),
.wasm => return @fieldParentPtr(Wasm, "base", base).updateDecl(module, decl),
}
}
@@ -117,7 +123,7 @@ pub const File = struct {
switch (base.tag) {
.elf => return @fieldParentPtr(Elf, "base", base).updateDeclLineNumber(module, decl),
.macho => return @fieldParentPtr(MachO, "base", base).updateDeclLineNumber(module, decl),
.c => {},
.c, .wasm => {},
}
}
@@ -125,7 +131,7 @@ pub const File = struct {
switch (base.tag) {
.elf => return @fieldParentPtr(Elf, "base", base).allocateDeclIndexes(decl),
.macho => return @fieldParentPtr(MachO, "base", base).allocateDeclIndexes(decl),
.c => {},
.c, .wasm => {},
}
}
@@ -135,6 +141,7 @@ pub const File = struct {
.elf => @fieldParentPtr(Elf, "base", base).deinit(),
.macho => @fieldParentPtr(MachO, "base", base).deinit(),
.c => @fieldParentPtr(C, "base", base).deinit(),
.wasm => @fieldParentPtr(Wasm, "base", base).deinit(),
}
}
@@ -155,18 +162,23 @@ pub const File = struct {
parent.deinit();
base.allocator.destroy(parent);
},
.wasm => {
const parent = @fieldParentPtr(Wasm, "base", base);
parent.deinit();
base.allocator.destroy(parent);
},
}
}
/// Commit pending changes and write headers.
pub fn flush(base: *File) !void {
pub fn flush(base: *File, module: *Module) !void {
const tracy = trace(@src());
defer tracy.end();
try switch (base.tag) {
.elf => @fieldParentPtr(Elf, "base", base).flush(),
.macho => @fieldParentPtr(MachO, "base", base).flush(),
.c => @fieldParentPtr(C, "base", base).flush(),
.elf => @fieldParentPtr(Elf, "base", base).flush(module),
.macho => @fieldParentPtr(MachO, "base", base).flush(module),
.c => @fieldParentPtr(C, "base", base).flush(module),
.wasm => @fieldParentPtr(Wasm, "base", base).flush(module),
};
}
@@ -175,6 +187,7 @@ pub const File = struct {
.elf => @fieldParentPtr(Elf, "base", base).freeDecl(decl),
.macho => @fieldParentPtr(MachO, "base", base).freeDecl(decl),
.c => unreachable,
.wasm => @fieldParentPtr(Wasm, "base", base).freeDecl(decl),
}
}
@@ -183,6 +196,7 @@ pub const File = struct {
.elf => @fieldParentPtr(Elf, "base", base).error_flags,
.macho => @fieldParentPtr(MachO, "base", base).error_flags,
.c => return .{ .no_entry_point_found = false },
.wasm => return ErrorFlags{},
};
}
@@ -197,6 +211,7 @@ pub const File = struct {
.elf => return @fieldParentPtr(Elf, "base", base).updateDeclExports(module, decl, exports),
.macho => return @fieldParentPtr(MachO, "base", base).updateDeclExports(module, decl, exports),
.c => return {},
.wasm => return @fieldParentPtr(Wasm, "base", base).updateDeclExports(module, decl, exports),
}
}
@@ -204,6 +219,7 @@ pub const File = struct {
elf,
macho,
c,
wasm,
};
pub const ErrorFlags = struct {
@@ -270,7 +286,7 @@ pub const File = struct {
};
}
pub fn flush(self: *File.C) !void {
pub fn flush(self: *File.C, module: *Module) !void {
const writer = self.base.file.?.writer();
try writer.writeAll(@embedFile("cbe.h"));
var includes = false;
@@ -1023,7 +1039,8 @@ pub const File = struct {
pub const abbrev_pad1 = 5;
pub const abbrev_parameter = 6;
pub fn flush(self: *Elf) !void {
/// Commit pending changes and write headers.
pub fn flush(self: *Elf, module: *Module) !void {
const target_endian = self.base.options.target.cpu.arch.endian();
const foreign_endian = target_endian != std.Target.current.cpu.arch.endian();
const ptr_width_bytes: u8 = self.ptrWidthBytes();
@@ -2832,6 +2849,7 @@ pub const File = struct {
};
pub const MachO = @import("link/MachO.zig");
const Wasm = @import("link/Wasm.zig");
};
/// Saturating multiplication

View File

@@ -73,7 +73,7 @@ fn createFile(allocator: *Allocator, file: fs.File, options: link.Options) !Mach
}
}
pub fn flush(self: *MachO) !void {}
pub fn flush(self: *MachO, module: *Module) !void {}
pub fn deinit(self: *MachO) void {}

View File

@@ -0,0 +1,453 @@
const Wasm = @This();
const std = @import("std");
const Allocator = std.mem.Allocator;
const assert = std.debug.assert;
const fs = std.fs;
const leb = std.debug.leb;
const Module = @import("../Module.zig");
const codegen = @import("../codegen/wasm.zig");
const link = @import("../link.zig");
/// Various magic numbers defined by the wasm spec
const spec = struct {
    const magic = [_]u8{ 0x00, 0x61, 0x73, 0x6D }; // \0asm
    const version = [_]u8{ 0x01, 0x00, 0x00, 0x00 }; // version 1
    // Section ids from the wasm binary format. Custom sections (id 0) are
    // used in this file as resizable padding between the other sections.
    const custom_id = 0;
    const types_id = 1;
    const imports_id = 2;
    const funcs_id = 3;
    const tables_id = 4;
    const memories_id = 5;
    const globals_id = 6;
    const exports_id = 7;
    const start_id = 8;
    const elements_id = 9;
    const code_id = 10;
    const data_id = 11;
};
// Tag identifying this linker backend in link.File.Tag.
pub const base_tag = link.File.Tag.wasm;
// Per-function linker state, stored in Decl.fn_link.wasm (set by
// updateDecl(), cleared by freeDecl()).
pub const FnData = struct {
    // Index of the function in both the funcsec and the codesec.
    funcidx: u32,
};
base: link.File,
// Managers for the individual output sections; see the structs below.
types: Types,
funcs: Funcs,
exports: Exports,
/// Array over the section structs used in the various sections above to
/// allow iteration when shifting sections to make space.
/// TODO: this should eventually be size 11 when we use all the sections.
sections: [4]*Section,
/// Create the output file, write the module header, and lay out the
/// initial fixed-size sections. The returned File belongs to a Wasm
/// struct allocated with `allocator`; free it via link.File.destroy.
pub fn openPath(allocator: *Allocator, dir: fs.Dir, sub_path: []const u8, options: link.Options) !*link.File {
    assert(options.object_format == .wasm);

    // TODO: read the file and keep valid parts instead of truncating
    const file = try dir.createFile(sub_path, .{ .truncate = true, .read = true });
    errdefer file.close();

    const wasm = try allocator.create(Wasm);
    errdefer allocator.destroy(wasm);

    // Module header: magic number followed by the version.
    try file.writeAll(&(spec.magic ++ spec.version));

    // TODO: this should vary depending on the section and be less arbitrary
    const section_size = 1024;
    const header_len = @sizeOf(@TypeOf(spec.magic ++ spec.version));

    wasm.* = .{
        .base = .{
            .tag = .wasm,
            .options = options,
            .file = file,
            .allocator = allocator,
        },
        .types = try Types.init(file, header_len, section_size),
        .funcs = try Funcs.init(file, header_len + section_size, section_size, header_len + 3 * section_size, section_size),
        .exports = try Exports.init(file, header_len + 2 * section_size, section_size),
        // These must be ordered as they will appear in the output file
        .sections = [_]*Section{
            &wasm.types.typesec.section,
            &wasm.funcs.funcsec,
            &wasm.exports.exportsec,
            &wasm.funcs.codesec.section,
        },
    };
    try file.setEndPos(header_len + 4 * section_size);

    return &wasm.base;
}
/// Free all heap memory owned by the linker. Exports holds no heap
/// allocations (only a Section, a u32 and a bool), so only `types`
/// and `funcs` need cleanup.
pub fn deinit(self: *Wasm) void {
    self.types.deinit();
    self.funcs.deinit();
}
/// Regenerate the functype and code for `decl` and write them to the
/// output file, replacing any previous version of the function.
/// On success, decl.fn_link.wasm holds the new funcidx.
pub fn updateDecl(self: *Wasm, module: *Module, decl: *Module.Decl) !void {
    if (decl.typed_value.most_recent.typed_value.ty.zigTypeTag() != .Fn)
        return error.TODOImplementNonFnDeclsForWasm;

    if (decl.fn_link.wasm) |fn_data| {
        self.funcs.free(fn_data.funcidx);
        // Clear the now-stale index immediately: if codegen below fails,
        // a later freeDecl() must not free this funcidx a second time.
        decl.fn_link.wasm = null;
    }

    var buf = std.ArrayList(u8).init(self.base.allocator);
    defer buf.deinit();

    try codegen.genFunctype(&buf, decl);
    const typeidx = try self.types.new(buf.items);
    // If code generation or funcs.new() fails below, release the freshly
    // added type entry instead of leaking it.
    errdefer self.types.free(typeidx);

    buf.items.len = 0;
    try codegen.genCode(&buf, decl);
    const funcidx = try self.funcs.new(typeidx, buf.items);
    decl.fn_link.wasm = .{ .funcidx = funcidx };

    // TODO: we should be more smart and set this only when needed
    self.exports.dirty = true;
}
/// Exported decls are not written eagerly; the whole export section is
/// re-emitted on flush() (see Exports). All we do here is mark it dirty.
pub fn updateDeclExports(
    self: *Wasm,
    module: *Module,
    decl: *const Module.Decl,
    exports: []const *Module.Export,
) !void {
    self.exports.dirty = true;
}
/// Release the function slot backing `decl` in the output file.
/// Safe to call for decls that were never (successfully) emitted.
pub fn freeDecl(self: *Wasm, decl: *Module.Decl) void {
    // TODO: remove this assert when non-function Decls are implemented
    assert(decl.typed_value.most_recent.typed_value.ty.zigTypeTag() == .Fn);
    const fn_data = decl.fn_link.wasm orelse return;
    self.funcs.free(fn_data.funcidx);
    decl.fn_link.wasm = null;
}
/// Commit pending changes. Type and code entries were already written
/// eagerly by updateDecl(), so only the export section may be out of date.
pub fn flush(self: *Wasm, module: *Module) !void {
    if (self.exports.dirty) try self.exports.writeAll(module);
}
/// This struct describes the location of a named section + custom section
/// padding in the output file. This is all the data we need to allow for
/// shifting sections around when padding runs out.
const Section = struct {
    /// The size of a section header: 1 byte section id + 5 bytes
    /// for the fixed-width ULEB128 encoded contents size.
    const header_size = 1 + 5;
    /// Offset of the section id byte from the start of the file.
    offset: u64,
    /// Size of the section, including the header and directly
    /// following custom section used for padding if any.
    size: u64,
    /// Resize the usable part of the section, handling the following custom
    /// section used for padding. If there is not enough padding left, shift
    /// all following sections to make space. Takes the current and target
    /// contents sizes of the section as arguments.
    /// NOTE(review): `current` is unused; it would only matter for the
    /// section-shifting case, which is not yet implemented.
    fn resize(self: *Section, file: fs.File, current: u32, target: u32) !void {
        // Section header + target contents size + custom section header
        // + custom section name + empty custom section > owned chunk of the file
        if (header_size + target + header_size + 1 + 0 > self.size)
            return error.TODOImplementSectionShifting;
        // The padding custom section starts right after the named section's
        // target contents.
        const new_custom_start = self.offset + header_size + target;
        // Declared contents size of the custom section: everything in our
        // owned chunk after the two headers, i.e. the 1-byte empty name
        // plus the padding bytes.
        const new_custom_contents_size = self.size - target - 2 * header_size;
        // The guard above guarantees at least the name byte fits.
        assert(new_custom_contents_size >= 1);
        // +1 for the name of the custom section, which we set to an empty string
        var custom_header: [header_size + 1]u8 = undefined;
        custom_header[0] = spec.custom_id;
        leb.writeUnsignedFixed(5, custom_header[1..header_size], @intCast(u32, new_custom_contents_size));
        custom_header[header_size] = 0;
        try file.pwriteAll(&custom_header, new_custom_start);
    }
};
/// This can be used to manage the contents of any section which uses a vector
/// of contents. This interface maintains index stability while allowing for
/// reuse of "dead" indexes.
const VecSection = struct {
    /// Represents a single entry in the vector (e.g. a type in the type section)
    const Entry = struct {
        /// Offset from the start of the section contents in bytes
        offset: u32,
        /// Size in bytes of the entry
        size: u32,
    };
    section: Section,
    /// Size in bytes of the contents of the section. Does not include
    /// the "header" containing the section id and this value.
    contents_size: u32,
    /// List of all entries in the contents of the section.
    entries: std.ArrayListUnmanaged(Entry) = std.ArrayListUnmanaged(Entry){},
    /// List of indexes of unreferenced entries which may be
    /// overwritten and reused.
    dead_list: std.ArrayListUnmanaged(u32) = std.ArrayListUnmanaged(u32){},
    /// Write the headers of the section and custom padding section
    fn init(comptime section_id: u8, file: fs.File, offset: u64, initial_size: u64) !VecSection {
        // section id, section size, empty vector, custom section id,
        // custom section size, empty custom section name
        var initial_data: [1 + 5 + 5 + 1 + 5 + 1]u8 = undefined;
        assert(initial_size >= initial_data.len);
        comptime var i = 0;
        initial_data[i] = section_id;
        i += 1;
        // Contents size of the named section: only the 5-byte fixed-width
        // length of the (empty) vector.
        leb.writeUnsignedFixed(5, initial_data[i..(i + 5)], 5);
        i += 5;
        // Length of the empty contents vector.
        leb.writeUnsignedFixed(5, initial_data[i..(i + 5)], 0);
        i += 5;
        initial_data[i] = spec.custom_id;
        i += 1;
        // Declared contents size of the custom padding section. Per the wasm
        // binary format this covers the 1-byte empty name as well as the
        // padding bytes, hence the `+ 1` (initial_data's last byte is the
        // name, which counts toward the custom section's contents).
        // This matches Section.resize(), which computes the same quantity
        // as size - target - 2 * header_size.
        leb.writeUnsignedFixed(5, initial_data[i..(i + 5)], @intCast(u32, initial_size - @sizeOf(@TypeOf(initial_data)) + 1));
        i += 5;
        // Empty custom section name.
        initial_data[i] = 0;
        try file.pwriteAll(&initial_data, offset);
        return VecSection{
            .section = .{
                .offset = offset,
                .size = initial_size,
            },
            .contents_size = 5,
        };
    }
    fn deinit(self: *VecSection, allocator: *Allocator) void {
        self.entries.deinit(allocator);
        self.dead_list.deinit(allocator);
    }
    /// Write a new entry into the file, returning the index used.
    fn addEntry(self: *VecSection, file: fs.File, allocator: *Allocator, data: []const u8) !u32 {
        // First look for a dead entry we can reuse
        for (self.dead_list.items) |dead_idx, i| {
            const dead_entry = &self.entries.items[dead_idx];
            if (dead_entry.size == data.len) {
                // Found a dead entry of the right length, overwrite it
                try file.pwriteAll(data, self.section.offset + Section.header_size + dead_entry.offset);
                _ = self.dead_list.swapRemove(i);
                return dead_idx;
            }
        }
        // TODO: We can be more efficient if we special-case one or
        // more consecutive dead entries at the end of the vector.
        // We failed to find a dead entry to reuse, so write the new
        // entry to the end of the section.
        try self.section.resize(file, self.contents_size, self.contents_size + @intCast(u32, data.len));
        try file.pwriteAll(data, self.section.offset + Section.header_size + self.contents_size);
        try self.entries.append(allocator, .{
            .offset = self.contents_size,
            .size = @intCast(u32, data.len),
        });
        self.contents_size += @intCast(u32, data.len);
        // Make sure the dead list always has enough space to store all free'd
        // entries. This makes it so that delEntry() cannot fail.
        // TODO: figure out a better way that doesn't waste as much memory
        try self.dead_list.ensureCapacity(allocator, self.entries.items.len);
        // Update the size in the section header and the item count of
        // the contents vector.
        var size_and_count: [10]u8 = undefined;
        leb.writeUnsignedFixed(5, size_and_count[0..5], self.contents_size);
        leb.writeUnsignedFixed(5, size_and_count[5..], @intCast(u32, self.entries.items.len));
        try file.pwriteAll(&size_and_count, self.section.offset + 1);
        return @intCast(u32, self.entries.items.len - 1);
    }
    /// Mark the type referenced by the given index as dead.
    /// Cannot fail: addEntry() pre-reserves dead_list capacity.
    fn delEntry(self: *VecSection, index: u32) void {
        self.dead_list.appendAssumeCapacity(index);
    }
};
/// Manages the "type" section: a vector of functype entries, addressed
/// by typeidx. Thin wrapper around VecSection that resolves the file
/// and allocator through the parent Wasm struct.
const Types = struct {
    typesec: VecSection,

    fn init(file: fs.File, offset: u64, initial_size: u64) !Types {
        const typesec = try VecSection.init(spec.types_id, file, offset, initial_size);
        return Types{ .typesec = typesec };
    }

    fn deinit(self: *Types) void {
        const allocator = @fieldParentPtr(Wasm, "types", self).base.allocator;
        self.typesec.deinit(allocator);
    }

    /// Write a new functype entry, returning the typeidx used.
    fn new(self: *Types, data: []const u8) !u32 {
        const wasm = @fieldParentPtr(Wasm, "types", self);
        return self.typesec.addEntry(wasm.base.file.?, wasm.base.allocator, data);
    }

    /// Mark the given typeidx as dead so its slot may be reused.
    fn free(self: *Types, typeidx: u32) void {
        self.typesec.delEntry(typeidx);
    }
};
/// Manages the "function" and "code" sections together: a function's
/// funcidx indexes both its typeidx slot in the funcsec and its body
/// entry in the codesec, so the two must stay in lockstep.
const Funcs = struct {
    /// This section needs special handling to keep the indexes matching with
    /// the codesec, so we can't just use a VecSection.
    funcsec: Section,
    /// The typeidx stored for each function, indexed by funcidx.
    func_types: std.ArrayListUnmanaged(u32) = std.ArrayListUnmanaged(u32){},
    codesec: VecSection,
    fn init(file: fs.File, funcs_offset: u64, funcs_size: u64, code_offset: u64, code_size: u64) !Funcs {
        return Funcs{
            // The funcsec is laid out like a VecSection but managed manually,
            // so only its Section location data is kept.
            .funcsec = (try VecSection.init(spec.funcs_id, file, funcs_offset, funcs_size)).section,
            .codesec = try VecSection.init(spec.code_id, file, code_offset, code_size),
        };
    }
    fn deinit(self: *Funcs) void {
        const wasm = @fieldParentPtr(Wasm, "funcs", self);
        self.func_types.deinit(wasm.base.allocator);
        self.codesec.deinit(wasm.base.allocator);
    }
    /// Add a new function to the binary, first finding space for and writing
    /// the code then writing the typeidx to the corresponding index in the
    /// funcsec. Returns the function index used.
    fn new(self: *Funcs, typeidx: u32, code: []const u8) !u32 {
        const wasm = @fieldParentPtr(Wasm, "funcs", self);
        const file = wasm.base.file.?;
        const allocator = wasm.base.allocator;
        assert(self.func_types.items.len == self.codesec.entries.items.len);
        // TODO: consider nop-padding the code if there is a close but not perfect fit
        const funcidx = try self.codesec.addEntry(file, allocator, code);
        if (self.func_types.items.len < self.codesec.entries.items.len) {
            // A brand-new codesec entry was appended; grow the funcsec
            // contents by one fixed-width u32 to match.
            // u32 vector length + funcs_count u32s in the vector
            const current = 5 + @intCast(u32, self.func_types.items.len) * 5;
            try self.funcsec.resize(file, current, current + 5);
            try self.func_types.append(allocator, typeidx);
            // Update the size in the section header and the item count of
            // the contents vector.
            const count = @intCast(u32, self.func_types.items.len);
            var size_and_count: [10]u8 = undefined;
            leb.writeUnsignedFixed(5, size_and_count[0..5], 5 + count * 5);
            leb.writeUnsignedFixed(5, size_and_count[5..], count);
            try file.pwriteAll(&size_and_count, self.funcsec.offset + 1);
        } else {
            // We are overwriting a dead function and may now free the type
            wasm.types.free(self.func_types.items[funcidx]);
            // Record the new typeidx for this slot. Without this, a future
            // reuse of this funcidx would free the stale, already-freed
            // typeidx again, corrupting the type section's dead list.
            self.func_types.items[funcidx] = typeidx;
        }
        assert(self.func_types.items.len == self.codesec.entries.items.len);
        // Write the typeidx into the funcsec slot for this funcidx.
        var typeidx_leb: [5]u8 = undefined;
        leb.writeUnsignedFixed(5, &typeidx_leb, typeidx);
        try file.pwriteAll(&typeidx_leb, self.funcsec.offset + Section.header_size + 5 + funcidx * 5);
        return funcidx;
    }
    /// Mark the given funcidx as dead. Its type entry is freed lazily
    /// when the slot is reused (see new()).
    fn free(self: *Funcs, funcidx: u32) void {
        self.codesec.delEntry(funcidx);
    }
};
/// Exports are tricky. We can't leave dead entries in the binary as they
/// would obviously be visible from the execution environment. The simplest
/// way to work around this is to re-emit the export section whenever
/// something changes. This also makes it easier to ensure exported function
/// and global indexes are updated as they change.
const Exports = struct {
    exportsec: Section,
    /// Size in bytes of the contents of the section. Does not include
    /// the "header" containing the section id and this value.
    contents_size: u32,
    /// If this is true, then exports will be rewritten on flush()
    dirty: bool,
    fn init(file: fs.File, offset: u64, initial_size: u64) !Exports {
        return Exports{
            // Laid out like a VecSection but fully rewritten on change,
            // so only the Section location data is kept.
            .exportsec = (try VecSection.init(spec.exports_id, file, offset, initial_size)).section,
            .contents_size = 5,
            .dirty = false,
        };
    }
    /// Re-emit the entire export section from the module's current
    /// decl exports and clear the dirty flag.
    fn writeAll(self: *Exports, module: *Module) !void {
        const wasm = @fieldParentPtr(Wasm, "exports", self);
        const file = wasm.base.file.?;
        var buf: [5]u8 = undefined;
        // First ensure the section is the right size
        var export_count: u32 = 0;
        var new_contents_size: u32 = 5;
        for (module.decl_exports.entries.items) |entry| {
            for (entry.value) |e| {
                export_count += 1;
                new_contents_size += calcSize(e);
            }
        }
        if (new_contents_size != self.contents_size) {
            try self.exportsec.resize(file, self.contents_size, new_contents_size);
            leb.writeUnsignedFixed(5, &buf, new_contents_size);
            try file.pwriteAll(&buf, self.exportsec.offset + 1);
            // Keep our bookkeeping in sync so the next writeAll() correctly
            // detects whether a resize is needed. Previously this was never
            // updated, forcing a redundant resize on every rewrite.
            self.contents_size = new_contents_size;
        }
        try file.seekTo(self.exportsec.offset + Section.header_size);
        const writer = file.writer();
        // Length of the exports vec
        leb.writeUnsignedFixed(5, &buf, export_count);
        try writer.writeAll(&buf);
        for (module.decl_exports.entries.items) |entry|
            for (entry.value) |e| try writeExport(writer, e);
        self.dirty = false;
    }
    /// Return the total number of bytes an export will take.
    /// TODO: fixed-width LEB128 is currently used for simplicity, but should
    /// be replaced with proper variable-length LEB128 as it is inefficient.
    fn calcSize(e: *Module.Export) u32 {
        // LEB128 name length + name bytes + export type + LEB128 index
        return 5 + @intCast(u32, e.options.name.len) + 1 + 5;
    }
    /// Write the data for a single export to the given writer.
    /// TODO: fixed-width LEB128 is currently used for simplicity, but should
    /// be replaced with proper variable-length LEB128 as it is inefficient.
    fn writeExport(writer: anytype, e: *Module.Export) !void {
        var buf: [5]u8 = undefined;
        // Export name length + name
        leb.writeUnsignedFixed(5, &buf, @intCast(u32, e.options.name.len));
        try writer.writeAll(&buf);
        try writer.writeAll(e.options.name);
        switch (e.exported_decl.typed_value.most_recent.typed_value.ty.zigTypeTag()) {
            .Fn => {
                // Type of the export
                try writer.writeByte(0x00);
                // Exported function index
                leb.writeUnsignedFixed(5, &buf, e.exported_decl.fn_link.wasm.?.funcidx);
                try writer.writeAll(&buf);
            },
            else => return error.TODOImplementNonFnDeclsForWasm,
        }
    }
};

View File

@@ -152,6 +152,7 @@ const usage_build_generic =
\\ -ofmt=[mode] Override target object format
\\ elf Executable and Linking Format
\\ c Compile to C source code
\\ wasm WebAssembly
\\ coff (planned) Common Object File Format (Windows)
\\ pe (planned) Portable Executable (Windows)
\\ macho (planned) macOS relocatables

View File

@@ -12,6 +12,11 @@ const linux_riscv64 = std.zig.CrossTarget{
.os_tag = .linux,
};
const wasi = std.zig.CrossTarget{
.cpu_arch = .wasm32,
.os_tag = .wasi,
};
pub fn addCases(ctx: *TestContext) !void {
{
var case = ctx.exe("hello world with updates", linux_x64);
@@ -539,4 +544,35 @@ pub fn addCases(ctx: *TestContext) !void {
"",
);
}
{
var case = ctx.exe("wasm returns", wasi);
case.addCompareOutput(
\\export fn _start() u32 {
\\ return 42;
\\}
,
"42\n",
);
case.addCompareOutput(
\\export fn _start() i64 {
\\ return 42;
\\}
,
"42\n",
);
case.addCompareOutput(
\\export fn _start() f32 {
\\ return 42.0;
\\}
,
// This is what you get when you take the bits of the IEEE-754
// representation of 42.0 and reinterpret them as an unsigned
// integer. Guess that's a bug in wasmtime.
"1109917696\n",
);
}
}