codegen: support generating packed structs
@@ -2146,6 +2146,10 @@ fn airStore(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
         };
         const int_elem_ty = Type.initPayload(&int_ty_payload.base);

+        if (isByRef(int_elem_ty, func.target)) {
+            return func.fail("TODO: airStore for pointers to bitfields with backing type larger than 64bits", .{});
+        }
+
         var mask = @intCast(u64, (@as(u65, 1) << @intCast(u7, ty.bitSize(func.target))) - 1);
         mask <<= @intCast(u6, ptr_info.bit_offset);
         mask ^= ~@as(u64, 0);
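The added guard rejects bitfields whose backing integer is wider than 64 bits; for the supported case, the mask ends up with zero bits over the field's bit range and ones everywhere else, presumably so the old bits can be cleared before the shifted operand is OR-ed in. A minimal standalone sketch of that mask arithmetic with concrete numbers (not part of the commit; it reuses the same 0.10-era builtin call syntax as the diff):

const std = @import("std");

test "bitfield store mask" {
    // Hypothetical field: 3 bits wide, stored at bit offset 2 of a 64-bit backing integer.
    const field_bits: u7 = 3;
    const bit_offset: u6 = 2;

    var mask = @intCast(u64, (@as(u65, 1) << field_bits) - 1); // 0b111
    mask <<= bit_offset; // 0b11100: ones over the field's bits
    mask ^= ~@as(u64, 0); // invert: zeroes over the field, ones elsewhere

    try std.testing.expectEqual(~@as(u64, 0b11100), mask);
}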
@@ -556,14 +556,42 @@ pub fn generateSymbol(
        },
        .Struct => {
            if (typed_value.ty.containerLayout() == .Packed) {
-                return Result{
-                    .fail = try ErrorMsg.create(
-                        bin_file.allocator,
-                        src_loc,
-                        "TODO implement generateSymbol for packed struct",
-                        .{},
-                    ),
-                };
+                const struct_obj = typed_value.ty.castTag(.@"struct").?.data;
+                const fields = struct_obj.fields.values();
+                const field_vals = typed_value.val.castTag(.aggregate).?.data;
+                const abi_size = math.cast(usize, typed_value.ty.abiSize(target)) orelse return error.Overflow;
+                const current_pos = code.items.len;
+                const mod = bin_file.options.module.?;
+                try code.resize(current_pos + abi_size);
+                var bits: u16 = 0;
+
+                for (field_vals) |field_val, index| {
+                    const field_ty = fields[index].ty;
+                    // A pointer may point to a decl which must be marked used,
+                    // but can also result in a relocation. Therefore we handle those separately.
+                    if (field_ty.zigTypeTag() == .Pointer) {
+                        std.debug.print("Hit!\n", .{});
+                        var tmp_list = try std.ArrayList(u8).initCapacity(code.allocator, field_ty.abiSize(target));
+                        defer tmp_list.deinit();
+                        switch (try generateSymbol(bin_file, src_loc, .{
+                            .ty = field_ty,
+                            .val = field_val,
+                        }, &tmp_list, debug_output, reloc_info)) {
+                            .appended => {
+                                mem.copy(u8, code.items[current_pos..], tmp_list.items);
+                            },
+                            .externally_managed => |external_slice| {
+                                mem.copy(u8, code.items[current_pos..], external_slice);
+                            },
+                            .fail => |em| return Result{ .fail = em },
+                        }
+                    } else {
+                        field_val.writeToPackedMemory(field_ty, mod, code.items[current_pos..], bits);
+                    }
+                    bits += @intCast(u16, field_ty.bitSize(target));
+                }
+
+                return Result{ .appended = {} };
            }

            const struct_begin = code.items.len;
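For packed layout, the buffer is resized to the struct's ABI size up front and each field is then written at its accumulated bit offset: ordinary values go through Value.writeToPackedMemory, while pointer fields are routed through a recursive generateSymbol call so that the pointed-to decl is marked used and any relocation is emitted. A simplified, standalone illustration of the bit-packing idea (writePackedBits is a hypothetical helper, not the compiler's writeToPackedMemory; it assumes little-endian byte order and fields of at most 64 bits):

const std = @import("std");

/// Hypothetical helper: OR the low `bit_size` bits of `value` into `buffer`,
/// starting at bit `bit_offset`. One bit at a time, for clarity over speed.
fn writePackedBits(buffer: []u8, bit_offset: u16, bit_size: u16, value: u64) void {
    var i: u16 = 0;
    while (i < bit_size) : (i += 1) {
        const bit = @truncate(u1, value >> @intCast(u6, i));
        const dst = bit_offset + i;
        buffer[dst / 8] |= @as(u8, bit) << @intCast(u3, dst % 8);
    }
}

test "pack two small fields" {
    // Models something like: packed struct { a: u3 = 0b101, b: u5 = 0b11010 }
    var buf = [_]u8{0} ** 2;
    writePackedBits(&buf, 0, 3, 0b101); // a occupies bits 0..2
    writePackedBits(&buf, 3, 5, 0b11010); // b occupies bits 3..7
    try std.testing.expectEqual(@as(u8, 0b11010_101), buf[0]);
}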
@@ -1841,8 +1841,9 @@ fn setupStart(wasm: *Wasm) !void {
 /// Sets up the memory section of the wasm module, as well as the stack.
 fn setupMemory(wasm: *Wasm) !void {
     log.debug("Setting up memory layout", .{});
-    const page_size = 64 * 1024;
-    const stack_size = wasm.base.options.stack_size_override orelse page_size * 1;
+    const page_size = std.wasm.page_size; // 65kb
+    // Use the user-provided stack size or else we use 1MB by default
+    const stack_size = wasm.base.options.stack_size_override orelse page_size * 16;
     const stack_alignment = 16; // wasm's stack alignment as specified by tool-convention
     // Always place the stack at the start by default
     // unless the user specified the global-base flag
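The linker now takes the page size from std.wasm.page_size (64 KiB per wasm page) and defaults the stack to 16 pages, i.e. 1 MiB, instead of a single page. A quick standalone check of those numbers (not part of the commit):

const std = @import("std");

test "default wasm stack size" {
    const page_size = std.wasm.page_size; // 64 KiB per wasm page
    const default_stack_size = page_size * 16; // the new default chosen above
    try std.testing.expectEqual(@as(usize, 64 * 1024), page_size);
    try std.testing.expectEqual(@as(usize, 1024 * 1024), default_stack_size);
}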