compute LLVMTypes in ParamTypeIterator (#13592)

Follow-up to #13376 — fixes a bug in the x86_64 C ABI.

Co-authored-by: Veikka Tuominen <git@vexu.eu>
This commit is contained in:
Guillaume Wenzek
2022-11-20 10:14:02 +01:00
committed by GitHub
parent fca776f8f5
commit 476b946802
3 changed files with 77 additions and 137 deletions

View File

@@ -1049,60 +1049,20 @@ pub const Object = struct {
const aggregate = builder.buildInsertValue(partial, len_param, 1, "");
try args.append(aggregate);
},
.multiple_llvm_ints => {
.multiple_llvm_types => {
assert(!it.byval_attr);
const llvm_ints = it.llvm_types_buffer[0..it.llvm_types_len];
const field_types = it.llvm_types_buffer[0..it.llvm_types_len];
const param_ty = fn_info.param_types[it.zig_index - 1];
const param_llvm_ty = try dg.lowerType(param_ty);
const param_alignment = param_ty.abiAlignment(target);
const arg_ptr = buildAllocaInner(builder, llvm_func, false, param_llvm_ty, param_alignment, target);
var field_types_buf: [8]*llvm.Type = undefined;
const field_types = field_types_buf[0..llvm_ints.len];
for (llvm_ints) |int_bits, i| {
field_types[i] = dg.context.intType(int_bits);
}
const ints_llvm_ty = dg.context.structType(field_types.ptr, @intCast(c_uint, field_types.len), .False);
const casted_ptr = builder.buildBitCast(arg_ptr, ints_llvm_ty.pointerType(0), "");
for (llvm_ints) |_, field_i_usize| {
const llvm_ty = dg.context.structType(field_types.ptr, @intCast(c_uint, field_types.len), .False);
const casted_ptr = builder.buildBitCast(arg_ptr, llvm_ty.pointerType(0), "");
for (field_types) |_, field_i_usize| {
const field_i = @intCast(c_uint, field_i_usize);
const param = llvm_func.getParam(llvm_arg_i);
llvm_arg_i += 1;
const field_ptr = builder.buildStructGEP(ints_llvm_ty, casted_ptr, field_i, "");
const store_inst = builder.buildStore(param, field_ptr);
store_inst.setAlignment(target.cpu.arch.ptrBitWidth() / 8);
}
const is_by_ref = isByRef(param_ty);
const loaded = if (is_by_ref) arg_ptr else l: {
const load_inst = builder.buildLoad(param_llvm_ty, arg_ptr, "");
load_inst.setAlignment(param_alignment);
break :l load_inst;
};
try args.append(loaded);
},
.multiple_llvm_float => {
assert(!it.byval_attr);
const llvm_floats = it.llvm_types_buffer[0..it.llvm_types_len];
const param_ty = fn_info.param_types[it.zig_index - 1];
const param_llvm_ty = try dg.lowerType(param_ty);
const param_alignment = param_ty.abiAlignment(target);
const arg_ptr = buildAllocaInner(builder, llvm_func, false, param_llvm_ty, param_alignment, target);
var field_types_buf: [8]*llvm.Type = undefined;
const field_types = field_types_buf[0..llvm_floats.len];
for (llvm_floats) |float_bits, i| {
switch (float_bits) {
64 => field_types[i] = dg.context.doubleType(),
80 => field_types[i] = dg.context.x86FP80Type(),
else => {},
}
}
const floats_llvm_ty = dg.context.structType(field_types.ptr, @intCast(c_uint, field_types.len), .False);
const casted_ptr = builder.buildBitCast(arg_ptr, floats_llvm_ty.pointerType(0), "");
for (llvm_floats) |_, field_i_usize| {
const field_i = @intCast(c_uint, field_i_usize);
const param = llvm_func.getParam(llvm_arg_i);
llvm_arg_i += 1;
const field_ptr = builder.buildStructGEP(floats_llvm_ty, casted_ptr, field_i, "");
const field_ptr = builder.buildStructGEP(llvm_ty, casted_ptr, field_i, "");
const store_inst = builder.buildStore(param, field_ptr);
store_inst.setAlignment(target.cpu.arch.ptrBitWidth() / 8);
}
@@ -2626,8 +2586,7 @@ pub const DeclGen = struct {
// No attributes needed for these.
.no_bits,
.abi_sized_int,
.multiple_llvm_ints,
.multiple_llvm_float,
.multiple_llvm_types,
.as_u16,
.float_array,
.i32_array,
@@ -3167,25 +3126,8 @@ pub const DeclGen = struct {
llvm_params.appendAssumeCapacity(ptr_llvm_ty);
llvm_params.appendAssumeCapacity(len_llvm_ty);
},
.multiple_llvm_ints => {
const llvm_ints = it.llvm_types_buffer[0..it.llvm_types_len];
try llvm_params.ensureUnusedCapacity(it.llvm_types_len);
for (llvm_ints) |int_bits| {
const big_int_ty = dg.context.intType(int_bits);
llvm_params.appendAssumeCapacity(big_int_ty);
}
},
.multiple_llvm_float => {
const llvm_ints = it.llvm_types_buffer[0..it.llvm_types_len];
try llvm_params.ensureUnusedCapacity(it.llvm_types_len);
for (llvm_ints) |float_bits| {
const float_ty = switch (float_bits) {
64 => dg.context.doubleType(),
80 => dg.context.x86FP80Type(),
else => unreachable,
};
llvm_params.appendAssumeCapacity(float_ty);
}
.multiple_llvm_types => {
try llvm_params.appendSlice(it.llvm_types_buffer[0..it.llvm_types_len]);
},
.as_u16 => {
try llvm_params.append(dg.context.intType(16));
@@ -4824,10 +4766,10 @@ pub const FuncGen = struct {
llvm_args.appendAssumeCapacity(ptr);
llvm_args.appendAssumeCapacity(len);
},
.multiple_llvm_ints => {
.multiple_llvm_types => {
const arg = args[it.zig_index - 1];
const param_ty = self.air.typeOf(arg);
const llvm_ints = it.llvm_types_buffer[0..it.llvm_types_len];
const llvm_types = it.llvm_types_buffer[0..it.llvm_types_len];
const llvm_arg = try self.resolveInst(arg);
const is_by_ref = isByRef(param_ty);
const arg_ptr = if (is_by_ref) llvm_arg else p: {
@@ -4837,51 +4779,13 @@ pub const FuncGen = struct {
break :p p;
};
var field_types_buf: [8]*llvm.Type = undefined;
const field_types = field_types_buf[0..llvm_ints.len];
for (llvm_ints) |int_bits, i| {
field_types[i] = self.dg.context.intType(int_bits);
}
const ints_llvm_ty = self.dg.context.structType(field_types.ptr, @intCast(c_uint, field_types.len), .False);
const casted_ptr = self.builder.buildBitCast(arg_ptr, ints_llvm_ty.pointerType(0), "");
const llvm_ty = self.dg.context.structType(llvm_types.ptr, @intCast(c_uint, llvm_types.len), .False);
const casted_ptr = self.builder.buildBitCast(arg_ptr, llvm_ty.pointerType(0), "");
try llvm_args.ensureUnusedCapacity(it.llvm_types_len);
for (llvm_ints) |_, i_usize| {
for (llvm_types) |field_ty, i_usize| {
const i = @intCast(c_uint, i_usize);
const field_ptr = self.builder.buildStructGEP(ints_llvm_ty, casted_ptr, i, "");
const load_inst = self.builder.buildLoad(field_types[i], field_ptr, "");
load_inst.setAlignment(target.cpu.arch.ptrBitWidth() / 8);
llvm_args.appendAssumeCapacity(load_inst);
}
},
.multiple_llvm_float => {
const arg = args[it.zig_index - 1];
const param_ty = self.air.typeOf(arg);
const llvm_floats = it.llvm_types_buffer[0..it.llvm_types_len];
const llvm_arg = try self.resolveInst(arg);
const is_by_ref = isByRef(param_ty);
const arg_ptr = if (is_by_ref) llvm_arg else p: {
const p = self.buildAlloca(llvm_arg.typeOf(), null);
const store_inst = self.builder.buildStore(llvm_arg, p);
store_inst.setAlignment(param_ty.abiAlignment(target));
break :p p;
};
var field_types_buf: [8]*llvm.Type = undefined;
const field_types = field_types_buf[0..llvm_floats.len];
for (llvm_floats) |float_bits, i| {
switch (float_bits) {
64 => field_types[i] = self.dg.context.doubleType(),
80 => field_types[i] = self.dg.context.x86FP80Type(),
else => {},
}
}
const floats_llvm_ty = self.dg.context.structType(field_types.ptr, @intCast(c_uint, field_types.len), .False);
const casted_ptr = self.builder.buildBitCast(arg_ptr, floats_llvm_ty.pointerType(0), "");
try llvm_args.ensureUnusedCapacity(it.llvm_types_len);
for (llvm_floats) |_, i_usize| {
const i = @intCast(c_uint, i_usize);
const field_ptr = self.builder.buildStructGEP(floats_llvm_ty, casted_ptr, i, "");
const load_inst = self.builder.buildLoad(field_types[i], field_ptr, "");
const field_ptr = self.builder.buildStructGEP(llvm_ty, casted_ptr, i, "");
const load_inst = self.builder.buildLoad(field_ty, field_ptr, "");
load_inst.setAlignment(target.cpu.arch.ptrBitWidth() / 8);
llvm_args.appendAssumeCapacity(load_inst);
}
@@ -10473,7 +10377,7 @@ const ParamTypeIterator = struct {
llvm_index: u32,
target: std.Target,
llvm_types_len: u32,
llvm_types_buffer: [8]u16,
llvm_types_buffer: [8]*llvm.Type,
byval_attr: bool,
const Lowering = union(enum) {
@@ -10481,8 +10385,7 @@ const ParamTypeIterator = struct {
byval,
byref,
abi_sized_int,
multiple_llvm_ints,
multiple_llvm_float,
multiple_llvm_types,
slice,
as_u16,
float_array: u8,
@@ -10515,7 +10418,7 @@ const ParamTypeIterator = struct {
it.zig_index += 1;
return .no_bits;
}
const dg = it.dg;
switch (it.fn_info.cc) {
.Unspecified, .Inline => {
it.zig_index += 1;
@@ -10584,28 +10487,28 @@ const ParamTypeIterator = struct {
it.llvm_index += 1;
return .byval;
}
var llvm_types_buffer: [8]u16 = undefined;
var llvm_types_buffer: [8]*llvm.Type = undefined;
var llvm_types_index: u32 = 0;
for (classes) |class| {
switch (class) {
.integer => {
llvm_types_buffer[llvm_types_index] = 64;
llvm_types_buffer[llvm_types_index] = dg.context.intType(64);
llvm_types_index += 1;
},
.sse => {
llvm_types_buffer[llvm_types_index] = 64;
llvm_types_buffer[llvm_types_index] = dg.context.doubleType();
llvm_types_index += 1;
},
.sseup => {
llvm_types_buffer[llvm_types_index] = 64;
llvm_types_buffer[llvm_types_index] = dg.context.doubleType();
llvm_types_index += 1;
},
.x87 => {
llvm_types_buffer[llvm_types_index] = 80;
llvm_types_buffer[llvm_types_index] = dg.context.x86FP80Type();
llvm_types_index += 1;
},
.x87up => {
llvm_types_buffer[llvm_types_index] = 80;
llvm_types_buffer[llvm_types_index] = dg.context.x86FP80Type();
llvm_types_index += 1;
},
.complex_x87 => {
@@ -10625,7 +10528,7 @@ const ParamTypeIterator = struct {
it.llvm_types_len = llvm_types_index;
it.llvm_index += llvm_types_index;
it.zig_index += 1;
return if (classes[0] == .integer) .multiple_llvm_ints else .multiple_llvm_float;
return .multiple_llvm_types;
},
},
.wasm32 => {
@@ -10649,8 +10552,8 @@ const ParamTypeIterator = struct {
.byval => return .byval,
.integer => {
it.llvm_types_len = 1;
it.llvm_types_buffer[0] = 64;
return .multiple_llvm_ints;
it.llvm_types_buffer[0] = dg.context.intType(64);
return .multiple_llvm_types;
},
.double_integer => return Lowering{ .i64_array = 2 },
}