stage2: improved union support

 * `Module.Union.getFullyQualifiedName` returns a sentinel-terminated
   slice so that backends that need null termination do not need to make
   an additional copy.
 * Module.Union: implement a `getLayout` function that returns
   information about ABI size and alignment so that the LLVM backend can
   properly lower union types into LLVM types.
 * Sema: `resolveType` now returns `error.GenericPoison` rather than a
   Type with tag `generic_poison`. Call sites that want to allow generic
   poison need to bypass this higher-level function.
 * Sema: implement coercion of enums and enum literals to unions (see
   the sketch after this list).
 * Sema: fix comptime mutation of pointers to unions.
 * LLVM backend: implement proper lowering of union types and values
   according to the union layout, and update the handling of AIR
   instructions that deal with unions to respect that layout.
 * LLVM backend: handle `decl_ref_mut`.
   - Maybe this should be unreachable, since comptime vars should become
     non-mutable when they go out of scope, but it is harmless for the
     LLVM backend to support lowering the value.
 * Type: fix `requiresComptime` for optionals, pointers, and some other
   types. This function is still wrong for structs, unions, and enums.
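
For illustration, a minimal sketch of the coercion this enables; the `Tag` and
`Result` types are hypothetical and not part of this commit's test changes.
Comptime coercion requires the selected union field to have only one possible
value, and runtime coercion additionally requires every field to be zero-bit.

    const std = @import("std");
    const expect = std.testing.expect;

    const Tag = enum { ok, failed };
    const Result = union(Tag) {
        ok: void,
        failed: void,
    };

    test "enum and enum literal coerce to union" {
        // Illustrative only: the selected field must have exactly one
        // possible value (void here) for the comptime coercion.
        const t: Tag = .failed;
        const r: Result = t;
        try expect(r == .failed);

        // An enum literal coerces the same way.
        const c: Result = .ok;
        try expect(c == .ok);
    }
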
Andrew Kelley
2021-10-14 17:44:46 -07:00
parent ed5a5e2293
commit 8b88274781
7 changed files with 583 additions and 236 deletions

View File

@@ -964,7 +964,7 @@ pub const Union = struct {
pub const Fields = std.StringArrayHashMapUnmanaged(Field);
pub fn getFullyQualifiedName(s: *Union, gpa: *Allocator) ![]u8 {
pub fn getFullyQualifiedName(s: *Union, gpa: *Allocator) ![:0]u8 {
return s.owner_decl.getFullyQualifiedName(gpa);
}
@@ -988,7 +988,7 @@ pub const Union = struct {
};
}
pub fn onlyTagHasCodegenBits(u: Union) bool {
pub fn hasAllZeroBitFieldTypes(u: Union) bool {
assert(u.haveFieldTypes());
for (u.fields.values()) |field| {
if (field.ty.hasCodeGenBits()) return false;
@@ -1038,13 +1038,32 @@ pub const Union = struct {
}
pub fn abiSize(u: Union, target: Target, have_tag: bool) u64 {
assert(u.haveFieldTypes());
return u.getLayout(target, have_tag).abi_size;
}
pub const Layout = struct {
abi_size: u64,
abi_align: u32,
most_aligned_field: u32,
most_aligned_field_size: u64,
biggest_field: u32,
payload_size: u64,
payload_align: u32,
tag_align: u32,
tag_size: u64,
};
pub fn getLayout(u: Union, target: Target, have_tag: bool) Layout {
assert(u.status == .have_layout);
const is_packed = u.layout == .Packed;
if (is_packed) @panic("TODO packed unions");
var most_aligned_field: usize = undefined;
var most_aligned_field_size: u64 = undefined;
var biggest_field: usize = undefined;
var payload_size: u64 = 0;
var payload_align: u32 = 0;
for (u.fields.values()) |field| {
for (u.fields.values()) |field, i| {
if (!field.ty.hasCodeGenBits()) continue;
const field_align = a: {
@@ -1054,12 +1073,28 @@ pub const Union = struct {
break :a @intCast(u32, field.abi_align.toUnsignedInt());
}
};
payload_size = @maximum(payload_size, field.ty.abiSize(target));
payload_align = @maximum(payload_align, field_align);
}
if (!have_tag) {
return std.mem.alignForwardGeneric(u64, payload_size, payload_align);
const field_size = field.ty.abiSize(target);
if (field_size > payload_size) {
payload_size = field_size;
biggest_field = i;
}
if (field_align > payload_align) {
payload_align = field_align;
most_aligned_field = i;
most_aligned_field_size = field_size;
}
}
if (!have_tag) return .{
.abi_size = std.mem.alignForwardGeneric(u64, payload_size, payload_align),
.abi_align = payload_align,
.most_aligned_field = @intCast(u32, most_aligned_field),
.most_aligned_field_size = most_aligned_field_size,
.biggest_field = @intCast(u32, biggest_field),
.payload_size = payload_size,
.payload_align = payload_align,
.tag_align = 0,
.tag_size = 0,
};
// Put the tag before or after the payload depending on which one's
// alignment is greater.
const tag_size = u.tag_ty.abiSize(target);
@@ -1078,7 +1113,17 @@ pub const Union = struct {
size += tag_size;
size = std.mem.alignForwardGeneric(u64, size, payload_align);
}
return size;
return .{
.abi_size = size,
.abi_align = @maximum(tag_align, payload_align),
.most_aligned_field = @intCast(u32, most_aligned_field),
.most_aligned_field_size = most_aligned_field_size,
.biggest_field = @intCast(u32, biggest_field),
.payload_size = payload_size,
.payload_align = payload_align,
.tag_align = tag_align,
.tag_size = tag_size,
};
}
};
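
As an illustrative aside (not part of the diff), the layout computed by
`getLayout` can be observed from user code through `@sizeOf` and `@alignOf`.
The `Example` type below is hypothetical; it assumes the non-packed rules
above, where the payload takes the size and alignment of the largest and most
aligned field and the auto tag fits in one byte.

    const std = @import("std");
    const expect = std.testing.expect;

    const Example = union(enum) {
        small: u32,
        big: u64,
    };

    test "observable union layout" {
        // payload_size = @sizeOf(u64), payload_align = @alignOf(u64);
        // the one-byte tag has tag_align < payload_align, so the payload
        // comes first and:
        //   abi_align = @maximum(tag_align, payload_align) = @alignOf(u64)
        //   abi_size  = alignForward(payload_size + tag_size, payload_align)
        try expect(@alignOf(Example) == @alignOf(u64));
        try expect(@sizeOf(Example) > @sizeOf(u64)); // extra space for the tag
    }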

View File

@@ -1026,7 +1026,9 @@ fn resolveConstString(
pub fn resolveType(sema: *Sema, block: *Block, src: LazySrcLoc, zir_ref: Zir.Inst.Ref) !Type {
const air_inst = sema.resolveInst(zir_ref);
return sema.analyzeAsType(block, src, air_inst);
const ty = try sema.analyzeAsType(block, src, air_inst);
if (ty.tag() == .generic_poison) return error.GenericPoison;
return ty;
}
fn analyzeAsType(
@@ -1284,10 +1286,10 @@ fn resolveInt(
block: *Block,
src: LazySrcLoc,
zir_ref: Zir.Inst.Ref,
dest_type: Type,
dest_ty: Type,
) !u64 {
const air_inst = sema.resolveInst(zir_ref);
const coerced = try sema.coerce(block, dest_type, air_inst, src);
const coerced = try sema.coerce(block, dest_ty, air_inst, src);
const val = try sema.resolveConstValue(block, src, coerced);
return val.toUnsignedInt();
@@ -2403,6 +2405,19 @@ fn failWithBadUnionFieldAccess(
return sema.failWithOwnedErrorMsg(msg);
}
fn addDeclaredHereNote(sema: *Sema, parent: *Module.ErrorMsg, decl_ty: Type) !void {
const src_loc = decl_ty.declSrcLocOrNull() orelse return;
const category = switch (decl_ty.zigTypeTag()) {
.Union => "union",
.Struct => "struct",
.Enum => "enum",
.Opaque => "opaque",
.ErrorSet => "error set",
else => unreachable,
};
try sema.mod.errNoteNonLazy(src_loc, parent, "{s} declared here", .{category});
}
fn zirStoreToBlockPtr(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!void {
const tracy = trace(@src());
defer tracy.end();
@@ -5059,9 +5074,9 @@ fn analyzeAs(
zir_dest_type: Zir.Inst.Ref,
zir_operand: Zir.Inst.Ref,
) CompileError!Air.Inst.Ref {
const dest_type = try sema.resolveType(block, src, zir_dest_type);
const dest_ty = try sema.resolveType(block, src, zir_dest_type);
const operand = sema.resolveInst(zir_operand);
return sema.coerce(block, dest_type, operand, src);
return sema.coerce(block, dest_ty, operand, src);
}
fn zirPtrToInt(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
@@ -5175,21 +5190,21 @@ fn zirIntCast(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air
const operand_src: LazySrcLoc = .{ .node_offset_builtin_call_arg1 = inst_data.src_node };
const extra = sema.code.extraData(Zir.Inst.Bin, inst_data.payload_index).data;
const dest_type = try sema.resolveType(block, dest_ty_src, extra.lhs);
const dest_ty = try sema.resolveType(block, dest_ty_src, extra.lhs);
const operand = sema.resolveInst(extra.rhs);
const dest_is_comptime_int = try sema.checkIntType(block, dest_ty_src, dest_type);
const dest_is_comptime_int = try sema.checkIntType(block, dest_ty_src, dest_ty);
_ = try sema.checkIntType(block, operand_src, sema.typeOf(operand));
if (try sema.isComptimeKnown(block, operand_src, operand)) {
return sema.coerce(block, dest_type, operand, operand_src);
return sema.coerce(block, dest_ty, operand, operand_src);
} else if (dest_is_comptime_int) {
return sema.fail(block, src, "unable to cast runtime value to 'comptime_int'", .{});
}
try sema.requireRuntimeBlock(block, operand_src);
// TODO insert safety check to make sure the value fits in the dest type
return block.addTyOp(.intcast, dest_type, operand);
return block.addTyOp(.intcast, dest_ty, operand);
}
fn zirBitcast(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
@@ -5201,9 +5216,9 @@ fn zirBitcast(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air
const operand_src: LazySrcLoc = .{ .node_offset_builtin_call_arg1 = inst_data.src_node };
const extra = sema.code.extraData(Zir.Inst.Bin, inst_data.payload_index).data;
const dest_type = try sema.resolveType(block, dest_ty_src, extra.lhs);
const dest_ty = try sema.resolveType(block, dest_ty_src, extra.lhs);
const operand = sema.resolveInst(extra.rhs);
return sema.bitCast(block, dest_type, operand, operand_src);
return sema.bitCast(block, dest_ty, operand, operand_src);
}
fn zirFloatCast(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
@@ -5216,17 +5231,17 @@ fn zirFloatCast(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!A
const operand_src: LazySrcLoc = .{ .node_offset_builtin_call_arg1 = inst_data.src_node };
const extra = sema.code.extraData(Zir.Inst.Bin, inst_data.payload_index).data;
const dest_type = try sema.resolveType(block, dest_ty_src, extra.lhs);
const dest_ty = try sema.resolveType(block, dest_ty_src, extra.lhs);
const operand = sema.resolveInst(extra.rhs);
const dest_is_comptime_float = switch (dest_type.zigTypeTag()) {
const dest_is_comptime_float = switch (dest_ty.zigTypeTag()) {
.ComptimeFloat => true,
.Float => false,
else => return sema.fail(
block,
dest_ty_src,
"expected float type, found '{}'",
.{dest_type},
.{dest_ty},
),
};
@@ -5242,19 +5257,19 @@ fn zirFloatCast(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!A
}
if (try sema.isComptimeKnown(block, operand_src, operand)) {
return sema.coerce(block, dest_type, operand, operand_src);
return sema.coerce(block, dest_ty, operand, operand_src);
}
if (dest_is_comptime_float) {
return sema.fail(block, src, "unable to cast runtime value to 'comptime_float'", .{});
}
const target = sema.mod.getTarget();
const src_bits = operand_ty.floatBits(target);
const dst_bits = dest_type.floatBits(target);
const dst_bits = dest_ty.floatBits(target);
if (dst_bits >= src_bits) {
return sema.coerce(block, dest_type, operand, operand_src);
return sema.coerce(block, dest_ty, operand, operand_src);
}
try sema.requireRuntimeBlock(block, operand_src);
return block.addTyOp(.fptrunc, dest_type, operand);
return block.addTyOp(.fptrunc, dest_ty, operand);
}
fn zirElemVal(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
@@ -11265,60 +11280,60 @@ fn elemPtrArray(
fn coerce(
sema: *Sema,
block: *Block,
dest_type_unresolved: Type,
dest_ty_unresolved: Type,
inst: Air.Inst.Ref,
inst_src: LazySrcLoc,
) CompileError!Air.Inst.Ref {
switch (dest_type_unresolved.tag()) {
switch (dest_ty_unresolved.tag()) {
.var_args_param => return sema.coerceVarArgParam(block, inst, inst_src),
.generic_poison => return inst,
else => {},
}
const dest_type_src = inst_src; // TODO better source location
const dest_type = try sema.resolveTypeFields(block, dest_type_src, dest_type_unresolved);
const dest_ty_src = inst_src; // TODO better source location
const dest_ty = try sema.resolveTypeFields(block, dest_ty_src, dest_ty_unresolved);
const inst_ty = sema.typeOf(inst);
// If the types are the same, we can return the operand.
if (dest_type.eql(inst_ty))
if (dest_ty.eql(inst_ty))
return inst;
const arena = sema.arena;
const target = sema.mod.getTarget();
const in_memory_result = coerceInMemoryAllowed(dest_type, inst_ty, false, target);
const in_memory_result = coerceInMemoryAllowed(dest_ty, inst_ty, false, target);
if (in_memory_result == .ok) {
if (try sema.resolveMaybeUndefVal(block, inst_src, inst)) |val| {
// Keep the comptime Value representation; take the new type.
return sema.addConstant(dest_type, val);
return sema.addConstant(dest_ty, val);
}
try sema.requireRuntimeBlock(block, inst_src);
return block.addTyOp(.bitcast, dest_type, inst);
return block.addTyOp(.bitcast, dest_ty, inst);
}
// undefined to anything
if (try sema.resolveMaybeUndefVal(block, inst_src, inst)) |val| {
if (val.isUndef() or inst_ty.zigTypeTag() == .Undefined) {
return sema.addConstant(dest_type, val);
return sema.addConstant(dest_ty, val);
}
}
assert(inst_ty.zigTypeTag() != .Undefined);
// comptime known number to other number
if (try sema.coerceNum(block, dest_type, inst, inst_src)) |some|
if (try sema.coerceNum(block, dest_ty, inst, inst_src)) |some|
return some;
switch (dest_type.zigTypeTag()) {
switch (dest_ty.zigTypeTag()) {
.Optional => {
// null to ?T
if (inst_ty.zigTypeTag() == .Null) {
return sema.addConstant(dest_type, Value.initTag(.null_value));
return sema.addConstant(dest_ty, Value.initTag(.null_value));
}
// T to ?T
var buf: Type.Payload.ElemType = undefined;
const child_type = dest_type.optionalChild(&buf);
const child_type = dest_ty.optionalChild(&buf);
const intermediate = try sema.coerce(block, child_type, inst, inst_src);
return sema.wrapOptional(block, dest_type, intermediate, inst_src);
return sema.wrapOptional(block, dest_ty, intermediate, inst_src);
},
.Pointer => {
// Function body to function pointer.
@@ -11326,7 +11341,7 @@ fn coerce(
const fn_val = try sema.resolveConstValue(block, inst_src, inst);
const fn_decl = fn_val.castTag(.function).?.data.owner_decl;
const inst_as_ptr = try sema.analyzeDeclRef(fn_decl);
return sema.coerce(block, dest_type, inst_as_ptr, inst_src);
return sema.coerce(block, dest_ty, inst_as_ptr, inst_src);
}
// Coercions where the source is a single pointer to an array.
@@ -11335,38 +11350,38 @@ fn coerce(
const array_type = inst_ty.elemType();
if (array_type.zigTypeTag() != .Array) break :src_array_ptr;
const array_elem_type = array_type.elemType();
const dest_is_mut = !dest_type.isConstPtr();
const dest_is_mut = !dest_ty.isConstPtr();
if (inst_ty.isConstPtr() and dest_is_mut) break :src_array_ptr;
if (inst_ty.isVolatilePtr() and !dest_type.isVolatilePtr()) break :src_array_ptr;
if (inst_ty.ptrAddressSpace() != dest_type.ptrAddressSpace()) break :src_array_ptr;
if (inst_ty.isVolatilePtr() and !dest_ty.isVolatilePtr()) break :src_array_ptr;
if (inst_ty.ptrAddressSpace() != dest_ty.ptrAddressSpace()) break :src_array_ptr;
const dst_elem_type = dest_type.elemType();
const dst_elem_type = dest_ty.elemType();
switch (coerceInMemoryAllowed(dst_elem_type, array_elem_type, dest_is_mut, target)) {
.ok => {},
.no_match => break :src_array_ptr,
}
switch (dest_type.ptrSize()) {
switch (dest_ty.ptrSize()) {
.Slice => {
// *[N]T to []T
return sema.coerceArrayPtrToSlice(block, dest_type, inst, inst_src);
return sema.coerceArrayPtrToSlice(block, dest_ty, inst, inst_src);
},
.C => {
// *[N]T to [*c]T
return sema.coerceArrayPtrToMany(block, dest_type, inst, inst_src);
return sema.coerceArrayPtrToMany(block, dest_ty, inst, inst_src);
},
.Many => {
// *[N]T to [*]T
// *[N:s]T to [*:s]T
// *[N:s]T to [*]T
if (dest_type.sentinel()) |dst_sentinel| {
if (dest_ty.sentinel()) |dst_sentinel| {
if (array_type.sentinel()) |src_sentinel| {
if (src_sentinel.eql(dst_sentinel, dst_elem_type)) {
return sema.coerceArrayPtrToMany(block, dest_type, inst, inst_src);
return sema.coerceArrayPtrToMany(block, dest_ty, inst, inst_src);
}
}
} else {
return sema.coerceArrayPtrToMany(block, dest_type, inst, inst_src);
return sema.coerceArrayPtrToMany(block, dest_ty, inst, inst_src);
}
},
.One => {},
@@ -11378,14 +11393,14 @@ fn coerce(
if (inst_ty.zigTypeTag() == .Int) {
assert(!(try sema.isComptimeKnown(block, inst_src, inst))); // handled above
const dst_info = dest_type.intInfo(target);
const dst_info = dest_ty.intInfo(target);
const src_info = inst_ty.intInfo(target);
if ((src_info.signedness == dst_info.signedness and dst_info.bits >= src_info.bits) or
// small enough unsigned ints can get casted to large enough signed ints
(dst_info.signedness == .signed and dst_info.bits > src_info.bits))
{
try sema.requireRuntimeBlock(block, inst_src);
return block.addTyOp(.intcast, dest_type, inst);
return block.addTyOp(.intcast, dest_ty, inst);
}
}
},
@@ -11395,10 +11410,10 @@ fn coerce(
assert(!(try sema.isComptimeKnown(block, inst_src, inst))); // handled above
const src_bits = inst_ty.floatBits(target);
const dst_bits = dest_type.floatBits(target);
const dst_bits = dest_ty.floatBits(target);
if (dst_bits >= src_bits) {
try sema.requireRuntimeBlock(block, inst_src);
return block.addTyOp(.fpext, dest_type, inst);
return block.addTyOp(.fpext, dest_ty, inst);
}
}
},
@@ -11407,7 +11422,7 @@ fn coerce(
// enum literal to enum
const val = try sema.resolveConstValue(block, inst_src, inst);
const bytes = val.castTag(.enum_literal).?.data;
const resolved_dest_type = try sema.resolveTypeFields(block, inst_src, dest_type);
const resolved_dest_type = try sema.resolveTypeFields(block, inst_src, dest_ty);
const field_index = resolved_dest_type.enumFieldIndex(bytes) orelse {
const msg = msg: {
const msg = try sema.errMsg(
@@ -11435,20 +11450,24 @@ fn coerce(
.Union => blk: {
// union to its own tag type
const union_tag_ty = inst_ty.unionTagType() orelse break :blk;
if (union_tag_ty.eql(dest_type)) {
return sema.unionToTag(block, dest_type, inst, inst_src);
if (union_tag_ty.eql(dest_ty)) {
return sema.unionToTag(block, dest_ty, inst, inst_src);
}
},
else => {},
},
.ErrorUnion => {
// T to E!T or E to E!T
return sema.wrapErrorUnion(block, dest_type, inst, inst_src);
return sema.wrapErrorUnion(block, dest_ty, inst, inst_src);
},
.Union => switch (inst_ty.zigTypeTag()) {
.Enum, .EnumLiteral => return sema.coerceEnumToUnion(block, dest_ty, dest_ty_src, inst, inst_src),
else => {},
},
else => {},
}
return sema.fail(block, inst_src, "expected {}, found {}", .{ dest_type, inst_ty });
return sema.fail(block, inst_src, "expected {}, found {}", .{ dest_ty, inst_ty });
}
const InMemoryCoercionResult = enum {
@@ -11467,14 +11486,14 @@ const InMemoryCoercionResult = enum {
/// * sentinel-terminated pointers can coerce into `[*]`
/// TODO improve this function to report recursive compile errors like it does in stage1.
/// look at the function types_match_const_cast_only
fn coerceInMemoryAllowed(dest_type: Type, src_type: Type, dest_is_mut: bool, target: std.Target) InMemoryCoercionResult {
if (dest_type.eql(src_type))
fn coerceInMemoryAllowed(dest_ty: Type, src_type: Type, dest_is_mut: bool, target: std.Target) InMemoryCoercionResult {
if (dest_ty.eql(src_type))
return .ok;
if (dest_type.zigTypeTag() == .Pointer and
if (dest_ty.zigTypeTag() == .Pointer and
src_type.zigTypeTag() == .Pointer)
{
const dest_info = dest_type.ptrInfo().data;
const dest_info = dest_ty.ptrInfo().data;
const src_info = src_type.ptrInfo().data;
const child = coerceInMemoryAllowed(dest_info.pointee_type, src_info.pointee_type, dest_info.mutable, target);
@@ -11514,7 +11533,7 @@ fn coerceInMemoryAllowed(dest_type: Type, src_type: Type, dest_is_mut: bool, tar
return .no_match;
}
if (dest_type.hasCodeGenBits() != src_type.hasCodeGenBits()) {
if (dest_ty.hasCodeGenBits() != src_type.hasCodeGenBits()) {
return .no_match;
}
@@ -11532,7 +11551,7 @@ fn coerceInMemoryAllowed(dest_type: Type, src_type: Type, dest_is_mut: bool, tar
!dest_info.pointee_type.eql(src_info.pointee_type))
{
const src_align = src_type.ptrAlignment(target);
const dest_align = dest_type.ptrAlignment(target);
const dest_align = dest_ty.ptrAlignment(target);
if (dest_align > src_align) {
return .no_match;
@@ -11550,14 +11569,14 @@ fn coerceInMemoryAllowed(dest_type: Type, src_type: Type, dest_is_mut: bool, tar
fn coerceNum(
sema: *Sema,
block: *Block,
dest_type: Type,
dest_ty: Type,
inst: Air.Inst.Ref,
inst_src: LazySrcLoc,
) CompileError!?Air.Inst.Ref {
const val = (try sema.resolveDefinedValue(block, inst_src, inst)) orelse return null;
const inst_ty = sema.typeOf(inst);
const src_zig_tag = inst_ty.zigTypeTag();
const dst_zig_tag = dest_type.zigTypeTag();
const dst_zig_tag = dest_ty.zigTypeTag();
const target = sema.mod.getTarget();
@@ -11565,37 +11584,37 @@ fn coerceNum(
.ComptimeInt, .Int => switch (src_zig_tag) {
.Float, .ComptimeFloat => {
if (val.floatHasFraction()) {
return sema.fail(block, inst_src, "fractional component prevents float value {} from coercion to type '{}'", .{ val, dest_type });
return sema.fail(block, inst_src, "fractional component prevents float value {} from coercion to type '{}'", .{ val, dest_ty });
}
return sema.fail(block, inst_src, "TODO float to int", .{});
},
.Int, .ComptimeInt => {
if (!val.intFitsInType(dest_type, target)) {
return sema.fail(block, inst_src, "type {} cannot represent integer value {}", .{ dest_type, val });
if (!val.intFitsInType(dest_ty, target)) {
return sema.fail(block, inst_src, "type {} cannot represent integer value {}", .{ dest_ty, val });
}
return try sema.addConstant(dest_type, val);
return try sema.addConstant(dest_ty, val);
},
else => {},
},
.ComptimeFloat, .Float => switch (src_zig_tag) {
.ComptimeFloat => {
const result_val = try val.floatCast(sema.arena, dest_type);
return try sema.addConstant(dest_type, result_val);
const result_val = try val.floatCast(sema.arena, dest_ty);
return try sema.addConstant(dest_ty, result_val);
},
.Float => {
const result_val = try val.floatCast(sema.arena, dest_type);
if (!val.eql(result_val, dest_type)) {
const result_val = try val.floatCast(sema.arena, dest_ty);
if (!val.eql(result_val, dest_ty)) {
return sema.fail(
block,
inst_src,
"type {} cannot represent float value {}",
.{ dest_type, val },
.{ dest_ty, val },
);
}
return try sema.addConstant(dest_type, result_val);
return try sema.addConstant(dest_ty, result_val);
},
.Int, .ComptimeInt => {
const result_val = try val.intToFloat(sema.arena, dest_type, target);
const result_val = try val.intToFloat(sema.arena, dest_ty, target);
// TODO implement this compile error
//const int_again_val = try result_val.floatToInt(sema.arena, inst_ty);
//if (!int_again_val.eql(val, inst_ty)) {
@@ -11603,10 +11622,10 @@ fn coerceNum(
// block,
// inst_src,
// "type {} cannot represent integer value {}",
// .{ dest_type, val },
// .{ dest_ty, val },
// );
//}
return try sema.addConstant(dest_type, result_val);
return try sema.addConstant(dest_ty, result_val);
},
else => {},
},
@@ -11816,31 +11835,66 @@ fn beginComptimePtrMutation(
.field_ptr => {
const field_ptr = ptr_val.castTag(.field_ptr).?.data;
var parent = try beginComptimePtrMutation(sema, block, src, field_ptr.container_ptr);
const field_ty = parent.ty.structFieldType(field_ptr.field_index);
const field_index = @intCast(u32, field_ptr.field_index);
const field_ty = parent.ty.structFieldType(field_index);
switch (parent.val.tag()) {
.undef => {
// A struct has been initialized to undefined at comptime and now we
// A struct or union has been initialized to undefined at comptime and now we
// are for the first time setting a field. We must change the representation
// of the struct from `undef` to `struct`.
// of the struct/union from `undef` to `struct`/`union`.
const arena = parent.beginArena(sema.gpa);
defer parent.finishArena();
const fields = try arena.alloc(Value, parent.ty.structFieldCount());
mem.set(Value, fields, Value.undef);
switch (parent.ty.zigTypeTag()) {
.Struct => {
const fields = try arena.alloc(Value, parent.ty.structFieldCount());
mem.set(Value, fields, Value.undef);
parent.val.* = try Value.Tag.@"struct".create(arena, fields);
parent.val.* = try Value.Tag.@"struct".create(arena, fields);
return ComptimePtrMutationKit{
.decl_ref_mut = parent.decl_ref_mut,
.val = &fields[field_ptr.field_index],
.ty = field_ty,
};
return ComptimePtrMutationKit{
.decl_ref_mut = parent.decl_ref_mut,
.val = &fields[field_index],
.ty = field_ty,
};
},
.Union => {
const payload = try arena.create(Value.Payload.Union);
payload.* = .{ .data = .{
.tag = try Value.Tag.enum_field_index.create(arena, field_index),
.val = Value.undef,
} };
parent.val.* = Value.initPayload(&payload.base);
return ComptimePtrMutationKit{
.decl_ref_mut = parent.decl_ref_mut,
.val = &payload.data.val,
.ty = field_ty,
};
},
else => unreachable,
}
},
.@"struct" => return ComptimePtrMutationKit{
.decl_ref_mut = parent.decl_ref_mut,
.val = &parent.val.castTag(.@"struct").?.data[field_ptr.field_index],
.val = &parent.val.castTag(.@"struct").?.data[field_index],
.ty = field_ty,
},
.@"union" => {
// We need to set the active field of the union.
const arena = parent.beginArena(sema.gpa);
defer parent.finishArena();
const payload = &parent.val.castTag(.@"union").?.data;
payload.tag = try Value.Tag.enum_field_index.create(arena, field_index);
return ComptimePtrMutationKit{
.decl_ref_mut = parent.decl_ref_mut,
.val = &payload.val,
.ty = field_ty,
};
},
else => unreachable,
}
@@ -11855,7 +11909,7 @@ fn beginComptimePtrMutation(
fn bitCast(
sema: *Sema,
block: *Block,
dest_type: Type,
dest_ty: Type,
inst: Air.Inst.Ref,
inst_src: LazySrcLoc,
) CompileError!Air.Inst.Ref {
@@ -11863,41 +11917,132 @@ fn bitCast(
if (try sema.resolveMaybeUndefVal(block, inst_src, inst)) |val| {
const target = sema.mod.getTarget();
const old_ty = sema.typeOf(inst);
const result_val = try val.bitCast(old_ty, dest_type, target, sema.gpa, sema.arena);
return sema.addConstant(dest_type, result_val);
const result_val = try val.bitCast(old_ty, dest_ty, target, sema.gpa, sema.arena);
return sema.addConstant(dest_ty, result_val);
}
try sema.requireRuntimeBlock(block, inst_src);
return block.addTyOp(.bitcast, dest_type, inst);
return block.addTyOp(.bitcast, dest_ty, inst);
}
fn coerceArrayPtrToSlice(
sema: *Sema,
block: *Block,
dest_type: Type,
dest_ty: Type,
inst: Air.Inst.Ref,
inst_src: LazySrcLoc,
) CompileError!Air.Inst.Ref {
if (try sema.resolveDefinedValue(block, inst_src, inst)) |val| {
// The comptime Value representation is compatible with both types.
return sema.addConstant(dest_type, val);
return sema.addConstant(dest_ty, val);
}
try sema.requireRuntimeBlock(block, inst_src);
return block.addTyOp(.array_to_slice, dest_type, inst);
return block.addTyOp(.array_to_slice, dest_ty, inst);
}
fn coerceArrayPtrToMany(
sema: *Sema,
block: *Block,
dest_type: Type,
dest_ty: Type,
inst: Air.Inst.Ref,
inst_src: LazySrcLoc,
) !Air.Inst.Ref {
if (try sema.resolveDefinedValue(block, inst_src, inst)) |val| {
// The comptime Value representation is compatible with both types.
return sema.addConstant(dest_type, val);
return sema.addConstant(dest_ty, val);
}
try sema.requireRuntimeBlock(block, inst_src);
return sema.bitCast(block, dest_type, inst, inst_src);
return sema.bitCast(block, dest_ty, inst, inst_src);
}
fn coerceEnumToUnion(
sema: *Sema,
block: *Block,
union_ty: Type,
union_ty_src: LazySrcLoc,
inst: Air.Inst.Ref,
inst_src: LazySrcLoc,
) !Air.Inst.Ref {
const inst_ty = sema.typeOf(inst);
const tag_ty = union_ty.unionTagType() orelse {
const msg = msg: {
const msg = try sema.errMsg(block, inst_src, "expected {}, found {}", .{
union_ty, inst_ty,
});
errdefer msg.destroy(sema.gpa);
try sema.errNote(block, union_ty_src, msg, "cannot coerce enum to untagged union", .{});
try sema.addDeclaredHereNote(msg, union_ty);
break :msg msg;
};
return sema.failWithOwnedErrorMsg(msg);
};
const enum_tag = try sema.coerce(block, tag_ty, inst, inst_src);
if (try sema.resolveDefinedValue(block, inst_src, enum_tag)) |val| {
const union_obj = union_ty.cast(Type.Payload.Union).?.data;
const field_index = union_obj.tag_ty.enumTagFieldIndex(val) orelse {
const msg = msg: {
const msg = try sema.errMsg(block, inst_src, "union {} has no tag with value {}", .{
union_ty, val,
});
errdefer msg.destroy(sema.gpa);
try sema.addDeclaredHereNote(msg, union_ty);
break :msg msg;
};
return sema.failWithOwnedErrorMsg(msg);
};
const field = union_obj.fields.values()[field_index];
const field_ty = try sema.resolveTypeFields(block, inst_src, field.ty);
const opv = (try sema.typeHasOnePossibleValue(block, inst_src, field_ty)) orelse {
// TODO resolve the field names and include in the error message,
// also instead of 'union declared here' make it 'field "foo" declared here'.
const msg = msg: {
const msg = try sema.errMsg(block, inst_src, "coercion to union {} must initialize {} field", .{
union_ty, field_ty,
});
errdefer msg.destroy(sema.gpa);
try sema.addDeclaredHereNote(msg, union_ty);
break :msg msg;
};
return sema.failWithOwnedErrorMsg(msg);
};
return sema.addConstant(union_ty, try Value.Tag.@"union".create(sema.arena, .{
.tag = val,
.val = opv,
}));
}
try sema.requireRuntimeBlock(block, inst_src);
if (tag_ty.isNonexhaustiveEnum()) {
const msg = msg: {
const msg = try sema.errMsg(block, inst_src, "runtime coercion to union {} from non-exhaustive enum", .{
union_ty,
});
errdefer msg.destroy(sema.gpa);
try sema.addDeclaredHereNote(msg, tag_ty);
break :msg msg;
};
return sema.failWithOwnedErrorMsg(msg);
}
// If the union has all fields 0 bits, the union value is just the enum value.
if (union_ty.unionHasAllZeroBitFieldTypes()) {
return block.addTyOp(.bitcast, union_ty, enum_tag);
}
// TODO resolve the field names and add a hint that says "field 'foo' has type 'bar'"
// instead of the "union declared here" hint
const msg = msg: {
const msg = try sema.errMsg(block, inst_src, "runtime coercion to union {} which has non-void fields", .{
union_ty,
});
errdefer msg.destroy(sema.gpa);
try sema.addDeclaredHereNote(msg, union_ty);
break :msg msg;
};
return sema.failWithOwnedErrorMsg(msg);
}
fn analyzeDeclVal(
@@ -12223,7 +12368,7 @@ fn cmpNumeric(
const target = sema.mod.getTarget();
if (lhs_is_float and rhs_is_float) {
// Implicit cast the smaller one to the larger one.
const dest_type = x: {
const dest_ty = x: {
if (lhs_ty_tag == .ComptimeFloat) {
break :x rhs_ty;
} else if (rhs_ty_tag == .ComptimeFloat) {
@@ -12235,8 +12380,8 @@ fn cmpNumeric(
break :x rhs_ty;
}
};
const casted_lhs = try sema.coerce(block, dest_type, lhs, lhs_src);
const casted_rhs = try sema.coerce(block, dest_type, rhs, rhs_src);
const casted_lhs = try sema.coerce(block, dest_ty, lhs, lhs_src);
const casted_rhs = try sema.coerce(block, dest_ty, rhs, rhs_src);
return block.addBinOp(Air.Inst.Tag.fromCmpOp(op), casted_lhs, casted_rhs);
}
// For mixed unsigned integer sizes, implicit cast both operands to the larger integer.
@@ -12327,7 +12472,7 @@ fn cmpNumeric(
rhs_bits = int_info.bits + @boolToInt(int_info.signedness == .unsigned and dest_int_is_signed);
}
const dest_type = if (dest_float_type) |ft| ft else blk: {
const dest_ty = if (dest_float_type) |ft| ft else blk: {
const max_bits = std.math.max(lhs_bits, rhs_bits);
const casted_bits = std.math.cast(u16, max_bits) catch |err| switch (err) {
error.Overflow => return sema.fail(block, src, "{d} exceeds maximum integer bit count", .{max_bits}),
@@ -12335,8 +12480,8 @@ fn cmpNumeric(
const signedness: std.builtin.Signedness = if (dest_int_is_signed) .signed else .unsigned;
break :blk try Module.makeIntType(sema.arena, signedness, casted_bits);
};
const casted_lhs = try sema.coerce(block, dest_type, lhs, lhs_src);
const casted_rhs = try sema.coerce(block, dest_type, rhs, rhs_src);
const casted_lhs = try sema.coerce(block, dest_ty, lhs, lhs_src);
const casted_rhs = try sema.coerce(block, dest_ty, rhs, rhs_src);
return block.addBinOp(Air.Inst.Tag.fromCmpOp(op), casted_lhs, casted_rhs);
}
@@ -12344,32 +12489,32 @@ fn cmpNumeric(
fn wrapOptional(
sema: *Sema,
block: *Block,
dest_type: Type,
dest_ty: Type,
inst: Air.Inst.Ref,
inst_src: LazySrcLoc,
) !Air.Inst.Ref {
if (try sema.resolveMaybeUndefVal(block, inst_src, inst)) |val| {
return sema.addConstant(dest_type, try Value.Tag.opt_payload.create(sema.arena, val));
return sema.addConstant(dest_ty, try Value.Tag.opt_payload.create(sema.arena, val));
}
try sema.requireRuntimeBlock(block, inst_src);
return block.addTyOp(.wrap_optional, dest_type, inst);
return block.addTyOp(.wrap_optional, dest_ty, inst);
}
fn wrapErrorUnion(
sema: *Sema,
block: *Block,
dest_type: Type,
dest_ty: Type,
inst: Air.Inst.Ref,
inst_src: LazySrcLoc,
) !Air.Inst.Ref {
const inst_ty = sema.typeOf(inst);
const dest_err_set_ty = dest_type.errorUnionSet();
const dest_payload_ty = dest_type.errorUnionPayload();
const dest_err_set_ty = dest_ty.errorUnionSet();
const dest_payload_ty = dest_ty.errorUnionPayload();
if (try sema.resolveMaybeUndefVal(block, inst_src, inst)) |val| {
if (inst_ty.zigTypeTag() != .ErrorSet) {
_ = try sema.coerce(block, dest_payload_ty, inst, inst_src);
return sema.addConstant(dest_type, try Value.Tag.eu_payload.create(sema.arena, val));
return sema.addConstant(dest_ty, try Value.Tag.eu_payload.create(sema.arena, val));
}
switch (dest_err_set_ty.tag()) {
.anyerror => {},
@@ -12417,7 +12562,7 @@ fn wrapErrorUnion(
},
else => unreachable,
}
return sema.addConstant(dest_type, val);
return sema.addConstant(dest_ty, val);
}
try sema.requireRuntimeBlock(block, inst_src);
@@ -12425,25 +12570,25 @@ fn wrapErrorUnion(
// we are coercing from E to E!T
if (inst_ty.zigTypeTag() == .ErrorSet) {
var coerced = try sema.coerce(block, dest_err_set_ty, inst, inst_src);
return block.addTyOp(.wrap_errunion_err, dest_type, coerced);
return block.addTyOp(.wrap_errunion_err, dest_ty, coerced);
} else {
var coerced = try sema.coerce(block, dest_payload_ty, inst, inst_src);
return block.addTyOp(.wrap_errunion_payload, dest_type, coerced);
return block.addTyOp(.wrap_errunion_payload, dest_ty, coerced);
}
}
fn unionToTag(
sema: *Sema,
block: *Block,
dest_type: Type,
dest_ty: Type,
un: Air.Inst.Ref,
un_src: LazySrcLoc,
) !Air.Inst.Ref {
if (try sema.resolveMaybeUndefVal(block, un_src, un)) |un_val| {
return sema.addConstant(dest_type, un_val.unionTag());
return sema.addConstant(dest_ty, un_val.unionTag());
}
try sema.requireRuntimeBlock(block, un_src);
return block.addTyOp(.get_union_tag, dest_type, un);
return block.addTyOp(.get_union_tag, dest_ty, un);
}
fn resolvePeerTypes(
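
As an illustrative aside (not part of the diff), the `beginComptimePtrMutation`
changes above allow writing through a pointer to a union field inside a
`comptime` block. The `Number` type below is hypothetical:

    const std = @import("std");
    const expect = std.testing.expect;

    const Number = union {
        int: i32,
        float: f64,
    };

    test "comptime mutation through a union field pointer" {
        comptime {
            var n = Number{ .int = 0 };
            const p = &n.int; // field pointer into a comptime var
            p.* = 42;         // mutates the union payload in place
            try expect(n.int == 42);
        }
    }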

View File

@@ -848,27 +848,79 @@ pub const DeclGen = struct {
return llvm_struct_ty;
},
.Union => {
const union_obj = t.castTag(.@"union").?.data;
assert(union_obj.haveFieldTypes());
const gop = try dg.object.type_map.getOrPut(gpa, t);
if (gop.found_existing) return gop.value_ptr.*;
const enum_tag_ty = union_obj.tag_ty;
const enum_tag_llvm_ty = try dg.llvmType(enum_tag_ty);
if (union_obj.onlyTagHasCodegenBits()) {
return enum_tag_llvm_ty;
}
// The Type memory is ephemeral; since we want to store a longer-lived
// reference, we need to copy it here.
gop.key_ptr.* = try t.copy(&dg.object.type_map_arena.allocator);
const union_obj = t.cast(Type.Payload.Union).?.data;
const target = dg.module.getTarget();
const most_aligned_field_index = union_obj.mostAlignedField(target);
const most_aligned_field = union_obj.fields.values()[most_aligned_field_index];
// TODO handle when the most aligned field is different than the
// biggest sized field.
if (t.unionTagType()) |enum_tag_ty| {
const enum_tag_llvm_ty = try dg.llvmType(enum_tag_ty);
const layout = union_obj.getLayout(target, true);
const llvm_fields = [_]*const llvm.Type{
try dg.llvmType(most_aligned_field.ty),
enum_tag_llvm_ty,
};
return dg.context.structType(&llvm_fields, llvm_fields.len, .False);
if (layout.payload_size == 0) {
gop.value_ptr.* = enum_tag_llvm_ty;
return enum_tag_llvm_ty;
}
const name = try union_obj.getFullyQualifiedName(gpa);
defer gpa.free(name);
const llvm_union_ty = dg.context.structCreateNamed(name);
gop.value_ptr.* = llvm_union_ty; // must be done before any recursive calls
const aligned_field = union_obj.fields.values()[layout.most_aligned_field];
const llvm_aligned_field_ty = try dg.llvmType(aligned_field.ty);
const llvm_payload_ty = t: {
if (layout.most_aligned_field_size == layout.payload_size) {
break :t llvm_aligned_field_ty;
}
const padding_len = @intCast(c_uint, layout.payload_size - layout.most_aligned_field_size);
const fields: [2]*const llvm.Type = .{
llvm_aligned_field_ty,
dg.context.intType(8).arrayType(padding_len),
};
break :t dg.context.structType(&fields, fields.len, .False);
};
if (layout.tag_size == 0) {
var llvm_fields: [1]*const llvm.Type = .{llvm_payload_ty};
llvm_union_ty.structSetBody(&llvm_fields, llvm_fields.len, .False);
return llvm_union_ty;
}
// Put the tag before or after the payload depending on which one's
// alignment is greater.
var llvm_fields: [2]*const llvm.Type = undefined;
if (layout.tag_align >= layout.payload_align) {
llvm_fields[0] = enum_tag_llvm_ty;
llvm_fields[1] = llvm_payload_ty;
} else {
llvm_fields[0] = llvm_payload_ty;
llvm_fields[1] = enum_tag_llvm_ty;
}
llvm_union_ty.structSetBody(&llvm_fields, llvm_fields.len, .False);
return llvm_union_ty;
}
// Untagged union
const layout = union_obj.getLayout(target, false);
const name = try union_obj.getFullyQualifiedName(gpa);
defer gpa.free(name);
const llvm_union_ty = dg.context.structCreateNamed(name);
gop.value_ptr.* = llvm_union_ty; // must be done before any recursive calls
const big_field = union_obj.fields.values()[layout.biggest_field];
const llvm_big_field_ty = try dg.llvmType(big_field.ty);
var llvm_fields: [1]*const llvm.Type = .{llvm_big_field_ty};
llvm_union_ty.structSetBody(&llvm_fields, llvm_fields.len, .False);
return llvm_union_ty;
},
.Fn => {
const fn_info = t.fnInfo();
@@ -983,36 +1035,8 @@ pub const DeclGen = struct {
return int.constBitCast(llvm_ty);
},
.Pointer => switch (tv.val.tag()) {
.decl_ref => {
if (tv.ty.isSlice()) {
var buf: Type.SlicePtrFieldTypeBuffer = undefined;
const ptr_ty = tv.ty.slicePtrFieldType(&buf);
var slice_len: Value.Payload.U64 = .{
.base = .{ .tag = .int_u64 },
.data = tv.val.sliceLen(),
};
const fields: [2]*const llvm.Value = .{
try self.genTypedValue(.{
.ty = ptr_ty,
.val = tv.val,
}),
try self.genTypedValue(.{
.ty = Type.initTag(.usize),
.val = Value.initPayload(&slice_len.base),
}),
};
return self.context.constStruct(&fields, fields.len, .False);
} else {
const decl = tv.val.castTag(.decl_ref).?.data;
decl.alive = true;
const llvm_type = try self.llvmType(tv.ty);
const llvm_val = if (decl.ty.zigTypeTag() == .Fn)
try self.resolveLlvmFunction(decl)
else
try self.resolveGlobalDecl(decl);
return llvm_val.constBitCast(llvm_type);
}
},
.decl_ref_mut => return lowerDeclRefValue(self, tv, tv.val.castTag(.decl_ref_mut).?.data.decl),
.decl_ref => return lowerDeclRefValue(self, tv, tv.val.castTag(.decl_ref).?.data),
.variable => {
const decl = tv.val.castTag(.variable).?.data.owner_decl;
decl.alive = true;
@@ -1192,6 +1216,49 @@ pub const DeclGen = struct {
@intCast(c_uint, llvm_fields.items.len),
);
},
.Union => {
const llvm_union_ty = try self.llvmType(tv.ty);
const tag_and_val = tv.val.castTag(.@"union").?.data;
const target = self.module.getTarget();
const layout = tv.ty.unionGetLayout(target);
if (layout.payload_size == 0) {
return genTypedValue(self, .{ .ty = tv.ty.unionTagType().?, .val = tag_and_val.tag });
}
const field_ty = tv.ty.unionFieldType(tag_and_val.tag);
const payload = p: {
const field = try genTypedValue(self, .{ .ty = field_ty, .val = tag_and_val.val });
const field_size = field_ty.abiSize(target);
if (field_size == layout.payload_size) {
break :p field;
}
const padding_len = @intCast(c_uint, layout.payload_size - field_size);
const fields: [2]*const llvm.Value = .{
field, self.context.intType(8).arrayType(padding_len).getUndef(),
};
break :p self.context.constStruct(&fields, fields.len, .False);
};
if (layout.tag_size == 0) {
const llvm_payload_ty = llvm_union_ty.structGetTypeAtIndex(0);
const fields: [1]*const llvm.Value = .{payload.constBitCast(llvm_payload_ty)};
return llvm_union_ty.constNamedStruct(&fields, fields.len);
}
const llvm_tag_value = try genTypedValue(self, .{
.ty = tv.ty.unionTagType().?,
.val = tag_and_val.tag,
});
var fields: [2]*const llvm.Value = undefined;
if (layout.tag_align >= layout.payload_align) {
fields[0] = llvm_tag_value;
fields[1] = payload.constBitCast(llvm_union_ty.structGetTypeAtIndex(1));
} else {
fields[0] = payload.constBitCast(llvm_union_ty.structGetTypeAtIndex(0));
fields[1] = llvm_tag_value;
}
return llvm_union_ty.constNamedStruct(&fields, fields.len);
},
.ComptimeInt => unreachable,
.ComptimeFloat => unreachable,
.Type => unreachable,
@@ -1203,7 +1270,6 @@ pub const DeclGen = struct {
.BoundFn => unreachable,
.Opaque => unreachable,
.Union,
.Frame,
.AnyFrame,
.Vector,
@@ -1211,6 +1277,40 @@ pub const DeclGen = struct {
}
}
fn lowerDeclRefValue(
self: *DeclGen,
tv: TypedValue,
decl: *Module.Decl,
) Error!*const llvm.Value {
if (tv.ty.isSlice()) {
var buf: Type.SlicePtrFieldTypeBuffer = undefined;
const ptr_ty = tv.ty.slicePtrFieldType(&buf);
var slice_len: Value.Payload.U64 = .{
.base = .{ .tag = .int_u64 },
.data = tv.val.sliceLen(),
};
const fields: [2]*const llvm.Value = .{
try self.genTypedValue(.{
.ty = ptr_ty,
.val = tv.val,
}),
try self.genTypedValue(.{
.ty = Type.initTag(.usize),
.val = Value.initPayload(&slice_len.base),
}),
};
return self.context.constStruct(&fields, fields.len, .False);
}
decl.alive = true;
const llvm_type = try self.llvmType(tv.ty);
const llvm_val = if (decl.ty.zigTypeTag() == .Fn)
try self.resolveLlvmFunction(decl)
else
try self.resolveGlobalDecl(decl);
return llvm_val.constBitCast(llvm_type);
}
fn addAttr(dg: DeclGen, val: *const llvm.Value, index: llvm.AttributeIndex, name: []const u8) void {
return dg.addAttrInt(val, index, name, 0);
}
@@ -2917,25 +3017,45 @@ pub const FuncGen = struct {
fn airSetUnionTag(self: *FuncGen, inst: Air.Inst.Index) !?*const llvm.Value {
const bin_op = self.air.instructions.items(.data)[inst].bin_op;
const un_ty = self.air.typeOf(bin_op.lhs).childType();
const target = self.dg.module.getTarget();
const layout = un_ty.unionGetLayout(target);
if (layout.tag_size == 0) return null;
const union_ptr = try self.resolveInst(bin_op.lhs);
// TODO handle when onlyTagHasCodegenBits() == true
const new_tag = try self.resolveInst(bin_op.rhs);
const tag_field_ptr = self.builder.buildStructGEP(union_ptr, 1, "");
if (layout.payload_size == 0) {
_ = self.builder.buildStore(new_tag, union_ptr);
return null;
}
const tag_index = @boolToInt(layout.tag_align < layout.payload_align);
const tag_field_ptr = self.builder.buildStructGEP(union_ptr, tag_index, "");
_ = self.builder.buildStore(new_tag, tag_field_ptr);
return null;
}
fn airGetUnionTag(self: *FuncGen, inst: Air.Inst.Index) !?*const llvm.Value {
if (self.liveness.isUnused(inst))
return null;
if (self.liveness.isUnused(inst)) return null;
const ty_op = self.air.instructions.items(.data)[inst].ty_op;
const un_ty = self.air.typeOf(ty_op.operand);
const un = try self.resolveInst(ty_op.operand);
_ = un_ty; // TODO handle when onlyTagHasCodegenBits() == true and other union forms
return self.builder.buildExtractValue(un, 1, "");
const target = self.dg.module.getTarget();
const layout = un_ty.unionGetLayout(target);
if (layout.tag_size == 0) return null;
const union_handle = try self.resolveInst(ty_op.operand);
if (isByRef(un_ty)) {
if (layout.payload_size == 0) {
return self.builder.buildLoad(union_handle, "");
}
const tag_index = @boolToInt(layout.tag_align < layout.payload_align);
const tag_field_ptr = self.builder.buildStructGEP(union_handle, tag_index, "");
return self.builder.buildLoad(tag_field_ptr, "");
} else {
if (layout.payload_size == 0) {
return union_handle;
}
const tag_index = @boolToInt(layout.tag_align < layout.payload_align);
return self.builder.buildExtractValue(union_handle, tag_index, "");
}
}
fn airClzCtz(self: *FuncGen, inst: Air.Inst.Index, prefix: [*:0]const u8) !?*const llvm.Value {
@@ -3004,7 +3124,10 @@ pub const FuncGen = struct {
if (!field.ty.hasCodeGenBits()) {
return null;
}
const union_field_ptr = self.builder.buildStructGEP(union_ptr, 0, "");
const target = self.dg.module.getTarget();
const layout = union_ty.unionGetLayout(target);
const payload_index = @boolToInt(layout.tag_align >= layout.payload_align);
const union_field_ptr = self.builder.buildStructGEP(union_ptr, payload_index, "");
return self.builder.buildBitCast(union_field_ptr, result_llvm_ty, "");
}
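
As an illustrative aside (not part of the diff), a small behavior test that
would exercise the lowering paths updated above (union field pointer, tag
store, tag read). The `U` type is hypothetical, and the comments describe the
intended lowering rather than guaranteed codegen:

    const std = @import("std");
    const expect = std.testing.expect;

    const U = union(enum) {
        x: u32,
        y: u8,
    };

    test "runtime tagged union field and tag access" {
        var u = U{ .x = 1 };
        try expect(u == .x);
        try expect(u.x == 1);

        // Assigning a differently tagged value stores a new tag alongside
        // the payload, laid out according to Union.getLayout.
        u = U{ .y = 2 };
        try expect(u == .y);
        try expect(u.y == 2);
    }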

View File

@@ -220,6 +220,9 @@ pub const Type = opaque {
Packed: Bool,
) void;
pub const structGetTypeAtIndex = LLVMStructGetTypeAtIndex;
extern fn LLVMStructGetTypeAtIndex(StructTy: *const Type, i: c_uint) *const Type;
pub const getTypeKind = LLVMGetTypeKind;
extern fn LLVMGetTypeKind(Ty: *const Type) TypeKind;
};

View File

@@ -1238,7 +1238,6 @@ pub const Type = extern union {
.fn_void_no_args,
.fn_naked_noreturn_no_args,
.fn_ccc_void_no_args,
.single_const_pointer_to_comptime_int,
.const_slice_u8,
.anyerror_void_error_union,
.empty_struct_literal,
@@ -1249,8 +1248,14 @@ pub const Type = extern union {
.error_set_inferred,
.@"opaque",
.generic_poison,
.array_u8,
.array_u8_sentinel_0,
.int_signed,
.int_unsigned,
.enum_simple,
=> false,
.single_const_pointer_to_comptime_int,
.type,
.comptime_int,
.comptime_float,
@@ -1263,8 +1268,6 @@ pub const Type = extern union {
.inferred_alloc_const => unreachable,
.bound_fn => unreachable,
.array_u8,
.array_u8_sentinel_0,
.array,
.array_sentinel,
.vector,
@@ -1277,17 +1280,21 @@ pub const Type = extern union {
.c_mut_pointer,
.const_slice,
.mut_slice,
.int_signed,
.int_unsigned,
=> return requiresComptime(childType(ty)),
.optional,
.optional_single_mut_pointer,
.optional_single_const_pointer,
=> {
var buf: Payload.ElemType = undefined;
return requiresComptime(optionalChild(ty, &buf));
},
.error_union,
.anyframe_T,
.@"struct",
.@"union",
.union_tagged,
.enum_simple,
.enum_numbered,
.enum_full,
.enum_nonexhaustive,
@@ -2568,6 +2575,24 @@ pub const Type = extern union {
return union_obj.fields.values()[index].ty;
}
pub fn unionHasAllZeroBitFieldTypes(ty: Type) bool {
return ty.cast(Payload.Union).?.data.hasAllZeroBitFieldTypes();
}
pub fn unionGetLayout(ty: Type, target: Target) Module.Union.Layout {
switch (ty.tag()) {
.@"union" => {
const union_obj = ty.castTag(.@"union").?.data;
return union_obj.getLayout(target, false);
},
.union_tagged => {
const union_obj = ty.castTag(.union_tagged).?.data;
return union_obj.getLayout(target, true);
},
else => unreachable,
}
}
/// Asserts that the type is an error union.
pub fn errorUnionPayload(self: Type) Type {
return switch (self.tag()) {
@@ -3361,17 +3386,26 @@ pub const Type = extern union {
}
}
/// Supports structs and unions.
pub fn structFieldType(ty: Type, index: usize) Type {
switch (ty.tag()) {
.@"struct" => {
const struct_obj = ty.castTag(.@"struct").?.data;
return struct_obj.fields.values()[index].ty;
},
.@"union", .union_tagged => {
const union_obj = ty.cast(Payload.Union).?.data;
return union_obj.fields.values()[index].ty;
},
else => unreachable,
}
}
pub fn declSrcLoc(ty: Type) Module.SrcLoc {
return declSrcLocOrNull(ty).?;
}
pub fn declSrcLocOrNull(ty: Type) ?Module.SrcLoc {
switch (ty.tag()) {
.enum_full, .enum_nonexhaustive => {
const enum_full = ty.cast(Payload.EnumFull).?.data;
@@ -3404,8 +3438,9 @@ pub const Type = extern union {
.export_options,
.extern_options,
.type_info,
=> @panic("TODO resolve std.builtin types"),
else => unreachable,
=> unreachable, // needed to call resolveTypeFields first
else => return null,
}
}

View File

@@ -32,3 +32,42 @@ fn setFloat(foo: *Foo, x: f64) void {
fn setInt(foo: *Foo, x: i32) void {
foo.* = Foo{ .int = x };
}
test "comptime union field access" {
comptime {
var foo = Foo{ .int = 0 };
try expect(foo.int == 0);
foo = Foo{ .float = 42.42 };
try expect(foo.float == 42.42);
}
}
const FooExtern = extern union {
float: f64,
int: i32,
};
test "basic extern unions" {
var foo = FooExtern{ .int = 1 };
try expect(foo.int == 1);
foo.float = 12.34;
try expect(foo.float == 12.34);
}
const ExternPtrOrInt = extern union {
ptr: *u8,
int: u64,
};
test "extern union size" {
comptime try expect(@sizeOf(ExternPtrOrInt) == 8);
}
test "0-sized extern union definition" {
const U = extern union {
a: void,
const f = 1;
};
try expect(U.f == 1);
}

View File

@@ -34,33 +34,6 @@ test "unions embedded in aggregate types" {
}
}
const Foo = union {
float: f64,
int: i32,
};
test "comptime union field access" {
comptime {
var foo = Foo{ .int = 0 };
try expect(foo.int == 0);
foo = Foo{ .float = 42.42 };
try expect(foo.float == 42.42);
}
}
const FooExtern = extern union {
float: f64,
int: i32,
};
test "basic extern unions" {
var foo = FooExtern{ .int = 1 };
try expect(foo.int == 1);
foo.float = 12.34;
try expect(foo.float == 12.34);
}
const Letter = enum { A, B, C };
const Payload = union(Letter) {
A: i32,
@@ -131,19 +104,11 @@ fn testEnumWithSpecifiedAndUnspecifiedTagValues(x: MultipleChoice2) !void {
});
}
const ExternPtrOrInt = extern union {
ptr: *u8,
int: u64,
};
test "extern union size" {
comptime try expect(@sizeOf(ExternPtrOrInt) == 8);
}
const PackedPtrOrInt = packed union {
ptr: *u8,
int: u64,
};
test "extern union size" {
test "packed union size" {
comptime try expect(@sizeOf(PackedPtrOrInt) == 8);
}
@@ -576,15 +541,6 @@ test "function call result coerces from tagged union to the tag" {
comptime try S.doTheTest();
}
test "0-sized extern union definition" {
const U = extern union {
a: void,
const f = 1;
};
try expect(U.f == 1);
}
test "union initializer generates padding only if needed" {
const U = union(enum) {
A: u24,
@@ -769,6 +725,7 @@ test "union enum type gets a separate scope" {
try S.doTheTest();
}
test "anytype union field: issue #9233" {
const Quux = union(enum) { bar: anytype };
_ = Quux;
@@ -845,7 +802,7 @@ const TaggedUnionWithPayload = union(enum) {
Full: i32,
};
test "enum alignment" {
test "union alignment" {
comptime {
try expect(@alignOf(AlignTestTaggedUnion) >= @alignOf([9]u8));
try expect(@alignOf(AlignTestTaggedUnion) >= @alignOf(u64));