Merge pull request #12143 from Vexu/stage2-safety

Stage2 runtime safety progress
Andrew Kelley
2022-07-23 20:09:24 -07:00
committed by GitHub
85 changed files with 1387 additions and 443 deletions

View File

@@ -813,6 +813,7 @@ test "conversion to f32" {
test "conversion to f80" {
if (builtin.zig_backend == .stage1 and builtin.cpu.arch != .x86_64)
return error.SkipZigTest; // https://github.com/ziglang/zig/issues/11408
if (std.debug.runtime_safety) return error.SkipZigTest;
const intToFloat = @import("./int_to_float.zig").intToFloat;

View File

@@ -38,11 +38,15 @@ pub const Inst = struct {
/// is the same as both operands.
/// Uses the `bin_op` field.
add,
/// Same as `add` with optimized float mode.
add_optimized,
/// Integer addition. Wrapping is defined to be twos complement wrapping.
/// Both operands are guaranteed to be the same type, and the result type
/// is the same as both operands.
/// Uses the `bin_op` field.
addwrap,
/// Same as `addwrap` with optimized float mode.
addwrap_optimized,
/// Saturating integer addition.
/// Both operands are guaranteed to be the same type, and the result type
/// is the same as both operands.
@@ -53,11 +57,15 @@ pub const Inst = struct {
/// is the same as both operands.
/// Uses the `bin_op` field.
sub,
/// Same as `sub` with optimized float mode.
sub_optimized,
/// Integer subtraction. Wrapping is defined to be twos complement wrapping.
/// Both operands are guaranteed to be the same type, and the result type
/// is the same as both operands.
/// Uses the `bin_op` field.
subwrap,
/// Same as `subwrap` with optimized float mode.
subwrap_optimized,
/// Saturating integer subtraction.
/// Both operands are guaranteed to be the same type, and the result type
/// is the same as both operands.
@@ -68,11 +76,15 @@ pub const Inst = struct {
/// is the same as both operands.
/// Uses the `bin_op` field.
mul,
/// Same as `mul` with optimized float mode.
mul_optimized,
/// Integer multiplication. Wrapping is defined to be twos complement wrapping.
/// Both operands are guaranteed to be the same type, and the result type
/// is the same as both operands.
/// Uses the `bin_op` field.
mulwrap,
/// Same as `mulwrap` with optimized float mode.
mulwrap_optimized,
/// Saturating integer multiplication.
/// Both operands are guaranteed to be the same type, and the result type
/// is the same as both operands.
@@ -83,32 +95,44 @@ pub const Inst = struct {
/// is the same as both operands.
/// Uses the `bin_op` field.
div_float,
/// Same as `div_float` with optimized float mode.
div_float_optimized,
/// Truncating integer or float division. For integers, wrapping is undefined behavior.
/// Both operands are guaranteed to be the same type, and the result type
/// is the same as both operands.
/// Uses the `bin_op` field.
div_trunc,
/// Same as `div_trunc` with optimized float mode.
div_trunc_optimized,
/// Flooring integer or float division. For integers, wrapping is undefined behavior.
/// Both operands are guaranteed to be the same type, and the result type
/// is the same as both operands.
/// Uses the `bin_op` field.
div_floor,
/// Same as `div_floor` with optimized float mode.
div_floor_optimized,
/// Integer or float division. Guaranteed no remainder.
/// For integers, wrapping is undefined behavior.
/// Both operands are guaranteed to be the same type, and the result type
/// is the same as both operands.
/// Uses the `bin_op` field.
div_exact,
/// Same as `div_exact` with optimized float mode.
div_exact_optimized,
/// Integer or float remainder division.
/// Both operands are guaranteed to be the same type, and the result type
/// is the same as both operands.
/// Uses the `bin_op` field.
rem,
/// Same as `rem` with optimized float mode.
rem_optimized,
/// Integer or float modulus division.
/// Both operands are guaranteed to be the same type, and the result type
/// is the same as both operands.
/// Uses the `bin_op` field.
mod,
/// Same as `mod` with optimized float mode.
mod_optimized,
/// Add an offset to a pointer, returning a new pointer.
/// The offset is in element type units, not bytes.
/// Wrapping is undefined behavior.
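For reference, a minimal sketch of what selects these tags: a scope compiled under optimized float mode lowers its float arithmetic to the `_optimized` variants so backends may attach fast-math flags.

fn fastHypot(a: f64, b: f64) f64 {
    // Optimized float mode: `*` and `+` below lower to `mul_optimized`
    // and `add_optimized` instead of `mul` and `add`.
    @setFloatMode(.Optimized);
    return @sqrt(a * a + b * b);
}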
@@ -293,29 +317,45 @@ pub const Inst = struct {
/// LHS of zero.
/// Uses the `un_op` field.
neg,
/// Same as `neg` with optimized float mode.
neg_optimized,
/// `<`. Result type is always bool.
/// Uses the `bin_op` field.
cmp_lt,
/// Same as `cmp_lt` with optimized float mode.
cmp_lt_optimized,
/// `<=`. Result type is always bool.
/// Uses the `bin_op` field.
cmp_lte,
/// Same as `cmp_lte` with optimized float mode.
cmp_lte_optimized,
/// `==`. Result type is always bool.
/// Uses the `bin_op` field.
cmp_eq,
/// Same as `cmp_eq` with optimized float mode.
cmp_eq_optimized,
/// `>=`. Result type is always bool.
/// Uses the `bin_op` field.
cmp_gte,
/// Same as `cmp_gte` with optimized float mode.
cmp_gte_optimized,
/// `>`. Result type is always bool.
/// Uses the `bin_op` field.
cmp_gt,
/// Same as `cmp_gt` with optimized float mode.
cmp_gt_optimized,
/// `!=`. Result type is always bool.
/// Uses the `bin_op` field.
cmp_neq,
/// Same as `cmp_neq` with optimized float mode.
cmp_neq_optimized,
/// Conditional between two vectors.
/// Result type is always a vector of bools.
/// Uses the `ty_pl` field, payload is `VectorCmp`.
cmp_vector,
/// Same as `cmp_vector` with optimized float mode.
cmp_vector_optimized,
/// Conditional branch.
/// Result type is always noreturn; no instructions in a block follow this one.
@@ -553,6 +593,8 @@ pub const Inst = struct {
/// Given a float operand, return the integer with the closest mathematical meaning.
/// Uses the `ty_op` field.
float_to_int,
/// Same as `float_to_int` with optimized float mode.
float_to_int_optimized,
/// Given an integer operand, return the float with the closest mathematical meaning.
/// Uses the `ty_op` field.
int_to_float,
@@ -564,6 +606,8 @@ pub const Inst = struct {
/// * min, max, add, mul => integer or float
/// Uses the `reduce` field.
reduce,
/// Same as `reduce` with optimized float mode.
reduce_optimized,
/// Given an integer, bool, float, or pointer operand, return a vector with all elements
/// equal to the scalar value.
/// Uses the `ty_op` field.
@@ -676,25 +720,25 @@ pub const Inst = struct {
/// Sets the operand as the current error return trace,
set_err_return_trace,
pub fn fromCmpOp(op: std.math.CompareOperator) Tag {
return switch (op) {
.lt => .cmp_lt,
.lte => .cmp_lte,
.eq => .cmp_eq,
.gte => .cmp_gte,
.gt => .cmp_gt,
.neq => .cmp_neq,
};
pub fn fromCmpOp(op: std.math.CompareOperator, optimized: bool) Tag {
switch (op) {
.lt => return if (optimized) .cmp_lt_optimized else .cmp_lt,
.lte => return if (optimized) .cmp_lte_optimized else .cmp_lte,
.eq => return if (optimized) .cmp_eq_optimized else .cmp_eq,
.gte => return if (optimized) .cmp_gte_optimized else .cmp_gte,
.gt => return if (optimized) .cmp_gt_optimized else .cmp_gt,
.neq => return if (optimized) .cmp_neq_optimized else .cmp_neq,
}
}
pub fn toCmpOp(tag: Tag) ?std.math.CompareOperator {
return switch (tag) {
.cmp_lt => .lt,
.cmp_lte => .lte,
.cmp_eq => .eq,
.cmp_gte => .gte,
.cmp_gt => .gt,
.cmp_neq => .neq,
.cmp_lt, .cmp_lt_optimized => .lt,
.cmp_lte, .cmp_lte_optimized => .lte,
.cmp_eq, .cmp_eq_optimized => .eq,
.cmp_gte, .cmp_gte_optimized => .gte,
.cmp_gt, .cmp_gt_optimized => .gt,
.cmp_neq, .cmp_neq_optimized => .neq,
else => null,
};
}
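A quick round trip of the new signatures (usage sketch; assumes `Air` and `std` are imported):

const tag = Air.Inst.Tag.fromCmpOp(.lt, true);
std.debug.assert(tag == .cmp_lt_optimized);
// Optimized variants map back to the same operator:
std.debug.assert(tag.toCmpOp().? == .lt);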
@@ -959,6 +1003,18 @@ pub fn typeOfIndex(air: Air, inst: Air.Inst.Index) Type {
.max,
.bool_and,
.bool_or,
.add_optimized,
.addwrap_optimized,
.sub_optimized,
.subwrap_optimized,
.mul_optimized,
.mulwrap_optimized,
.div_float_optimized,
.div_trunc_optimized,
.div_floor_optimized,
.div_exact_optimized,
.rem_optimized,
.mod_optimized,
=> return air.typeOf(datas[inst].bin_op.lhs),
.sqrt,
@@ -976,6 +1032,7 @@ pub fn typeOfIndex(air: Air, inst: Air.Inst.Index) Type {
.round,
.trunc_float,
.neg,
.neg_optimized,
=> return air.typeOf(datas[inst].un_op),
.cmp_lt,
@@ -984,6 +1041,12 @@ pub fn typeOfIndex(air: Air, inst: Air.Inst.Index) Type {
.cmp_gte,
.cmp_gt,
.cmp_neq,
.cmp_lt_optimized,
.cmp_lte_optimized,
.cmp_eq_optimized,
.cmp_gte_optimized,
.cmp_gt_optimized,
.cmp_neq_optimized,
.cmp_lt_errors_len,
.is_null,
.is_non_null,
@@ -1018,6 +1081,7 @@ pub fn typeOfIndex(air: Air, inst: Air.Inst.Index) Type {
.union_init,
.field_parent_ptr,
.cmp_vector,
.cmp_vector_optimized,
.add_with_overflow,
.sub_with_overflow,
.mul_with_overflow,
@@ -1054,6 +1118,7 @@ pub fn typeOfIndex(air: Air, inst: Air.Inst.Index) Type {
.struct_field_ptr_index_3,
.array_to_slice,
.float_to_int,
.float_to_int_optimized,
.int_to_float,
.splat,
.get_union_tag,
@@ -1129,7 +1194,7 @@ pub fn typeOfIndex(air: Air, inst: Air.Inst.Index) Type {
return ptr_ty.elemType();
},
.reduce => return air.typeOf(datas[inst].reduce.operand).childType(),
.reduce, .reduce_optimized => return air.typeOf(datas[inst].reduce.operand).childType(),
.mul_add => return air.typeOf(datas[inst].pl_op.operand),
.select => {

View File

@@ -1589,13 +1589,12 @@ fn structInitExpr(
switch (rl) {
.discard => {
// TODO if a type expr is given the fields should be validated for that type
if (struct_init.ast.type_expr != 0) {
const ty_inst = try typeExpr(gz, scope, struct_init.ast.type_expr);
_ = try gz.addUnNode(.validate_struct_init_ty, ty_inst, node);
}
for (struct_init.ast.fields) |field_init| {
_ = try expr(gz, scope, .discard, field_init);
_ = try structInitExprRlTy(gz, scope, node, struct_init, ty_inst, .struct_init);
} else {
_ = try structInitExprRlNone(gz, scope, node, struct_init, .none, .struct_init_anon);
}
return Zir.Inst.Ref.void_value;
},
@@ -1729,7 +1728,7 @@ fn structInitExprRlPtrInner(
for (struct_init.ast.fields) |field_init| {
const name_token = tree.firstToken(field_init) - 2;
const str_index = try astgen.identAsString(name_token);
const field_ptr = try gz.addPlNode(.field_ptr, field_init, Zir.Inst.Field{
const field_ptr = try gz.addPlNode(.field_ptr_init, field_init, Zir.Inst.Field{
.lhs = result_ptr,
.field_name_start = str_index,
});
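For illustration (the intent is an assumption, not stated in the diff): tagging init stores as `field_ptr_init` lets later passes distinguish them from ordinary `a.b` pointer accesses.

const Point = struct { x: u32, y: u32 }; // hypothetical type for the example
// Result-pointer init: each field store goes through `field_ptr_init`.
var p: Point = .{ .x = 1, .y = 2 };
// An ordinary field access still lowers through plain `field_ptr`.
p.x += 1;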
@@ -2287,6 +2286,7 @@ fn unusedResultExpr(gz: *GenZir, scope: *Scope, statement: Ast.Node.Index) Inner
.elem_ptr_imm,
.elem_val_node,
.field_ptr,
.field_ptr_init,
.field_val,
.field_call_bind,
.field_ptr_named,
@@ -4213,6 +4213,12 @@ fn structDeclInner(
const have_value = member.ast.value_expr != 0;
const is_comptime = member.comptime_token != null;
if (is_comptime and layout == .Packed) {
return astgen.failTok(member.comptime_token.?, "packed struct fields cannot be marked comptime", .{});
} else if (is_comptime and layout == .Extern) {
return astgen.failTok(member.comptime_token.?, "extern struct fields cannot be marked comptime", .{});
}
if (!is_comptime) {
known_non_opv = known_non_opv or
nodeImpliesMoreThanOnePossibleValue(tree, member.ast.type_expr);
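Code the new check rejects (a sketch matching the error text above):

const P = packed struct {
    comptime tag: u8 = 0, // error: packed struct fields cannot be marked comptime
    data: u8,
};
const E = extern struct {
    comptime tag: u8 = 0, // error: extern struct fields cannot be marked comptime
    data: u8,
};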
@@ -6504,8 +6510,7 @@ fn ret(gz: *GenZir, scope: *Scope, node: Ast.Node.Index) InnerError!Zir.Inst.Ref
},
.always => {
// Value is always an error. Emit both error defers and regular defers.
const result = if (rl == .ptr) try gz.addUnNode(.load, rl.ptr, node) else operand;
const err_code = try gz.addUnNode(.err_union_code, result, node);
const err_code = if (rl == .ptr) try gz.addUnNode(.load, rl.ptr, node) else operand;
try genDefers(gz, defer_outer, scope, .{ .both = err_code });
try gz.addRet(rl, operand, node);
return Zir.Inst.Ref.unreachable_value;
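The `.always` case means the return operand is statically known to be an error; a minimal example that takes this path (`cleanup` is a hypothetical helper):

fn mayFail() error{Oops}!void {
    // With this change the operand itself is used as the error code for
    // the defers; no separate `err_union_code` instruction is emitted.
    errdefer cleanup();
    return error.Oops;
}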

View File

@@ -173,6 +173,25 @@ pub fn categorizeOperand(
.shr_exact,
.min,
.max,
.add_optimized,
.addwrap_optimized,
.sub_optimized,
.subwrap_optimized,
.mul_optimized,
.mulwrap_optimized,
.div_float_optimized,
.div_trunc_optimized,
.div_floor_optimized,
.div_exact_optimized,
.rem_optimized,
.mod_optimized,
.neg_optimized,
.cmp_lt_optimized,
.cmp_lte_optimized,
.cmp_eq_optimized,
.cmp_gte_optimized,
.cmp_gt_optimized,
.cmp_neq_optimized,
=> {
const o = air_datas[inst].bin_op;
if (o.lhs == operand_ref) return matchOperandSmallIndex(l, inst, 0, .none);
@@ -239,6 +258,7 @@ pub fn categorizeOperand(
.struct_field_ptr_index_3,
.array_to_slice,
.float_to_int,
.float_to_int_optimized,
.int_to_float,
.get_union_tag,
.clz,
@@ -381,12 +401,12 @@ pub fn categorizeOperand(
if (extra.b == operand_ref) return matchOperandSmallIndex(l, inst, 1, .none);
return .none;
},
.reduce => {
.reduce, .reduce_optimized => {
const reduce = air_datas[inst].reduce;
if (reduce.operand == operand_ref) return matchOperandSmallIndex(l, inst, 0, .none);
return .none;
},
.cmp_vector => {
.cmp_vector, .cmp_vector_optimized => {
const extra = air.extraData(Air.VectorCmp, air_datas[inst].ty_pl.payload).data;
if (extra.lhs == operand_ref) return matchOperandSmallIndex(l, inst, 0, .none);
if (extra.rhs == operand_ref) return matchOperandSmallIndex(l, inst, 1, .none);
@@ -701,29 +721,47 @@ fn analyzeInst(
switch (inst_tags[inst]) {
.add,
.add_optimized,
.addwrap,
.addwrap_optimized,
.add_sat,
.sub,
.sub_optimized,
.subwrap,
.subwrap_optimized,
.sub_sat,
.mul,
.mul_optimized,
.mulwrap,
.mulwrap_optimized,
.mul_sat,
.div_float,
.div_float_optimized,
.div_trunc,
.div_trunc_optimized,
.div_floor,
.div_floor_optimized,
.div_exact,
.div_exact_optimized,
.rem,
.rem_optimized,
.mod,
.mod_optimized,
.bit_and,
.bit_or,
.xor,
.cmp_lt,
.cmp_lt_optimized,
.cmp_lte,
.cmp_lte_optimized,
.cmp_eq,
.cmp_eq_optimized,
.cmp_gte,
.cmp_gte_optimized,
.cmp_gt,
.cmp_gt_optimized,
.cmp_neq,
.cmp_neq_optimized,
.bool_and,
.bool_or,
.store,
@@ -794,6 +832,7 @@ fn analyzeInst(
.struct_field_ptr_index_3,
.array_to_slice,
.float_to_int,
.float_to_int_optimized,
.int_to_float,
.get_union_tag,
.clz,
@@ -836,6 +875,7 @@ fn analyzeInst(
.round,
.trunc_float,
.neg,
.neg_optimized,
.cmp_lt_errors_len,
.set_err_return_trace,
=> {
@@ -903,11 +943,11 @@ fn analyzeInst(
const extra = a.air.extraData(Air.Shuffle, inst_datas[inst].ty_pl.payload).data;
return trackOperands(a, new_set, inst, main_tomb, .{ extra.a, extra.b, .none });
},
.reduce => {
.reduce, .reduce_optimized => {
const reduce = inst_datas[inst].reduce;
return trackOperands(a, new_set, inst, main_tomb, .{ reduce.operand, .none, .none });
},
.cmp_vector => {
.cmp_vector, .cmp_vector_optimized => {
const extra = a.air.extraData(Air.VectorCmp, inst_datas[inst].ty_pl.payload).data;
return trackOperands(a, new_set, inst, main_tomb, .{ extra.lhs, extra.rhs, .none });
},

View File

@@ -787,7 +787,7 @@ pub const Decl = struct {
const opaque_obj = ty.cast(Type.Payload.Opaque).?.data;
return &opaque_obj.namespace;
},
.@"union", .union_tagged => {
.@"union", .union_safety_tagged, .union_tagged => {
const union_obj = ty.cast(Type.Payload.Union).?.data;
return &union_obj.namespace;
},
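The new `union_safety_tagged` tag, threaded through the rest of this commit, covers bare unions that carry a hidden tag in safety-checked builds. A sketch of the behavior it enables:

const U = union { int: u32, float: f32 }; // untagged in source
var u = U{ .int = 1 };
// In safe build modes the hidden tag records that `.int` is active, so
// this wrong-field access can panic instead of being undefined behavior:
_ = u.float;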
@@ -2704,6 +2704,18 @@ pub const SrcLoc = struct {
else => unreachable,
}
},
.node_offset_field_default => |node_off| {
const tree = try src_loc.file_scope.getTree(gpa);
const node_tags = tree.nodes.items(.tag);
const parent_node = src_loc.declRelativeToNodeIndex(node_off);
const full: Ast.full.ContainerField = switch (node_tags[parent_node]) {
.container_field => tree.containerField(parent_node),
.container_field_init => tree.containerFieldInit(parent_node),
else => unreachable,
};
return nodeToSpan(tree, full.ast.value_expr);
},
}
}
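A sketch of the span this resolves to (assuming analysis of a field default fails):

const S = struct {
    // `node_offset_field_default` points at the default value expression,
    // i.e. the `1 / 0` below, rather than at the whole field declaration.
    x: u32 = 1 / 0,
};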
@@ -3021,6 +3033,9 @@ pub const LazySrcLoc = union(enum) {
/// The source location points to the tag type of an union or an enum.
/// The Decl is determined contextually.
node_offset_container_tag: i32,
/// The source location points to the default value of a field.
/// The Decl is determined contextually.
node_offset_field_default: i32,
pub const nodeOffset = if (TracedOffset.want_tracing) nodeOffsetDebug else nodeOffsetRelease;
@@ -3098,6 +3113,7 @@ pub const LazySrcLoc = union(enum) {
.node_offset_ptr_bitoffset,
.node_offset_ptr_hostsize,
.node_offset_container_tag,
.node_offset_field_default,
=> .{
.file_scope = decl.getFileScope(),
.parent_decl_node = decl.src_node,
@@ -5936,6 +5952,58 @@ pub fn argSrc(
return LazySrcLoc.nodeOffset(decl.nodeIndexToRelative(full.ast.params[arg_i]));
}
pub fn initSrc(
init_node_offset: i32,
gpa: Allocator,
decl: *Decl,
init_index: usize,
) LazySrcLoc {
@setCold(true);
const tree = decl.getFileScope().getTree(gpa) catch |err| {
// In this case we emit a warning + a less precise source location.
log.warn("unable to load {s}: {s}", .{
decl.getFileScope().sub_file_path, @errorName(err),
});
return LazySrcLoc.nodeOffset(0);
};
const node_tags = tree.nodes.items(.tag);
const node = decl.relativeToNodeIndex(init_node_offset);
var buf: [2]Ast.Node.Index = undefined;
const full = switch (node_tags[node]) {
.array_init_one, .array_init_one_comma => tree.arrayInitOne(buf[0..1], node).ast.elements,
.array_init_dot_two, .array_init_dot_two_comma => tree.arrayInitDotTwo(&buf, node).ast.elements,
.array_init_dot, .array_init_dot_comma => tree.arrayInitDot(node).ast.elements,
.array_init, .array_init_comma => tree.arrayInit(node).ast.elements,
.struct_init_one, .struct_init_one_comma => tree.structInitOne(buf[0..1], node).ast.fields,
.struct_init_dot_two, .struct_init_dot_two_comma => tree.structInitDotTwo(&buf, node).ast.fields,
.struct_init_dot, .struct_init_dot_comma => tree.structInitDot(node).ast.fields,
.struct_init, .struct_init_comma => tree.structInit(node).ast.fields,
else => unreachable,
};
switch (node_tags[node]) {
.array_init_one,
.array_init_one_comma,
.array_init_dot_two,
.array_init_dot_two_comma,
.array_init_dot,
.array_init_dot_comma,
.array_init,
.array_init_comma,
=> return LazySrcLoc.nodeOffset(decl.nodeIndexToRelative(full[init_index])),
.struct_init_one,
.struct_init_one_comma,
.struct_init_dot_two,
.struct_init_dot_two_comma,
.struct_init_dot,
.struct_init_dot_comma,
.struct_init,
.struct_init_comma,
=> return LazySrcLoc{ .node_offset_initializer = decl.nodeIndexToRelative(full[init_index]) },
else => unreachable,
}
}
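By example (hedged; the exact diagnostics may differ): array inits resolve to the element node itself, while struct inits wrap the node in `.node_offset_initializer` so the whole `.field = value` can be highlighted.

const a = [2]u8{ 0, 256 }; // init_index 1 -> location of the `256` literal
const S = struct { x: u8, y: u8 };
const s = S{ .x = 0, .y = 256 }; // init_index 1 -> `.y = 256` via node_offset_initializer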
/// Called from `performAllTheWork`, after all AstGen workers have finished,
/// and before the main semantic analysis loop begins.
pub fn processOutdatedAndDeletedDecls(mod: *Module) !void {

File diff suppressed because it is too large

View File

@@ -410,6 +410,8 @@ pub const Inst = struct {
/// to the named field. The field name is stored in string_bytes. Used by a.b syntax.
/// Uses `pl_node` field. The AST node is the a.b syntax. Payload is Field.
field_ptr,
/// Same as `field_ptr` but used for struct init.
field_ptr_init,
/// Given a struct or object that contains virtual fields, returns the named field.
/// The field name is stored in string_bytes. Used by a.b syntax.
/// This instruction also accepts a pointer.
@@ -1070,6 +1072,7 @@ pub const Inst = struct {
.@"export",
.export_value,
.field_ptr,
.field_ptr_init,
.field_val,
.field_call_bind,
.field_ptr_named,
@@ -1370,6 +1373,7 @@ pub const Inst = struct {
.elem_ptr_imm,
.elem_val_node,
.field_ptr,
.field_ptr_init,
.field_val,
.field_call_bind,
.field_ptr_named,
@@ -1629,6 +1633,7 @@ pub const Inst = struct {
.@"export" = .pl_node,
.export_value = .pl_node,
.field_ptr = .pl_node,
.field_ptr_init = .pl_node,
.field_val = .pl_node,
.field_ptr_named = .pl_node,
.field_val_named = .pl_node,

View File

@@ -729,6 +729,30 @@ fn genBody(self: *Self, body: []const Air.Inst.Index) InnerError!void {
.wrap_errunion_payload => try self.airWrapErrUnionPayload(inst),
.wrap_errunion_err => try self.airWrapErrUnionErr(inst),
.add_optimized,
.addwrap_optimized,
.sub_optimized,
.subwrap_optimized,
.mul_optimized,
.mulwrap_optimized,
.div_float_optimized,
.div_trunc_optimized,
.div_floor_optimized,
.div_exact_optimized,
.rem_optimized,
.mod_optimized,
.neg_optimized,
.cmp_lt_optimized,
.cmp_lte_optimized,
.cmp_eq_optimized,
.cmp_gte_optimized,
.cmp_gt_optimized,
.cmp_neq_optimized,
.cmp_vector_optimized,
.reduce_optimized,
.float_to_int_optimized,
=> return self.fail("TODO implement optimized float mode", .{}),
.wasm_memory_size => unreachable,
.wasm_memory_grow => unreachable,
// zig fmt: on

View File

@@ -744,6 +744,30 @@ fn genBody(self: *Self, body: []const Air.Inst.Index) InnerError!void {
.wrap_errunion_payload => try self.airWrapErrUnionPayload(inst),
.wrap_errunion_err => try self.airWrapErrUnionErr(inst),
.add_optimized,
.addwrap_optimized,
.sub_optimized,
.subwrap_optimized,
.mul_optimized,
.mulwrap_optimized,
.div_float_optimized,
.div_trunc_optimized,
.div_floor_optimized,
.div_exact_optimized,
.rem_optimized,
.mod_optimized,
.neg_optimized,
.cmp_lt_optimized,
.cmp_lte_optimized,
.cmp_eq_optimized,
.cmp_gte_optimized,
.cmp_gt_optimized,
.cmp_neq_optimized,
.cmp_vector_optimized,
.reduce_optimized,
.float_to_int_optimized,
=> return self.fail("TODO implement optimized float mode", .{}),
.wasm_memory_size => unreachable,
.wasm_memory_grow => unreachable,
// zig fmt: on

View File

@@ -669,6 +669,30 @@ fn genBody(self: *Self, body: []const Air.Inst.Index) InnerError!void {
.wrap_errunion_payload => try self.airWrapErrUnionPayload(inst),
.wrap_errunion_err => try self.airWrapErrUnionErr(inst),
.add_optimized,
.addwrap_optimized,
.sub_optimized,
.subwrap_optimized,
.mul_optimized,
.mulwrap_optimized,
.div_float_optimized,
.div_trunc_optimized,
.div_floor_optimized,
.div_exact_optimized,
.rem_optimized,
.mod_optimized,
.neg_optimized,
.cmp_lt_optimized,
.cmp_lte_optimized,
.cmp_eq_optimized,
.cmp_gte_optimized,
.cmp_gt_optimized,
.cmp_neq_optimized,
.cmp_vector_optimized,
.reduce_optimized,
.float_to_int_optimized,
=> return self.fail("TODO implement optimized float mode", .{}),
.wasm_memory_size => unreachable,
.wasm_memory_grow => unreachable,
// zig fmt: on

View File

@@ -681,6 +681,30 @@ fn genBody(self: *Self, body: []const Air.Inst.Index) InnerError!void {
.wrap_errunion_payload => @panic("TODO try self.airWrapErrUnionPayload(inst)"),
.wrap_errunion_err => try self.airWrapErrUnionErr(inst),
.add_optimized,
.addwrap_optimized,
.sub_optimized,
.subwrap_optimized,
.mul_optimized,
.mulwrap_optimized,
.div_float_optimized,
.div_trunc_optimized,
.div_floor_optimized,
.div_exact_optimized,
.rem_optimized,
.mod_optimized,
.neg_optimized,
.cmp_lt_optimized,
.cmp_lte_optimized,
.cmp_eq_optimized,
.cmp_gte_optimized,
.cmp_gt_optimized,
.cmp_neq_optimized,
.cmp_vector_optimized,
.reduce_optimized,
.float_to_int_optimized,
=> @panic("TODO implement optimized float mode"),
.wasm_memory_size => unreachable,
.wasm_memory_grow => unreachable,
// zig fmt: on

View File

@@ -1622,6 +1622,30 @@ fn genInst(self: *Self, inst: Air.Inst.Index) !WValue {
.err_return_trace,
.set_err_return_trace,
=> |tag| return self.fail("TODO: Implement wasm inst: {s}", .{@tagName(tag)}),
.add_optimized,
.addwrap_optimized,
.sub_optimized,
.subwrap_optimized,
.mul_optimized,
.mulwrap_optimized,
.div_float_optimized,
.div_trunc_optimized,
.div_floor_optimized,
.div_exact_optimized,
.rem_optimized,
.mod_optimized,
.neg_optimized,
.cmp_lt_optimized,
.cmp_lte_optimized,
.cmp_eq_optimized,
.cmp_gte_optimized,
.cmp_gt_optimized,
.cmp_neq_optimized,
.cmp_vector_optimized,
.reduce_optimized,
.float_to_int_optimized,
=> return self.fail("TODO implement optimized float mode", .{}),
};
}

View File

@@ -77,7 +77,7 @@ pub fn classifyType(ty: Type, target: Target) [2]Class {
.Union => {
const layout = ty.unionGetLayout(target);
if (layout.payload_size == 0 and layout.tag_size != 0) {
return classifyType(ty.unionTagType().?, target);
return classifyType(ty.unionTagTypeSafety().?, target);
}
if (ty.unionFields().count() > 1) return memory;
return classifyType(ty.unionFields().values()[0].ty, target);
@@ -111,7 +111,7 @@ pub fn scalarType(ty: Type, target: std.Target) Type {
.Union => {
const layout = ty.unionGetLayout(target);
if (layout.payload_size == 0 and layout.tag_size != 0) {
return scalarType(ty.unionTagType().?, target);
return scalarType(ty.unionTagTypeSafety().?, target);
}
std.debug.assert(ty.unionFields().count() == 1);
return scalarType(ty.unionFields().values()[0].ty, target);

View File

@@ -751,6 +751,30 @@ fn genBody(self: *Self, body: []const Air.Inst.Index) InnerError!void {
.wrap_errunion_payload => try self.airWrapErrUnionPayload(inst),
.wrap_errunion_err => try self.airWrapErrUnionErr(inst),
.add_optimized,
.addwrap_optimized,
.sub_optimized,
.subwrap_optimized,
.mul_optimized,
.mulwrap_optimized,
.div_float_optimized,
.div_trunc_optimized,
.div_floor_optimized,
.div_exact_optimized,
.rem_optimized,
.mod_optimized,
.neg_optimized,
.cmp_lt_optimized,
.cmp_lte_optimized,
.cmp_eq_optimized,
.cmp_gte_optimized,
.cmp_gt_optimized,
.cmp_neq_optimized,
.cmp_vector_optimized,
.reduce_optimized,
.float_to_int_optimized,
=> return self.fail("TODO implement optimized float mode", .{}),
.wasm_memory_size => unreachable,
.wasm_memory_grow => unreachable,
// zig fmt: on

View File

@@ -504,7 +504,7 @@ pub const DeclGen = struct {
if (field_ty.hasRuntimeBitsIgnoreComptime()) {
try writer.writeAll("&(");
try dg.renderParentPtr(writer, field_ptr.container_ptr, container_ptr_ty);
if (field_ptr.container_ty.tag() == .union_tagged) {
if (field_ptr.container_ty.tag() == .union_tagged or field_ptr.container_ty.tag() == .union_safety_tagged) {
try writer.print(")->payload.{ }", .{fmtIdent(field_name)});
} else {
try writer.print(")->{ }", .{fmtIdent(field_name)});
@@ -842,7 +842,7 @@ pub const DeclGen = struct {
try dg.renderTypecast(writer, ty);
try writer.writeAll("){");
if (ty.unionTagType()) |tag_ty| {
if (ty.unionTagTypeSafety()) |tag_ty| {
if (layout.tag_size != 0) {
try writer.writeAll(".tag = ");
try dg.renderValue(writer, tag_ty, union_obj.tag, location);
@@ -858,7 +858,7 @@ pub const DeclGen = struct {
try writer.print(".{ } = ", .{fmtIdent(field_name)});
try dg.renderValue(writer, field_ty, union_obj.val, location);
}
if (ty.unionTagType()) |_| {
if (ty.unionTagTypeSafety()) |_| {
try writer.writeAll("}");
}
try writer.writeAll("}");
@@ -1110,7 +1110,7 @@ pub const DeclGen = struct {
defer buffer.deinit();
try buffer.appendSlice("typedef ");
if (t.unionTagType()) |tag_ty| {
if (t.unionTagTypeSafety()) |tag_ty| {
const name: CValue = .{ .bytes = "tag" };
try buffer.appendSlice("struct {\n ");
if (layout.tag_size != 0) {
@@ -1134,7 +1134,7 @@ pub const DeclGen = struct {
}
try buffer.appendSlice("} ");
if (t.unionTagType()) |_| {
if (t.unionTagTypeSafety()) |_| {
try buffer.appendSlice("payload;\n} ");
}
@@ -1928,6 +1928,30 @@ fn genBody(f: *Function, body: []const Air.Inst.Index) error{ AnalysisFail, OutO
.wasm_memory_size => try airWasmMemorySize(f, inst),
.wasm_memory_grow => try airWasmMemoryGrow(f, inst),
.add_optimized,
.addwrap_optimized,
.sub_optimized,
.subwrap_optimized,
.mul_optimized,
.mulwrap_optimized,
.div_float_optimized,
.div_trunc_optimized,
.div_floor_optimized,
.div_exact_optimized,
.rem_optimized,
.mod_optimized,
.neg_optimized,
.cmp_lt_optimized,
.cmp_lte_optimized,
.cmp_eq_optimized,
.cmp_gte_optimized,
.cmp_gt_optimized,
.cmp_neq_optimized,
.cmp_vector_optimized,
.reduce_optimized,
.float_to_int_optimized,
=> return f.fail("TODO implement optimized float mode", .{}),
// zig fmt: on
};
switch (result_value) {
@@ -3368,7 +3392,7 @@ fn structFieldPtr(f: *Function, inst: Air.Inst.Index, struct_ptr_ty: Type, struc
field_name = fields.keys()[index];
field_val_ty = fields.values()[index].ty;
},
.@"union", .union_tagged => {
.@"union", .union_safety_tagged, .union_tagged => {
const fields = struct_ty.unionFields();
field_name = fields.keys()[index];
field_val_ty = fields.values()[index].ty;
@@ -3383,7 +3407,7 @@ fn structFieldPtr(f: *Function, inst: Air.Inst.Index, struct_ptr_ty: Type, struc
},
else => unreachable,
}
const payload = if (struct_ty.tag() == .union_tagged) "payload." else "";
const payload = if (struct_ty.tag() == .union_tagged or struct_ty.tag() == .union_safety_tagged) "payload." else "";
const inst_ty = f.air.typeOfIndex(inst);
const local = try f.allocLocal(inst_ty, .Const);
@@ -3415,7 +3439,7 @@ fn airStructFieldVal(f: *Function, inst: Air.Inst.Index) !CValue {
defer buf.deinit();
const field_name = switch (struct_ty.tag()) {
.@"struct" => struct_ty.structFields().keys()[extra.field_index],
.@"union", .union_tagged => struct_ty.unionFields().keys()[extra.field_index],
.@"union", .union_safety_tagged, .union_tagged => struct_ty.unionFields().keys()[extra.field_index],
.tuple, .anon_struct => blk: {
const tuple = struct_ty.tupleFields();
if (tuple.values[extra.field_index].tag() != .unreachable_value) return CValue.none;
@@ -3425,7 +3449,7 @@ fn airStructFieldVal(f: *Function, inst: Air.Inst.Index) !CValue {
},
else => unreachable,
};
const payload = if (struct_ty.tag() == .union_tagged) "payload." else "";
const payload = if (struct_ty.tag() == .union_tagged or struct_ty.tag() == .union_safety_tagged) "payload." else "";
const inst_ty = f.air.typeOfIndex(inst);
const local = try f.allocLocal(inst_ty, .Const);

View File

@@ -3404,7 +3404,7 @@ pub const DeclGen = struct {
if (layout.payload_size == 0) {
return lowerValue(dg, .{
.ty = tv.ty.unionTagType().?,
.ty = tv.ty.unionTagTypeSafety().?,
.val = tag_and_val.tag,
});
}
@@ -3446,7 +3446,7 @@ pub const DeclGen = struct {
}
}
const llvm_tag_value = try lowerValue(dg, .{
.ty = tv.ty.unionTagType().?,
.ty = tv.ty.unionTagTypeSafety().?,
.val = tag_and_val.tag,
});
var fields: [3]*const llvm.Value = undefined;
@@ -3984,21 +3984,21 @@ pub const FuncGen = struct {
for (body) |inst, i| {
const opt_value: ?*const llvm.Value = switch (air_tags[inst]) {
// zig fmt: off
.add => try self.airAdd(inst),
.addwrap => try self.airAddWrap(inst),
.add => try self.airAdd(inst, false),
.addwrap => try self.airAddWrap(inst, false),
.add_sat => try self.airAddSat(inst),
.sub => try self.airSub(inst),
.subwrap => try self.airSubWrap(inst),
.sub => try self.airSub(inst, false),
.subwrap => try self.airSubWrap(inst, false),
.sub_sat => try self.airSubSat(inst),
.mul => try self.airMul(inst),
.mulwrap => try self.airMulWrap(inst),
.mul => try self.airMul(inst, false),
.mulwrap => try self.airMulWrap(inst, false),
.mul_sat => try self.airMulSat(inst),
.div_float => try self.airDivFloat(inst),
.div_trunc => try self.airDivTrunc(inst),
.div_floor => try self.airDivFloor(inst),
.div_exact => try self.airDivExact(inst),
.rem => try self.airRem(inst),
.mod => try self.airMod(inst),
.div_float => try self.airDivFloat(inst, false),
.div_trunc => try self.airDivTrunc(inst, false),
.div_floor => try self.airDivFloor(inst, false),
.div_exact => try self.airDivExact(inst, false),
.rem => try self.airRem(inst, false),
.mod => try self.airMod(inst, false),
.ptr_add => try self.airPtrAdd(inst),
.ptr_sub => try self.airPtrSub(inst),
.shl => try self.airShl(inst),
@@ -4009,6 +4009,19 @@ pub const FuncGen = struct {
.slice => try self.airSlice(inst),
.mul_add => try self.airMulAdd(inst),
.add_optimized => try self.airAdd(inst, true),
.addwrap_optimized => try self.airAddWrap(inst, true),
.sub_optimized => try self.airSub(inst, true),
.subwrap_optimized => try self.airSubWrap(inst, true),
.mul_optimized => try self.airMul(inst, true),
.mulwrap_optimized => try self.airMulWrap(inst, true),
.div_float_optimized => try self.airDivFloat(inst, true),
.div_trunc_optimized => try self.airDivTrunc(inst, true),
.div_floor_optimized => try self.airDivFloor(inst, true),
.div_exact_optimized => try self.airDivExact(inst, true),
.rem_optimized => try self.airRem(inst, true),
.mod_optimized => try self.airMod(inst, true),
.add_with_overflow => try self.airOverflow(inst, "llvm.sadd.with.overflow", "llvm.uadd.with.overflow"),
.sub_with_overflow => try self.airOverflow(inst, "llvm.ssub.with.overflow", "llvm.usub.with.overflow"),
.mul_with_overflow => try self.airOverflow(inst, "llvm.smul.with.overflow", "llvm.umul.with.overflow"),
@@ -4034,17 +4047,27 @@ pub const FuncGen = struct {
.ceil => try self.airUnaryOp(inst, .ceil),
.round => try self.airUnaryOp(inst, .round),
.trunc_float => try self.airUnaryOp(inst, .trunc),
.neg => try self.airUnaryOp(inst, .neg),
.cmp_eq => try self.airCmp(inst, .eq),
.cmp_gt => try self.airCmp(inst, .gt),
.cmp_gte => try self.airCmp(inst, .gte),
.cmp_lt => try self.airCmp(inst, .lt),
.cmp_lte => try self.airCmp(inst, .lte),
.cmp_neq => try self.airCmp(inst, .neq),
.neg => try self.airNeg(inst, false),
.neg_optimized => try self.airNeg(inst, true),
.cmp_vector => try self.airCmpVector(inst),
.cmp_lt_errors_len => try self.airCmpLtErrorsLen(inst),
.cmp_eq => try self.airCmp(inst, .eq, false),
.cmp_gt => try self.airCmp(inst, .gt, false),
.cmp_gte => try self.airCmp(inst, .gte, false),
.cmp_lt => try self.airCmp(inst, .lt, false),
.cmp_lte => try self.airCmp(inst, .lte, false),
.cmp_neq => try self.airCmp(inst, .neq, false),
.cmp_eq_optimized => try self.airCmp(inst, .eq, true),
.cmp_gt_optimized => try self.airCmp(inst, .gt, true),
.cmp_gte_optimized => try self.airCmp(inst, .gte, true),
.cmp_lt_optimized => try self.airCmp(inst, .lt, true),
.cmp_lte_optimized => try self.airCmp(inst, .lte, true),
.cmp_neq_optimized => try self.airCmp(inst, .neq, true),
.cmp_vector => try self.airCmpVector(inst, false),
.cmp_vector_optimized => try self.airCmpVector(inst, true),
.cmp_lt_errors_len => try self.airCmpLtErrorsLen(inst),
.is_non_null => try self.airIsNonNull(inst, false, .NE),
.is_non_null_ptr => try self.airIsNonNull(inst, true , .NE),
@@ -4093,8 +4116,10 @@ pub const FuncGen = struct {
.ptr_slice_ptr_ptr => try self.airPtrSliceFieldPtr(inst, 0),
.ptr_slice_len_ptr => try self.airPtrSliceFieldPtr(inst, 1),
.float_to_int => try self.airFloatToInt(inst, false),
.float_to_int_optimized => try self.airFloatToInt(inst, true),
.array_to_slice => try self.airArrayToSlice(inst),
.float_to_int => try self.airFloatToInt(inst),
.int_to_float => try self.airIntToFloat(inst),
.cmpxchg_weak => try self.airCmpxchg(inst, true),
.cmpxchg_strong => try self.airCmpxchg(inst, false),
@@ -4115,11 +4140,13 @@ pub const FuncGen = struct {
.splat => try self.airSplat(inst),
.select => try self.airSelect(inst),
.shuffle => try self.airShuffle(inst),
.reduce => try self.airReduce(inst),
.aggregate_init => try self.airAggregateInit(inst),
.union_init => try self.airUnionInit(inst),
.prefetch => try self.airPrefetch(inst),
.reduce => try self.airReduce(inst, false),
.reduce_optimized => try self.airReduce(inst, true),
.atomic_store_unordered => try self.airAtomicStore(inst, .Unordered),
.atomic_store_monotonic => try self.airAtomicStore(inst, .Monotonic),
.atomic_store_release => try self.airAtomicStore(inst, .Release),
@@ -4485,8 +4512,9 @@ pub const FuncGen = struct {
return null;
}
fn airCmp(self: *FuncGen, inst: Air.Inst.Index, op: math.CompareOperator) !?*const llvm.Value {
fn airCmp(self: *FuncGen, inst: Air.Inst.Index, op: math.CompareOperator, want_fast_math: bool) !?*const llvm.Value {
if (self.liveness.isUnused(inst)) return null;
self.builder.setFastMath(want_fast_math);
const bin_op = self.air.instructions.items(.data)[inst].bin_op;
const lhs = try self.resolveInst(bin_op.lhs);
@@ -4496,8 +4524,9 @@ pub const FuncGen = struct {
return self.cmp(lhs, rhs, operand_ty, op);
}
fn airCmpVector(self: *FuncGen, inst: Air.Inst.Index) !?*const llvm.Value {
fn airCmpVector(self: *FuncGen, inst: Air.Inst.Index, want_fast_math: bool) !?*const llvm.Value {
if (self.liveness.isUnused(inst)) return null;
self.builder.setFastMath(want_fast_math);
const ty_pl = self.air.instructions.items(.data)[inst].ty_pl;
const extra = self.air.extraData(Air.VectorCmp, ty_pl.payload).data;
@@ -4943,10 +4972,12 @@ pub const FuncGen = struct {
return self.builder.buildCall(libc_fn, &params, params.len, .C, .Auto, "");
}
fn airFloatToInt(self: *FuncGen, inst: Air.Inst.Index) !?*const llvm.Value {
fn airFloatToInt(self: *FuncGen, inst: Air.Inst.Index, want_fast_math: bool) !?*const llvm.Value {
if (self.liveness.isUnused(inst))
return null;
self.builder.setFastMath(want_fast_math);
const target = self.dg.module.getTarget();
const ty_op = self.air.instructions.items(.data)[inst].ty_op;
@@ -6095,8 +6126,9 @@ pub const FuncGen = struct {
return self.builder.buildInsertValue(partial, len, 1, "");
}
fn airAdd(self: *FuncGen, inst: Air.Inst.Index) !?*const llvm.Value {
fn airAdd(self: *FuncGen, inst: Air.Inst.Index, want_fast_math: bool) !?*const llvm.Value {
if (self.liveness.isUnused(inst)) return null;
self.builder.setFastMath(want_fast_math);
const bin_op = self.air.instructions.items(.data)[inst].bin_op;
const lhs = try self.resolveInst(bin_op.lhs);
@@ -6109,8 +6141,9 @@ pub const FuncGen = struct {
return self.builder.buildNUWAdd(lhs, rhs, "");
}
fn airAddWrap(self: *FuncGen, inst: Air.Inst.Index) !?*const llvm.Value {
fn airAddWrap(self: *FuncGen, inst: Air.Inst.Index, want_fast_math: bool) !?*const llvm.Value {
if (self.liveness.isUnused(inst)) return null;
self.builder.setFastMath(want_fast_math);
const bin_op = self.air.instructions.items(.data)[inst].bin_op;
const lhs = try self.resolveInst(bin_op.lhs);
@@ -6134,8 +6167,9 @@ pub const FuncGen = struct {
return self.builder.buildUAddSat(lhs, rhs, "");
}
fn airSub(self: *FuncGen, inst: Air.Inst.Index) !?*const llvm.Value {
fn airSub(self: *FuncGen, inst: Air.Inst.Index, want_fast_math: bool) !?*const llvm.Value {
if (self.liveness.isUnused(inst)) return null;
self.builder.setFastMath(want_fast_math);
const bin_op = self.air.instructions.items(.data)[inst].bin_op;
const lhs = try self.resolveInst(bin_op.lhs);
@@ -6148,8 +6182,9 @@ pub const FuncGen = struct {
return self.builder.buildNUWSub(lhs, rhs, "");
}
fn airSubWrap(self: *FuncGen, inst: Air.Inst.Index) !?*const llvm.Value {
fn airSubWrap(self: *FuncGen, inst: Air.Inst.Index, want_fast_math: bool) !?*const llvm.Value {
if (self.liveness.isUnused(inst)) return null;
self.builder.setFastMath(want_fast_math);
const bin_op = self.air.instructions.items(.data)[inst].bin_op;
const lhs = try self.resolveInst(bin_op.lhs);
@@ -6172,8 +6207,9 @@ pub const FuncGen = struct {
return self.builder.buildUSubSat(lhs, rhs, "");
}
fn airMul(self: *FuncGen, inst: Air.Inst.Index) !?*const llvm.Value {
fn airMul(self: *FuncGen, inst: Air.Inst.Index, want_fast_math: bool) !?*const llvm.Value {
if (self.liveness.isUnused(inst)) return null;
self.builder.setFastMath(want_fast_math);
const bin_op = self.air.instructions.items(.data)[inst].bin_op;
const lhs = try self.resolveInst(bin_op.lhs);
@@ -6186,8 +6222,9 @@ pub const FuncGen = struct {
return self.builder.buildNUWMul(lhs, rhs, "");
}
fn airMulWrap(self: *FuncGen, inst: Air.Inst.Index) !?*const llvm.Value {
fn airMulWrap(self: *FuncGen, inst: Air.Inst.Index, want_fast_math: bool) !?*const llvm.Value {
if (self.liveness.isUnused(inst)) return null;
self.builder.setFastMath(want_fast_math);
const bin_op = self.air.instructions.items(.data)[inst].bin_op;
const lhs = try self.resolveInst(bin_op.lhs);
@@ -6210,8 +6247,9 @@ pub const FuncGen = struct {
return self.builder.buildUMulFixSat(lhs, rhs, "");
}
fn airDivFloat(self: *FuncGen, inst: Air.Inst.Index) !?*const llvm.Value {
fn airDivFloat(self: *FuncGen, inst: Air.Inst.Index, want_fast_math: bool) !?*const llvm.Value {
if (self.liveness.isUnused(inst)) return null;
self.builder.setFastMath(want_fast_math);
const bin_op = self.air.instructions.items(.data)[inst].bin_op;
const lhs = try self.resolveInst(bin_op.lhs);
@@ -6221,8 +6259,9 @@ pub const FuncGen = struct {
return self.buildFloatOp(.div, inst_ty, 2, .{ lhs, rhs });
}
fn airDivTrunc(self: *FuncGen, inst: Air.Inst.Index) !?*const llvm.Value {
fn airDivTrunc(self: *FuncGen, inst: Air.Inst.Index, want_fast_math: bool) !?*const llvm.Value {
if (self.liveness.isUnused(inst)) return null;
self.builder.setFastMath(want_fast_math);
const bin_op = self.air.instructions.items(.data)[inst].bin_op;
const lhs = try self.resolveInst(bin_op.lhs);
@@ -6238,8 +6277,9 @@ pub const FuncGen = struct {
return self.builder.buildUDiv(lhs, rhs, "");
}
fn airDivFloor(self: *FuncGen, inst: Air.Inst.Index) !?*const llvm.Value {
fn airDivFloor(self: *FuncGen, inst: Air.Inst.Index, want_fast_math: bool) !?*const llvm.Value {
if (self.liveness.isUnused(inst)) return null;
self.builder.setFastMath(want_fast_math);
const bin_op = self.air.instructions.items(.data)[inst].bin_op;
const lhs = try self.resolveInst(bin_op.lhs);
@@ -6270,8 +6310,9 @@ pub const FuncGen = struct {
return self.builder.buildUDiv(lhs, rhs, "");
}
fn airDivExact(self: *FuncGen, inst: Air.Inst.Index) !?*const llvm.Value {
fn airDivExact(self: *FuncGen, inst: Air.Inst.Index, want_fast_math: bool) !?*const llvm.Value {
if (self.liveness.isUnused(inst)) return null;
self.builder.setFastMath(want_fast_math);
const bin_op = self.air.instructions.items(.data)[inst].bin_op;
const lhs = try self.resolveInst(bin_op.lhs);
@@ -6284,8 +6325,9 @@ pub const FuncGen = struct {
return self.builder.buildExactUDiv(lhs, rhs, "");
}
fn airRem(self: *FuncGen, inst: Air.Inst.Index) !?*const llvm.Value {
fn airRem(self: *FuncGen, inst: Air.Inst.Index, want_fast_math: bool) !?*const llvm.Value {
if (self.liveness.isUnused(inst)) return null;
self.builder.setFastMath(want_fast_math);
const bin_op = self.air.instructions.items(.data)[inst].bin_op;
const lhs = try self.resolveInst(bin_op.lhs);
@@ -6298,8 +6340,9 @@ pub const FuncGen = struct {
return self.builder.buildURem(lhs, rhs, "");
}
fn airMod(self: *FuncGen, inst: Air.Inst.Index) !?*const llvm.Value {
fn airMod(self: *FuncGen, inst: Air.Inst.Index, want_fast_math: bool) !?*const llvm.Value {
if (self.liveness.isUnused(inst)) return null;
self.builder.setFastMath(want_fast_math);
const bin_op = self.air.instructions.items(.data)[inst].bin_op;
const lhs = try self.resolveInst(bin_op.lhs);
@@ -7613,6 +7656,17 @@ pub const FuncGen = struct {
return self.buildFloatOp(op, operand_ty, 1, .{operand});
}
fn airNeg(self: *FuncGen, inst: Air.Inst.Index, want_fast_math: bool) !?*const llvm.Value {
if (self.liveness.isUnused(inst)) return null;
self.builder.setFastMath(want_fast_math);
const un_op = self.air.instructions.items(.data)[inst].un_op;
const operand = try self.resolveInst(un_op);
const operand_ty = self.air.typeOf(un_op);
return self.buildFloatOp(.neg, operand_ty, 1, .{operand});
}
fn airClzCtz(self: *FuncGen, inst: Air.Inst.Index, llvm_fn_name: []const u8) !?*const llvm.Value {
if (self.liveness.isUnused(inst)) return null;
@@ -7927,8 +7981,9 @@ pub const FuncGen = struct {
return self.builder.buildShuffleVector(a, b, llvm_mask_value, "");
}
fn airReduce(self: *FuncGen, inst: Air.Inst.Index) !?*const llvm.Value {
fn airReduce(self: *FuncGen, inst: Air.Inst.Index, want_fast_math: bool) !?*const llvm.Value {
if (self.liveness.isUnused(inst)) return null;
self.builder.setFastMath(want_fast_math);
const reduce = self.air.instructions.items(.data)[inst].reduce;
const operand = try self.resolveInst(reduce.operand);

View File

@@ -941,6 +941,9 @@ pub const Builder = opaque {
pub const buildFPMulReduce = ZigLLVMBuildFPMulReduce;
extern fn ZigLLVMBuildFPMulReduce(B: *const Builder, Acc: *const Value, Val: *const Value) *const Value;
pub const setFastMath = ZigLLVMSetFastMath;
extern fn ZigLLVMSetFastMath(B: *const Builder, on_state: bool) void;
};
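Note that the call sites above pass `want_fast_math` unconditionally, `false` included: the flag is sticky builder state, so each handler resets it rather than assuming a default. Condensed pattern (hypothetical handler name):

fn airExampleOp(self: *FuncGen, inst: Air.Inst.Index, want_fast_math: bool) !?*const llvm.Value {
    if (self.liveness.isUnused(inst)) return null;
    // Reset on every instruction so flags set for a previous `_optimized`
    // instruction cannot leak into this one.
    self.builder.setFastMath(want_fast_math);
    // ... emit the operation ...
    return null;
}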
pub const MDString = opaque {

View File

@@ -138,6 +138,24 @@ const Writer = struct {
.set_union_tag,
.min,
.max,
.add_optimized,
.addwrap_optimized,
.sub_optimized,
.subwrap_optimized,
.mul_optimized,
.mulwrap_optimized,
.div_float_optimized,
.div_trunc_optimized,
.div_floor_optimized,
.div_exact_optimized,
.rem_optimized,
.mod_optimized,
.cmp_lt_optimized,
.cmp_lte_optimized,
.cmp_eq_optimized,
.cmp_gte_optimized,
.cmp_gt_optimized,
.cmp_neq_optimized,
=> try w.writeBinOp(s, inst),
.is_null,
@@ -169,6 +187,7 @@ const Writer = struct {
.round,
.trunc_float,
.neg,
.neg_optimized,
.cmp_lt_errors_len,
.set_err_return_trace,
=> try w.writeUnOp(s, inst),
@@ -216,6 +235,7 @@ const Writer = struct {
.int_to_float,
.splat,
.float_to_int,
.float_to_int_optimized,
.get_union_tag,
.clz,
.ctz,
@@ -280,8 +300,8 @@ const Writer = struct {
.mul_add => try w.writeMulAdd(s, inst),
.select => try w.writeSelect(s, inst),
.shuffle => try w.writeShuffle(s, inst),
.reduce => try w.writeReduce(s, inst),
.cmp_vector => try w.writeCmpVector(s, inst),
.reduce, .reduce_optimized => try w.writeReduce(s, inst),
.cmp_vector, .cmp_vector_optimized => try w.writeCmpVector(s, inst),
.dbg_block_begin, .dbg_block_end => {},
}

View File

@@ -390,6 +390,7 @@ const Writer = struct {
.switch_block => try self.writeSwitchBlock(stream, inst),
.field_ptr,
.field_ptr_init,
.field_val,
.field_call_bind,
=> try self.writePlNodeField(stream, inst),

View File

@@ -149,6 +149,7 @@ pub const Type = extern union {
=> return .Enum,
.@"union",
.union_safety_tagged,
.union_tagged,
.type_info,
=> return .Union,
@@ -902,7 +903,7 @@ pub const Type = extern union {
.reduce_op,
=> unreachable, // needed to resolve the type before now
.@"union", .union_tagged => {
.@"union", .union_safety_tagged, .union_tagged => {
const a_union_obj = a.cast(Payload.Union).?.data;
const b_union_obj = (b.cast(Payload.Union) orelse return false).data;
return a_union_obj == b_union_obj;
@@ -1210,7 +1211,7 @@ pub const Type = extern union {
.reduce_op,
=> unreachable, // needed to resolve the type before now
.@"union", .union_tagged => {
.@"union", .union_safety_tagged, .union_tagged => {
const union_obj: *const Module.Union = ty.cast(Payload.Union).?.data;
std.hash.autoHash(hasher, std.builtin.TypeId.Union);
std.hash.autoHash(hasher, union_obj);
@@ -1479,7 +1480,7 @@ pub const Type = extern union {
.error_set_single => return self.copyPayloadShallow(allocator, Payload.Name),
.empty_struct => return self.copyPayloadShallow(allocator, Payload.ContainerScope),
.@"struct" => return self.copyPayloadShallow(allocator, Payload.Struct),
.@"union", .union_tagged => return self.copyPayloadShallow(allocator, Payload.Union),
.@"union", .union_safety_tagged, .union_tagged => return self.copyPayloadShallow(allocator, Payload.Union),
.enum_simple => return self.copyPayloadShallow(allocator, Payload.EnumSimple),
.enum_numbered => return self.copyPayloadShallow(allocator, Payload.EnumNumbered),
.enum_full, .enum_nonexhaustive => return self.copyPayloadShallow(allocator, Payload.EnumFull),
@@ -1603,7 +1604,7 @@ pub const Type = extern union {
@tagName(t), struct_obj.owner_decl,
});
},
.@"union", .union_tagged => {
.@"union", .union_safety_tagged, .union_tagged => {
const union_obj = ty.cast(Payload.Union).?.data;
return writer.print("({s} decl={d})", .{
@tagName(t), union_obj.owner_decl,
@@ -1989,7 +1990,7 @@ pub const Type = extern union {
const decl = mod.declPtr(struct_obj.owner_decl);
try decl.renderFullyQualifiedName(mod, writer);
},
.@"union", .union_tagged => {
.@"union", .union_safety_tagged, .union_tagged => {
const union_obj = ty.cast(Payload.Union).?.data;
const decl = mod.declPtr(union_obj.owner_decl);
try decl.renderFullyQualifiedName(mod, writer);
@@ -2485,8 +2486,8 @@ pub const Type = extern union {
return false;
}
},
.union_tagged => {
const union_obj = ty.castTag(.union_tagged).?.data;
.union_safety_tagged, .union_tagged => {
const union_obj = ty.cast(Payload.Union).?.data;
if (try union_obj.tag_ty.hasRuntimeBitsAdvanced(ignore_comptime_only, sema_kit)) {
return true;
}
@@ -2644,7 +2645,7 @@ pub const Type = extern union {
.optional => ty.isPtrLikeOptional(),
.@"struct" => ty.castTag(.@"struct").?.data.layout != .Auto,
.@"union" => ty.castTag(.@"union").?.data.layout != .Auto,
.@"union", .union_safety_tagged => ty.cast(Payload.Union).?.data.layout != .Auto,
.union_tagged => false,
};
}
@@ -3050,11 +3051,10 @@ pub const Type = extern union {
},
.@"union" => {
const union_obj = ty.castTag(.@"union").?.data;
// TODO pass `true` for have_tag when unions have a safety tag
return abiAlignmentAdvancedUnion(ty, target, strat, union_obj, false);
},
.union_tagged => {
const union_obj = ty.castTag(.union_tagged).?.data;
.union_safety_tagged, .union_tagged => {
const union_obj = ty.cast(Payload.Union).?.data;
return abiAlignmentAdvancedUnion(ty, target, strat, union_obj, true);
},
@@ -3232,11 +3232,10 @@ pub const Type = extern union {
},
.@"union" => {
const union_obj = ty.castTag(.@"union").?.data;
// TODO pass `true` for have_tag when unions have a safety tag
return abiSizeAdvancedUnion(ty, target, strat, union_obj, false);
},
.union_tagged => {
const union_obj = ty.castTag(.union_tagged).?.data;
.union_safety_tagged, .union_tagged => {
const union_obj = ty.cast(Payload.Union).?.data;
return abiSizeAdvancedUnion(ty, target, strat, union_obj, true);
},
@@ -3526,7 +3525,7 @@ pub const Type = extern union {
return try bitSizeAdvanced(int_tag_ty, target, sema_kit);
},
.@"union", .union_tagged => {
.@"union", .union_safety_tagged, .union_tagged => {
if (sema_kit) |sk| _ = try sk.sema.resolveTypeFields(sk.block, sk.src, ty);
const union_obj = ty.cast(Payload.Union).?.data;
assert(union_obj.haveFieldTypes());
@@ -4194,6 +4193,33 @@ pub const Type = extern union {
};
}
/// Same as `unionTagType` but includes safety tag.
/// Codegen should use this version.
pub fn unionTagTypeSafety(ty: Type) ?Type {
return switch (ty.tag()) {
.union_safety_tagged, .union_tagged => {
const union_obj = ty.cast(Payload.Union).?.data;
assert(union_obj.haveFieldTypes());
return union_obj.tag_ty;
},
.atomic_order,
.atomic_rmw_op,
.calling_convention,
.address_space,
.float_mode,
.reduce_op,
.call_options,
.prefetch_options,
.export_options,
.extern_options,
.type_info,
=> unreachable, // needed to call resolveTypeFields first
else => null,
};
}
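Usage contrast, sketched: `unionTagType` still answers only for source-level tagged unions, while the backend changes above switch to `unionTagTypeSafety` so the hidden tag participates in layout.

// In a backend, deciding the in-memory representation:
if (ty.unionTagTypeSafety()) |tag_ty| {
    // { tag, payload } aggregate; `tag_ty` may be a hidden safety tag.
    _ = tag_ty;
} else {
    // Plain untagged layout.
}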
/// Asserts the type is a union; returns the tag type, even if the tag will
/// not be stored at runtime.
pub fn unionTagTypeHypothetical(ty: Type) Type {
@@ -4225,8 +4251,8 @@ pub const Type = extern union {
const union_obj = ty.castTag(.@"union").?.data;
return union_obj.getLayout(target, false);
},
.union_tagged => {
const union_obj = ty.castTag(.union_tagged).?.data;
.union_safety_tagged, .union_tagged => {
const union_obj = ty.cast(Payload.Union).?.data;
return union_obj.getLayout(target, true);
},
else => unreachable,
@@ -4238,6 +4264,7 @@ pub const Type = extern union {
.tuple, .empty_struct_literal, .anon_struct => .Auto,
.@"struct" => ty.castTag(.@"struct").?.data.layout,
.@"union" => ty.castTag(.@"union").?.data.layout,
.union_safety_tagged => ty.castTag(.union_safety_tagged).?.data.layout,
.union_tagged => ty.castTag(.union_tagged).?.data.layout,
else => unreachable,
};
@@ -4936,7 +4963,7 @@ pub const Type = extern union {
return null;
}
},
.@"union", .union_tagged => {
.@"union", .union_safety_tagged, .union_tagged => {
const union_obj = ty.cast(Payload.Union).?.data;
const tag_val = union_obj.tag_ty.onePossibleValue() orelse return null;
const only_field = union_obj.fields.values()[0];
@@ -5114,7 +5141,7 @@ pub const Type = extern union {
}
},
.@"union", .union_tagged => {
.@"union", .union_safety_tagged, .union_tagged => {
const union_obj = ty.cast(Type.Payload.Union).?.data;
switch (union_obj.requires_comptime) {
.wip, .unknown => unreachable, // This function asserts types already resolved.
@@ -5167,6 +5194,7 @@ pub const Type = extern union {
.empty_struct => self.castTag(.empty_struct).?.data,
.@"opaque" => &self.castTag(.@"opaque").?.data.namespace,
.@"union" => &self.castTag(.@"union").?.data.namespace,
.union_safety_tagged => &self.castTag(.union_safety_tagged).?.data.namespace,
.union_tagged => &self.castTag(.union_tagged).?.data.namespace,
else => null,
@@ -5439,7 +5467,7 @@ pub const Type = extern union {
const struct_obj = ty.castTag(.@"struct").?.data;
return struct_obj.fields.values()[index].ty;
},
.@"union", .union_tagged => {
.@"union", .union_safety_tagged, .union_tagged => {
const union_obj = ty.cast(Payload.Union).?.data;
return union_obj.fields.values()[index].ty;
},
@@ -5456,7 +5484,7 @@ pub const Type = extern union {
assert(struct_obj.layout != .Packed);
return struct_obj.fields.values()[index].normalAlignment(target);
},
.@"union", .union_tagged => {
.@"union", .union_safety_tagged, .union_tagged => {
const union_obj = ty.cast(Payload.Union).?.data;
return union_obj.fields.values()[index].normalAlignment(target);
},
@@ -5619,8 +5647,8 @@ pub const Type = extern union {
},
.@"union" => return 0,
.union_tagged => {
const union_obj = ty.castTag(.union_tagged).?.data;
.union_safety_tagged, .union_tagged => {
const union_obj = ty.cast(Payload.Union).?.data;
const layout = union_obj.getLayout(target, true);
if (layout.tag_align >= layout.payload_align) {
// {Tag, Payload}
@@ -5660,7 +5688,7 @@ pub const Type = extern union {
const error_set = ty.castTag(.error_set).?.data;
return error_set.srcLoc(mod);
},
.@"union", .union_tagged => {
.@"union", .union_safety_tagged, .union_tagged => {
const union_obj = ty.cast(Payload.Union).?.data;
return union_obj.srcLoc(mod);
},
@@ -5686,6 +5714,10 @@ pub const Type = extern union {
}
pub fn getOwnerDecl(ty: Type) Module.Decl.Index {
return ty.getOwnerDeclOrNull() orelse unreachable;
}
pub fn getOwnerDeclOrNull(ty: Type) ?Module.Decl.Index {
switch (ty.tag()) {
.enum_full, .enum_nonexhaustive => {
const enum_full = ty.cast(Payload.EnumFull).?.data;
@@ -5704,7 +5736,7 @@ pub const Type = extern union {
const error_set = ty.castTag(.error_set).?.data;
return error_set.owner_decl;
},
.@"union", .union_tagged => {
.@"union", .union_safety_tagged, .union_tagged => {
const union_obj = ty.cast(Payload.Union).?.data;
return union_obj.owner_decl;
},
@@ -5725,7 +5757,7 @@ pub const Type = extern union {
.type_info,
=> unreachable, // These need to be resolved earlier.
else => unreachable,
else => return null,
}
}
@@ -5748,7 +5780,7 @@ pub const Type = extern union {
const error_set = ty.castTag(.error_set).?.data;
return error_set.node_offset;
},
.@"union", .union_tagged => {
.@"union", .union_safety_tagged, .union_tagged => {
const union_obj = ty.cast(Payload.Union).?.data;
return union_obj.node_offset;
},
@@ -5893,6 +5925,7 @@ pub const Type = extern union {
@"opaque",
@"struct",
@"union",
union_safety_tagged,
union_tagged,
enum_simple,
enum_numbered,
@@ -6009,7 +6042,7 @@ pub const Type = extern union {
.error_set_single => Payload.Name,
.@"opaque" => Payload.Opaque,
.@"struct" => Payload.Struct,
.@"union", .union_tagged => Payload.Union,
.@"union", .union_safety_tagged, .union_tagged => Payload.Union,
.enum_full, .enum_nonexhaustive => Payload.EnumFull,
.enum_simple => Payload.EnumSimple,
.enum_numbered => Payload.EnumNumbered,

View File

@@ -222,6 +222,7 @@ fn testBytesAlign(b: u8) !void {
test "@alignCast slices" {
if (builtin.zig_backend == .stage2_c) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
var array align(4) = [_]u32{ 1, 1 };
const slice = array[0..];

View File

@@ -12,8 +12,10 @@ const A = union(enum) {
};
test "union that needs padding bytes inside an array" {
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest;
var as = [_]A{
A{ .B = B{ .D = 1 } },

View File

@@ -127,6 +127,7 @@ test "@intToFloat(f80)" {
}
fn testIntToFloat(comptime Int: type, k: Int) !void {
@setRuntimeSafety(false); // TODO
const f = @intToFloat(f80, k);
const i = @floatToInt(Int, f);
try expect(i == k);
@@ -151,6 +152,8 @@ test "@intToFloat(f80)" {
test "@floatToInt" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
try testFloatToInts();
comptime try testFloatToInts();

View File

@@ -377,6 +377,7 @@ fn testBinaryNot(x: u16) !void {
}
test "division" {
if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO

View File

@@ -998,6 +998,9 @@ test "tuple element initialized with fn call" {
}
test "struct with union field" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
const Value = struct {
ref: u32 = 2,
kind: union(enum) {

View File

@@ -412,7 +412,7 @@ test "Type.Union" {
const Untagged = @Type(.{
.Union = .{
.layout = .Auto,
.layout = .Extern,
.tag_type = null,
.fields = &.{
.{ .name = "int", .field_type = i32, .alignment = @alignOf(f32) },

View File

@@ -37,6 +37,7 @@ test "init union with runtime value - floats" {
test "basic unions" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
var foo = Foo{ .int = 1 };
try expect(foo.int == 1);
@@ -430,9 +431,11 @@ const Foo1 = union(enum) {
var glbl: Foo1 = undefined;
test "global union with single field is correctly initialized" {
if (builtin.zig_backend == .stage2_c) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
glbl = Foo1{
.f = @typeInfo(Foo1).Union.fields[0].field_type{ .x = 123 },
@@ -473,8 +476,11 @@ test "update the tag value for zero-sized unions" {
}
test "union initializer generates padding only if needed" {
if (builtin.zig_backend == .stage2_c) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
const U = union(enum) {
A: u24,
@@ -747,9 +753,11 @@ fn Setter(attr: Attribute) type {
}
test "return union init with void payload" {
if (builtin.zig_backend == .stage2_c) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
const S = struct {
fn entry() !void {
@@ -775,6 +783,7 @@ test "@unionInit stored to a const" {
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
const S = struct {
const U = union(enum) {
@@ -937,6 +946,7 @@ test "cast from anonymous struct to union" {
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
const S = struct {
const U = union(enum) {
@@ -969,6 +979,7 @@ test "cast from pointer to anonymous struct to pointer to union" {
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
const S = struct {
const U = union(enum) {
@@ -1104,6 +1115,8 @@ test "union enum type gets a separate scope" {
test "global variable struct contains union initialized to non-most-aligned field" {
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
const T = struct {
const U = union(enum) {

View File

@@ -3,3 +3,4 @@
// target=aarch64-macos
//
// :107:9: error: struct 'tmp.tmp' has no member named 'main'
// :7:1: note: struct declared here

View File

@@ -5,7 +5,7 @@ comptime {
}
// error
// backend=stage1
// backend=stage2
// target=native
//
// tmp.zig:3:35: error: pointer address 0x1 is not aligned to 4 bytes
// :3:35: error: pointer address 0x1 is not aligned to 4 bytes

View File

@@ -6,3 +6,4 @@ export fn entry() usize { return @sizeOf(@TypeOf(x)); }
// target=native
//
// :1:29: error: struct 'builtin.builtin' has no member named 'bogus'
// :1:1: note: struct declared here

View File

@@ -8,4 +8,4 @@ export fn entry() usize { return @sizeOf(@TypeOf(&f)); }
// backend=stage2
// target=native
//
// :3:6: error: type '[]const u8' has no field or member function named 'copy'
// :3:6: error: no field or member function named 'copy' in '[]const u8'

View File

@@ -11,5 +11,5 @@ export fn entry() void {
// backend=stage2
// target=native
//
// :6:21: error: enum 'tmp.Foo' has no field named 'c'
// :6:21: error: no field named 'c' in enum 'tmp.Foo'
// :1:13: note: enum declared here

View File

@@ -34,5 +34,5 @@ export fn d() void {
// :7:5: error: opaque types have unknown size and therefore cannot be directly embedded in unions
// :19:18: error: opaque types have unknown size and therefore cannot be directly embedded in structs
// :18:22: note: opaque declared here
// :24:18: error: opaque types have unknown size and therefore cannot be directly embedded in structs
// :24:23: error: opaque types have unknown size and therefore cannot be directly embedded in structs
// :23:22: note: opaque declared here

View File

@@ -11,9 +11,8 @@ export fn entry() void {
}
// error
// backend=stage1
// backend=stage2
// target=native
// is_test=1
//
// tmp.zig:7:13: error: duplicate field
// tmp.zig:4:13: note: other field here
// :7:16: error: duplicate field
// :4:16: note: other field here

View File

@@ -0,0 +1,21 @@
const U = union {
comptime a: u32 = 1,
};
const E = enum {
comptime a = 1,
};
const P = packed struct {
comptime a: u32 = 1,
};
const X = extern struct {
comptime a: u32 = 1,
};
// error
// backend=stage2
// target=native
//
// :2:5: error: union fields cannot be marked comptime
// :5:5: error: enum fields cannot be marked comptime
// :8:5: error: packed struct fields cannot be marked comptime
// :11:5: error: extern struct fields cannot be marked comptime
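
For contrast with the errors expected above, a comptime field is accepted on a plain struct, where it pins the field to its default value. A hypothetical sketch, not part of this diff:

const S = struct {
    comptime tag: u32 = 1, // must always hold 1
    data: u32,
};
pub fn main() void {
    const s = S{ .tag = 1, .data = 5 }; // ok: matches the default
    _ = s;
}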

View File

@@ -13,9 +13,50 @@ pub export fn entry1() void {
var s: S = .{};
s.a = T{ .a = 2, .b = 2 };
}
pub export fn entry2() void {
var list = .{ 1, 2, 3 };
var list2 = @TypeOf(list){ .@"0" = 1, .@"1" = 2, .@"2" = 3 };
var list3 = @TypeOf(list){ 1, 2, 4 };
_ = list2;
_ = list3;
}
pub export fn entry3() void {
const U = struct {
comptime foo: u32 = 1,
bar: u32,
fn foo(x: @This()) void {
_ = x;
}
};
_ = U.foo(U{ .foo = 2, .bar = 2 });
}
pub export fn entry4() void {
const U = struct {
comptime foo: u32 = 1,
bar: u32,
fn foo(x: @This()) void {
_ = x;
}
};
_ = U.foo(.{ .foo = 2, .bar = 2 });
}
// pub export fn entry5() void {
// var x: u32 = 15;
// const T = @TypeOf(.{ @as(i32, -1234), @as(u32, 5678), x });
// const S = struct {
// fn foo(_: T) void {}
// };
// _ = S.foo(.{ -1234, 5679, x });
// }
// error
// target=native
// backend=stage2
//
// :6:19: error: value stored in comptime field does not match the default value of the field
// :14:19: error: value stored in comptime field does not match the default value of the field
// :19:38: error: value stored in comptime field does not match the default value of the field
// :31:19: error: value stored in comptime field does not match the default value of the field
// :25:29: note: default value set here
// :41:16: error: value stored in comptime field does not match the default value of the field

View File

@@ -18,4 +18,5 @@ export fn f() void {
// backend=stage2
// target=native
//
// :14:9: error: type 'tmp.Foo' has no field or member function named 'init'
// :14:9: error: no field or member function named 'init' in 'tmp.Foo'
// :1:13: note: struct declared here

View File

@@ -27,4 +27,5 @@ export fn foo() void {
// backend=llvm
// target=native
//
// :23:6: error: type 'tmp.List' has no field or member function named 'init'
// :23:6: error: no field or member function named 'init' in 'tmp.List'
// :1:14: note: struct declared here

View File

@@ -16,5 +16,5 @@ export fn entry() usize {
// error
// target=native
//
// :10:5: error: enum 'tmp.E' has no field named 'd'
// :10:5: error: no field named 'd' in enum 'tmp.E'
// :1:11: note: enum declared here

View File

@@ -13,5 +13,4 @@ export fn entry() void {
// backend=stage2
// target=native
//
// :9:14: error: expected type 'type', found 'tmp.U'
// :1:11: note: union declared here
// :9:8: error: use of undefined value here causes undefined behavior

View File

@@ -1,9 +1,11 @@
const std = @import("std");
pub fn panic(message: []const u8, stack_trace: ?*std.builtin.StackTrace) noreturn {
_ = message;
_ = stack_trace;
std.process.exit(0);
if (std.mem.eql(u8, message, "incorrect alignment")) {
std.process.exit(0);
}
std.process.exit(1);
}
pub fn main() !void {
@@ -18,5 +20,5 @@ fn foo(bytes: []u8) u32 {
return int_slice[0];
}
// run
// backend=stage1
// target=native
// backend=llvm
// target=native
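
This case and the runtime-safety cases that follow were switched from a panic handler that exits 0 unconditionally to one that verifies the exact safety message, so an unrelated panic now fails the test instead of passing it. The shared shape, sketched here with an assumed "integer overflow" check:

const std = @import("std");
pub fn panic(message: []const u8, stack_trace: ?*std.builtin.StackTrace) noreturn {
    _ = stack_trace;
    if (std.mem.eql(u8, message, "integer overflow")) {
        std.process.exit(0); // the expected safety panic fired: pass
    }
    std.process.exit(1); // wrong panic or none at all: fail
}
pub fn main() !void {
    var a: u8 = 255;
    a += 1; // overflows u8 and trips the safety check
    return error.TestFailed;
}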

View File

@@ -1,9 +1,11 @@
const std = @import("std");
pub fn panic(message: []const u8, stack_trace: ?*std.builtin.StackTrace) noreturn {
_ = message;
_ = stack_trace;
std.process.exit(0);
if (std.mem.eql(u8, message, "invalid error code")) {
std.process.exit(0);
}
std.process.exit(1);
}
const Set1 = error{A, B};
const Set2 = error{A, C};
@@ -15,5 +17,5 @@ fn foo(set1: Set1) Set2 {
return @errSetCast(Set2, set1);
}
// run
// backend=stage1
// target=native
// backend=llvm
// target=native

View File

@@ -1,9 +1,11 @@
const std = @import("std");
pub fn panic(message: []const u8, stack_trace: ?*std.builtin.StackTrace) noreturn {
_ = message;
_ = stack_trace;
std.process.exit(0);
if (std.mem.eql(u8, message, "integer part of floating point value out of bounds")) {
std.process.exit(0);
}
std.process.exit(1);
}
pub fn main() !void {
baz(bar(-129.1));
@@ -14,5 +16,5 @@ fn bar(a: f32) i8 {
}
fn baz(_: i8) void { }
// run
// backend=stage1
// target=native
// backend=llvm
// target=native

View File

@@ -1,9 +1,11 @@
const std = @import("std");
pub fn panic(message: []const u8, stack_trace: ?*std.builtin.StackTrace) noreturn {
_ = message;
_ = stack_trace;
std.process.exit(0);
if (std.mem.eql(u8, message, "integer part of floating point value out of bounds")) {
std.process.exit(0);
}
std.process.exit(1);
}
pub fn main() !void {
baz(bar(-1.1));
@@ -14,5 +16,5 @@ fn bar(a: f32) u8 {
}
fn baz(_: u8) void { }
// run
// backend=stage1
// target=native
// backend=llvm
// target=native

View File

@@ -1,9 +1,11 @@
const std = @import("std");
pub fn panic(message: []const u8, stack_trace: ?*std.builtin.StackTrace) noreturn {
_ = message;
_ = stack_trace;
std.process.exit(0);
if (std.mem.eql(u8, message, "integer part of floating point value out of bounds")) {
std.process.exit(0);
}
std.process.exit(1);
}
pub fn main() !void {
baz(bar(256.2));
@@ -14,5 +16,5 @@ fn bar(a: f32) u8 {
}
fn baz(_: u8) void { }
// run
// backend=stage1
// target=native
// backend=llvm
// target=native

View File

@@ -1,9 +1,11 @@
const std = @import("std");
pub fn panic(message: []const u8, stack_trace: ?*std.builtin.StackTrace) noreturn {
_ = message;
_ = stack_trace;
std.process.exit(0);
if (std.mem.eql(u8, message, "integer cast truncated bits")) {
std.process.exit(0);
}
std.process.exit(1);
}
pub fn main() !void {
@@ -16,5 +18,5 @@ fn bar(one: u1, not_zero: i32) void {
_ = x;
}
// run
// backend=stage1
// target=native
// backend=llvm
// target=native

View File

@@ -1,9 +1,11 @@
const std = @import("std");
pub fn panic(message: []const u8, stack_trace: ?*std.builtin.StackTrace) noreturn {
_ = message;
_ = stack_trace;
std.process.exit(0);
if (std.mem.eql(u8, message, "cast causes pointer to be null")) {
std.process.exit(0);
}
std.process.exit(1);
}
pub fn main() !void {
var zero: usize = 0;
@@ -12,5 +14,5 @@ pub fn main() !void {
return error.TestFailed;
}
// run
// backend=stage1
// backend=llvm
// target=native

View File

@@ -1,9 +1,11 @@
const std = @import("std");
pub fn panic(message: []const u8, stack_trace: ?*std.builtin.StackTrace) noreturn {
_ = message;
_ = stack_trace;
std.process.exit(0);
if (std.mem.eql(u8, message, "cast causes pointer to be null")) {
std.process.exit(0);
}
std.process.exit(1);
}
pub fn main() !void {
var zero: usize = 0;
@@ -12,5 +14,5 @@ pub fn main() !void {
return error.TestFailed;
}
// run
// backend=stage1
// backend=llvm
// target=native

View File

@@ -1,9 +1,11 @@
const std = @import("std");
pub fn panic(message: []const u8, stack_trace: ?*std.builtin.StackTrace) noreturn {
_ = message;
_ = stack_trace;
std.process.exit(0);
if (std.mem.eql(u8, message, "access of inactive union field")) {
std.process.exit(0);
}
std.process.exit(1);
}
const Foo = union {
@@ -21,5 +23,5 @@ fn bar(f: *Foo) void {
f.float = 12.34;
}
// run
// backend=stage1
// target=native
// backend=llvm
// target=native

View File

@@ -12,5 +12,5 @@ pub fn main() !void {
return error.TestFailed;
}
// run
// backend=stage1
// target=native
// backend=llvm
// target=native

View File

@@ -1,9 +1,11 @@
const std = @import("std");
pub fn panic(message: []const u8, stack_trace: ?*std.builtin.StackTrace) noreturn {
_ = message;
_ = stack_trace;
std.process.exit(0);
if (std.mem.eql(u8, message, "invalid error code")) {
std.process.exit(0);
}
std.process.exit(1);
}
pub fn main() !void {
bar(9999) catch {};
@@ -13,5 +15,5 @@ fn bar(x: u16) anyerror {
return @intToError(x);
}
// run
// backend=stage1
// target=native
// backend=llvm
// target=native

View File

@@ -1,9 +1,11 @@
const std = @import("std");
pub fn panic(message: []const u8, stack_trace: ?*std.builtin.StackTrace) noreturn {
_ = message;
_ = stack_trace;
std.process.exit(0);
if (std.mem.eql(u8, message, "exact division produced remainder")) {
std.process.exit(0);
}
std.process.exit(1);
}
pub fn main() !void {
@@ -17,5 +19,5 @@ fn divExact(a: @Vector(4, i32), b: @Vector(4, i32)) @Vector(4, i32) {
return @divExact(a, b);
}
// run
// backend=stage1
// target=native
// backend=llvm
// target=native

View File

@@ -1,9 +1,11 @@
const std = @import("std");
pub fn panic(message: []const u8, stack_trace: ?*std.builtin.StackTrace) noreturn {
_ = message;
_ = stack_trace;
std.process.exit(0);
if (std.mem.eql(u8, message, "exact division produced remainder")) {
std.process.exit(0);
}
std.process.exit(1);
}
pub fn main() !void {
@@ -15,5 +17,5 @@ fn divExact(a: i32, b: i32) i32 {
return @divExact(a, b);
}
// run
// backend=stage1
// target=native
// backend=llvm
// target=native

View File

@@ -14,5 +14,5 @@ pub fn main() !void {
return error.TestFailed;
}
// run
// backend=stage1
// backend=llvm
// target=native

View File

@@ -19,5 +19,5 @@ fn add(a: u16, b: u16) u16 {
}
// run
// backend=stage1
// backend=llvm
// target=native

View File

@@ -1,9 +1,11 @@
const std = @import("std");
pub fn panic(message: []const u8, stack_trace: ?*std.builtin.StackTrace) noreturn {
_ = message;
_ = stack_trace;
std.process.exit(0);
if (std.mem.eql(u8, message, "division by zero")) {
std.process.exit(0);
}
std.process.exit(1);
}
pub fn main() !void {
var a: @Vector(4, i32) = [4]i32{111, 222, 333, 444};
@@ -16,5 +18,5 @@ fn div0(a: @Vector(4, i32), b: @Vector(4, i32)) @Vector(4, i32) {
return @divTrunc(a, b);
}
// run
// backend=stage1
// target=native
// backend=llvm
// target=native

View File

@@ -1,9 +1,11 @@
const std = @import("std");
pub fn panic(message: []const u8, stack_trace: ?*std.builtin.StackTrace) noreturn {
_ = message;
_ = stack_trace;
std.process.exit(0);
if (std.mem.eql(u8, message, "division by zero")) {
std.process.exit(0);
}
std.process.exit(1);
}
pub fn main() !void {
const x = div0(999, 0);
@@ -14,5 +16,5 @@ fn div0(a: i32, b: i32) i32 {
return @divTrunc(a, b);
}
// run
// backend=stage1
// target=native
// backend=llvm
// target=native

View File

@@ -1,9 +1,11 @@
const std = @import("std");
pub fn panic(message: []const u8, stack_trace: ?*std.builtin.StackTrace) noreturn {
_ = message;
_ = stack_trace;
std.process.exit(0);
if (std.mem.eql(u8, message, "integer overflow")) {
std.process.exit(0);
}
std.process.exit(1);
}
pub fn main() !void {
@@ -15,5 +17,5 @@ fn mul(a: u16, b: u16) u16 {
return a * b;
}
// run
// backend=stage1
// target=native
// backend=llvm
// target=native

View File

@@ -1,9 +1,11 @@
const std = @import("std");
pub fn panic(message: []const u8, stack_trace: ?*std.builtin.StackTrace) noreturn {
_ = message;
_ = stack_trace;
std.process.exit(0);
if (std.mem.eql(u8, message, "integer overflow")) {
std.process.exit(0);
}
std.process.exit(1);
}
pub fn main() !void {
@@ -15,5 +17,5 @@ fn neg(a: i16) i16 {
return -a;
}
// run
// backend=stage1
// target=native
// backend=llvm
// target=native

View File

@@ -1,9 +1,11 @@
const std = @import("std");
pub fn panic(message: []const u8, stack_trace: ?*std.builtin.StackTrace) noreturn {
_ = message;
_ = stack_trace;
std.process.exit(0);
if (std.mem.eql(u8, message, "integer overflow")) {
std.process.exit(0);
}
std.process.exit(1);
}
pub fn main() !void {
@@ -15,5 +17,5 @@ fn sub(a: u16, b: u16) u16 {
return a - b;
}
// run
// backend=stage1
// target=native
// backend=llvm
// target=native

View File

@@ -1,9 +1,11 @@
const std = @import("std");
pub fn panic(message: []const u8, stack_trace: ?*std.builtin.StackTrace) noreturn {
_ = message;
_ = stack_trace;
std.process.exit(0);
if (std.mem.eql(u8, message, "attempt to use null value")) {
std.process.exit(0);
}
std.process.exit(1);
}
pub fn main() !void {
var ptr: [*c]i32 = null;
@@ -12,5 +14,5 @@ pub fn main() !void {
return error.TestFailed;
}
// run
// backend=stage1
// backend=llvm
// target=native

View File

@@ -1,9 +1,11 @@
const std = @import("std");
pub fn panic(message: []const u8, stack_trace: ?*std.builtin.StackTrace) noreturn {
_ = message;
_ = stack_trace;
std.process.exit(0);
if (std.mem.eql(u8, message, "attempt to use null value")) {
std.process.exit(0);
}
std.process.exit(1);
}
pub fn main() !void {
var ptr: ?*i32 = null;
@@ -12,5 +14,5 @@ pub fn main() !void {
return error.TestFailed;
}
// run
// backend=stage1
// backend=llvm
// target=native

View File

@@ -1,9 +1,11 @@
const std = @import("std");
pub fn panic(message: []const u8, stack_trace: ?*std.builtin.StackTrace) noreturn {
_ = message;
_ = stack_trace;
std.process.exit(0);
if (std.mem.eql(u8, message, "attempt to index out of bound: index 4, len 4")) {
std.process.exit(0);
}
std.process.exit(1);
}
pub fn main() !void {
const a = [_]i32{1, 2, 3, 4};
@@ -15,5 +17,5 @@ fn bar(a: []const i32) i32 {
}
fn baz(_: i32) void { }
// run
// backend=stage1
// target=native
// backend=llvm
// target=native

View File

@@ -0,0 +1,20 @@
const std = @import("std");
pub fn panic(message: []const u8, stack_trace: ?*std.builtin.StackTrace) noreturn {
_ = stack_trace;
if (std.mem.eql(u8, message, "remainder division by zero or negative value")) {
std.process.exit(0);
}
std.process.exit(1);
}
pub fn main() !void {
const x = div0(999, -1);
_ = x;
return error.TestFailed;
}
fn div0(a: i32, b: i32) i32 {
return @rem(a, b);
}
// run
// backend=llvm
// target=native

View File

@@ -1,9 +1,11 @@
const std = @import("std");
pub fn panic(message: []const u8, stack_trace: ?*std.builtin.StackTrace) noreturn {
_ = message;
_ = stack_trace;
std.process.exit(0);
if (std.mem.eql(u8, message, "attempt to cast negative value to unsigned integer")) {
std.process.exit(0);
}
std.process.exit(1);
}
pub fn main() !void {
var value: c_short = -1;
@@ -12,5 +14,5 @@ pub fn main() !void {
return error.TestFailed;
}
// run
// backend=stage1
// target=native
// backend=llvm
// target=native

View File

@@ -1,11 +1,12 @@
const std = @import("std");
pub fn panic(message: []const u8, stack_trace: ?*std.builtin.StackTrace) noreturn {
_ = message;
_ = stack_trace;
std.process.exit(0);
if (std.mem.eql(u8, message, "attempt to cast negative value to unsigned integer")) {
std.process.exit(0);
}
std.process.exit(1);
}
pub fn main() !void {
const x = unsigned_cast(-10);
if (x == 0) return error.Whatever;
@@ -15,5 +16,5 @@ fn unsigned_cast(x: i32) u32 {
return @intCast(u32, x);
}
// run
// backend=stage1
// target=native
// backend=llvm
// target=native

View File

@@ -1,9 +1,11 @@
const std = @import("std");
pub fn panic(message: []const u8, stack_trace: ?*std.builtin.StackTrace) noreturn {
_ = message;
_ = stack_trace;
std.process.exit(0);
if (std.mem.eql(u8, message, "left shift overflowed bits")) {
std.process.exit(0);
}
std.process.exit(1);
}
pub fn main() !void {
@@ -15,5 +17,5 @@ fn shl(a: i16, b: u4) i16 {
return @shlExact(a, b);
}
// run
// backend=stage1
// target=native
// backend=llvm
// target=native

View File

@@ -1,9 +1,11 @@
const std = @import("std");
pub fn panic(message: []const u8, stack_trace: ?*std.builtin.StackTrace) noreturn {
_ = message;
_ = stack_trace;
std.process.exit(0);
if (std.mem.eql(u8, message, "right shift overflowed bits")) {
std.process.exit(0);
}
std.process.exit(1);
}
pub fn main() !void {
@@ -15,5 +17,5 @@ fn shr(a: i16, b: u4) i16 {
return @shrExact(a, b);
}
// run
// backend=stage1
// target=native
// backend=llvm
// target=native

View File

@@ -16,5 +16,5 @@ pub fn main() !void {
}
// run
// backend=stage1
// backend=llvm
// target=native

View File

@@ -16,5 +16,5 @@ pub fn main() !void {
}
// run
// backend=stage1
// backend=llvm
// target=native

View File

@@ -0,0 +1,15 @@
const std = @import("std");
pub fn panic(message: []const u8, stack_trace: ?*std.builtin.StackTrace) noreturn {
_ = stack_trace;
if (std.mem.eql(u8, message, "reached unreachable code")) {
std.process.exit(0);
}
std.process.exit(1);
}
pub fn main() !void {
unreachable;
}
// run
// backend=llvm
// target=native

View File

@@ -1,9 +1,11 @@
const std = @import("std");
pub fn panic(message: []const u8, stack_trace: ?*std.builtin.StackTrace) noreturn {
_ = message;
_ = stack_trace;
std.process.exit(0);
if (std.mem.eql(u8, message, "integer cast truncated bits")) {
std.process.exit(0);
}
std.process.exit(1);
}
pub fn main() !void {
var value: u8 = 245;
@@ -12,5 +14,5 @@ pub fn main() !void {
return error.TestFailed;
}
// run
// backend=stage1
// target=native
// backend=llvm
// target=native

View File

@@ -1,9 +1,11 @@
const std = @import("std");
pub fn panic(message: []const u8, stack_trace: ?*std.builtin.StackTrace) noreturn {
_ = message;
_ = stack_trace;
std.process.exit(0);
if (std.mem.eql(u8, message, "left shift overflowed bits")) {
std.process.exit(0);
}
std.process.exit(1);
}
pub fn main() !void {
@@ -15,5 +17,5 @@ fn shl(a: u16, b: u4) u16 {
return @shlExact(a, b);
}
// run
// backend=stage1
// target=native
// backend=llvm
// target=native

View File

@@ -1,9 +1,11 @@
const std = @import("std");
pub fn panic(message: []const u8, stack_trace: ?*std.builtin.StackTrace) noreturn {
_ = message;
_ = stack_trace;
std.process.exit(0);
if (std.mem.eql(u8, message, "right shift overflowed bits")) {
std.process.exit(0);
}
std.process.exit(1);
}
pub fn main() !void {
@@ -15,5 +17,5 @@ fn shr(a: u16, b: u4) u16 {
return @shrExact(a, b);
}
// run
// backend=stage1
// target=native
// backend=llvm
// target=native

View File

@@ -16,5 +16,5 @@ pub fn main() !void {
}
// run
// backend=stage1
// backend=llvm
// target=native

View File

@@ -15,5 +15,5 @@ fn bar() !void {
return error.Whatever;
}
// run
// backend=stage1
// target=native
// backend=llvm
// target=native

View File

@@ -1,9 +1,11 @@
const std = @import("std");
pub fn panic(message: []const u8, stack_trace: ?*std.builtin.StackTrace) noreturn {
_ = message;
_ = stack_trace;
std.process.exit(0);
if (std.mem.eql(u8, message, "integer cast truncated bits")) {
std.process.exit(0);
}
std.process.exit(1);
}
pub fn main() !void {
@@ -15,5 +17,5 @@ fn shorten_cast(x: u8) u0 {
return @intCast(u0, x);
}
// run
// backend=stage1
// target=native
// backend=llvm
// target=native

View File

@@ -1,9 +1,11 @@
const std = @import("std");
pub fn panic(message: []const u8, stack_trace: ?*std.builtin.StackTrace) noreturn {
_ = message;
_ = stack_trace;
std.process.exit(0);
if (std.mem.eql(u8, message, "integer cast truncated bits")) {
std.process.exit(0);
}
std.process.exit(1);
}
pub fn main() !void {
@@ -15,5 +17,5 @@ fn shorten_cast(x: i32) i8 {
return @intCast(i8, x);
}
// run
// backend=stage1
// target=native
// backend=llvm
// target=native

View File

@@ -1,9 +1,11 @@
const std = @import("std");
pub fn panic(message: []const u8, stack_trace: ?*std.builtin.StackTrace) noreturn {
_ = message;
_ = stack_trace;
std.process.exit(0);
if (std.mem.eql(u8, message, "integer overflow")) {
std.process.exit(0);
}
std.process.exit(1);
}
pub fn main() !void {
var a: @Vector(4, i32) = [_]i32{ 1, 2, 2147483643, 4 };
@@ -16,5 +18,5 @@ fn add(a: @Vector(4, i32), b: @Vector(4, i32)) @Vector(4, i32) {
return a + b;
}
// run
// backend=stage1
// target=native
// backend=llvm
// target=native

View File

@@ -1,9 +1,11 @@
const std = @import("std");
pub fn panic(message: []const u8, stack_trace: ?*std.builtin.StackTrace) noreturn {
_ = message;
_ = stack_trace;
std.process.exit(0);
if (std.mem.eql(u8, message, "integer overflow")) {
std.process.exit(0);
}
std.process.exit(1);
}
pub fn main() !void {
var a: @Vector(4, u8) = [_]u8{ 1, 2, 200, 4 };
@@ -16,5 +18,5 @@ fn mul(a: @Vector(4, u8), b: @Vector(4, u8)) @Vector(4, u8) {
return a * b;
}
// run
// backend=stage1
// target=native
// backend=llvm
// target=native

View File

@@ -1,9 +1,11 @@
const std = @import("std");
pub fn panic(message: []const u8, stack_trace: ?*std.builtin.StackTrace) noreturn {
_ = message;
_ = stack_trace;
std.process.exit(0);
if (std.mem.eql(u8, message, "integer overflow")) {
std.process.exit(0);
}
std.process.exit(1);
}
pub fn main() !void {
var a: @Vector(4, i16) = [_]i16{ 1, -32768, 200, 4 };
@@ -15,5 +17,5 @@ fn neg(a: @Vector(4, i16)) @Vector(4, i16) {
return -a;
}
// run
// backend=stage1
// target=native
// backend=llvm
// target=native

View File

@@ -1,9 +1,11 @@
const std = @import("std");
pub fn panic(message: []const u8, stack_trace: ?*std.builtin.StackTrace) noreturn {
_ = message;
_ = stack_trace;
std.process.exit(0);
if (std.mem.eql(u8, message, "integer overflow")) {
std.process.exit(0);
}
std.process.exit(1);
}
pub fn main() !void {
var a: @Vector(4, u32) = [_]u32{ 1, 2, 8, 4 };
@@ -16,5 +18,5 @@ fn sub(a: @Vector(4, u32), b: @Vector(4, u32)) @Vector(4, u32) {
return a - b;
}
// run
// backend=stage1
// target=native
// backend=llvm
// target=native

View File

@@ -3,3 +3,4 @@
// target=x86_64-linux
//
// :107:9: error: struct 'tmp.tmp' has no member named 'main'
// :7:1: note: struct declared here

View File

@@ -3,3 +3,4 @@
// target=x86_64-macos
//
// :107:9: error: struct 'tmp.tmp' has no member named 'main'
// :7:1: note: struct declared here

View File

@@ -839,7 +839,7 @@ pub fn addCases(ctx: *TestContext) !void {
\\ _ = x;
\\}
, &.{
":3:17: error: enum 'tmp.E' has no field named 'd'",
":3:17: error: no field named 'd' in enum 'tmp.E'",
":1:11: note: enum declared here",
});
}