Merge pull request #11671 from ziglang/stage2-behavior

stage2 bug fixes aimed towards more behavior tests passing
Andrew Kelley authored on 2022-05-20 18:35:19 -04:00; committed by GitHub
10 changed files with 1163 additions and 848 deletions


@@ -71,6 +71,7 @@ fn setExtra(astgen: *AstGen, index: usize, extra: anytype) void {
Zir.Inst.Ref => @enumToInt(@field(extra, field.name)),
i32 => @bitCast(u32, @field(extra, field.name)),
Zir.Inst.Call.Flags => @bitCast(u32, @field(extra, field.name)),
Zir.Inst.BuiltinCall.Flags => @bitCast(u32, @field(extra, field.name)),
Zir.Inst.SwitchBlock.Bits => @bitCast(u32, @field(extra, field.name)),
Zir.Inst.ExtendedFunc.Bits => @bitCast(u32, @field(extra, field.name)),
else => @compileError("bad field type"),
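The `setExtra` switch above is the serialization half of a small comptime-reflection pattern: every payload struct is flattened into the `u32` extra array by dispatching on each field's type. A minimal standalone sketch of the idea, using a hypothetical `Flags`/`Extra` pair and the 0.9-era builtins this diff uses:

const std = @import("std");

const Flags = packed struct {
    a: bool,
    b: bool,
    _: u30 = 0,
};

const Extra = struct {
    flags: Flags,
    operand: i32,
};

fn appendExtra(list: *std.ArrayList(u32), extra: anytype) !void {
    // Comptime-iterate the fields and pick a lossless u32 encoding per type.
    inline for (@typeInfo(@TypeOf(extra)).Struct.fields) |field| {
        try list.append(switch (field.field_type) {
            u32 => @field(extra, field.name),
            i32 => @bitCast(u32, @field(extra, field.name)),
            Flags => @bitCast(u32, @field(extra, field.name)),
            else => @compileError("bad field type"),
        });
    }
}

Adding `Zir.Inst.BuiltinCall.Flags` to both this switch and its deserializing twin in `extraData` (further down) is what lets the new payload round-trip.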
@@ -2213,6 +2214,14 @@ fn unusedResultExpr(gz: *GenZir, scope: *Scope, statement: Ast.Node.Index) Inner
slot.* = @bitCast(u32, flags);
break :b true;
},
.builtin_call => {
const extra_index = gz.astgen.instructions.items(.data)[inst].pl_node.payload_index;
const slot = &gz.astgen.extra.items[extra_index];
var flags = @bitCast(Zir.Inst.BuiltinCall.Flags, slot.*);
flags.ensure_result_used = true;
slot.* = @bitCast(u32, flags);
break :b true;
},
// ZIR instructions that might be a type other than `noreturn` or `void`.
.add,
@@ -2412,7 +2421,6 @@ fn unusedResultExpr(gz: *GenZir, scope: *Scope, statement: Ast.Node.Index) Inner
.atomic_load,
.atomic_rmw,
.mul_add,
.builtin_call,
.field_parent_ptr,
.maximum,
.minimum,
@@ -7502,6 +7510,11 @@ fn builtinCall(
.options = options,
.callee = callee,
.args = args,
.flags = .{
.is_nosuspend = gz.nosuspend_node != 0,
.is_comptime = gz.force_comptime,
.ensure_result_used = false,
},
});
return rvalue(gz, rl, result, node);
},
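Because `flags` is the first field of `Zir.Inst.BuiltinCall` (the field-ordering note further down exists precisely for this), `unusedResultExpr` can flip a bit directly in the serialized extra array. A minimal sketch of that in-place round trip (hypothetical helper; 0.9-era two-argument `@bitCast`):

const Flags = packed struct {
    is_nosuspend: bool,
    is_comptime: bool,
    ensure_result_used: bool,
    _: u29 = 0,
};

fn markResultUsed(extra: []u32, payload_index: usize) void {
    // The flags word is the first u32 of the payload, so no other
    // knowledge of the payload layout is needed.
    const slot = &extra[payload_index];
    var flags = @bitCast(Flags, slot.*);
    flags.ensure_result_used = true;
    slot.* = @bitCast(u32, flags);
}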

File diff suppressed because it is too large.


@@ -232,6 +232,11 @@ pub fn print(
const x = sub_ty.abiAlignment(target);
return writer.print("{d}", .{x});
},
.lazy_size => {
const sub_ty = val.castTag(.lazy_size).?.data;
const x = sub_ty.abiSize(target);
return writer.print("{d}", .{x});
},
.function => return writer.print("(function '{s}')", .{
mod.declPtr(val.castTag(.function).?.data.owner_decl).name,
}),


@@ -72,6 +72,7 @@ pub fn extraData(code: Zir, comptime T: type, index: usize) struct { data: T, en
Inst.Ref => @intToEnum(Inst.Ref, code.extra[i]),
i32 => @bitCast(i32, code.extra[i]),
Inst.Call.Flags => @bitCast(Inst.Call.Flags, code.extra[i]),
Inst.BuiltinCall.Flags => @bitCast(Inst.BuiltinCall.Flags, code.extra[i]),
Inst.SwitchBlock.Bits => @bitCast(Inst.SwitchBlock.Bits, code.extra[i]),
Inst.ExtendedFunc.Bits => @bitCast(Inst.ExtendedFunc.Bits, code.extra[i]),
else => @compileError("bad field type"),
@@ -280,8 +281,13 @@ pub const Inst = struct {
/// Uses the `break` union field.
break_inline,
/// Function call.
/// Uses `pl_node`. AST node is the function call. Payload is `Call`.
/// Uses the `pl_node` union field with payload `Call`.
/// AST node is the function call.
call,
/// Implements the `@call` builtin.
/// Uses the `pl_node` union field with payload `BuiltinCall`.
/// AST node is the builtin call.
builtin_call,
/// `<`
/// Uses the `pl_node` union field. Payload is `Bin`.
cmp_lt,
@@ -916,9 +922,6 @@ pub const Inst = struct {
/// The addend communicates the type of the builtin.
/// The mulends need to be coerced to the same type.
mul_add,
/// Implements the `@call` builtin.
/// Uses the `pl_node` union field with payload `BuiltinCall`.
builtin_call,
/// Implements the `@fieldParentPtr` builtin.
/// Uses the `pl_node` union field with payload `FieldParentPtr`.
field_parent_ptr,
@@ -2733,9 +2736,24 @@ pub const Inst = struct {
};
pub const BuiltinCall = struct {
// Note: Flags *must* come first so that unusedResultExpr
// can find it when it goes to modify them.
flags: Flags,
options: Ref,
callee: Ref,
args: Ref,
pub const Flags = packed struct {
is_nosuspend: bool,
is_comptime: bool,
ensure_result_used: bool,
_: u29 = undefined,
comptime {
if (@sizeOf(Flags) != 4 or @bitSizeOf(Flags) != 32)
@compileError("Layout of BuiltinCall.Flags needs to be updated!");
}
};
};
/// This data is stored inside extra, with two sets of trailing `Ref`:
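The comptime block inside `Flags` guards the invariant that the whole struct bit-casts to exactly one `u32`. The same guard could be factored into a reusable helper; a self-contained sketch under that assumption (`assertU32Layout` is hypothetical, not part of this change):

const Flags = packed struct {
    is_nosuspend: bool,
    is_comptime: bool,
    ensure_result_used: bool,
    _: u29 = undefined,

    comptime {
        assertU32Layout(@This());
    }
};

fn assertU32Layout(comptime T: type) void {
    if (@sizeOf(T) != 4 or @bitSizeOf(T) != 32)
        @compileError("Layout of " ++ @typeName(T) ++ " needs to be updated!");
}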


@@ -365,7 +365,7 @@ const Writer = struct {
.@"export" => try self.writePlNodeExport(stream, inst),
.export_value => try self.writePlNodeExportValue(stream, inst),
.call => try self.writePlNodeCall(stream, inst),
.call => try self.writeCall(stream, inst),
.block,
.block_inline,
@@ -793,6 +793,11 @@ const Writer = struct {
fn writeBuiltinCall(self: *Writer, stream: anytype, inst: Zir.Inst.Index) !void {
const inst_data = self.code.instructions.items(.data)[inst].pl_node;
const extra = self.code.extraData(Zir.Inst.BuiltinCall, inst_data.payload_index).data;
try self.writeFlag(stream, "nodiscard ", extra.flags.ensure_result_used);
try self.writeFlag(stream, "nosuspend ", extra.flags.is_nosuspend);
try self.writeFlag(stream, "comptime ", extra.flags.is_comptime);
try self.writeInstRef(stream, extra.options);
try stream.writeAll(", ");
try self.writeInstRef(stream, extra.callee);
@@ -1144,7 +1149,7 @@ const Writer = struct {
try self.writeSrc(stream, src);
}
fn writePlNodeCall(self: *Writer, stream: anytype, inst: Zir.Inst.Index) !void {
fn writeCall(self: *Writer, stream: anytype, inst: Zir.Inst.Index) !void {
const inst_data = self.code.instructions.items(.data)[inst].pl_node;
const extra = self.code.extraData(Zir.Inst.Call, inst_data.payload_index);
const args = self.code.refSlice(extra.end, extra.data.flags.args_len);
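`writeFlag` presumably just emits the label when the flag is set, so the three new calls in `writeBuiltinCall` render as `nodiscard `/`nosuspend `/`comptime ` prefixes on the printed instruction. A sketch of such a helper under that assumption (the real one also takes `self: *Writer`):

fn writeFlag(stream: anytype, name: []const u8, flag: bool) !void {
    if (flag) try stream.writeAll(name);
}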


@@ -2760,7 +2760,7 @@ pub const Type = extern union {
.sema_kit => |sk| sk,
else => null,
};
return switch (ty.tag()) {
switch (ty.tag()) {
.u1,
.u8,
.i8,
@@ -3028,7 +3028,7 @@ pub const Type = extern union {
=> unreachable,
.generic_poison => unreachable,
};
}
}
pub fn abiAlignmentAdvancedUnion(
@@ -3076,10 +3076,37 @@ pub const Type = extern union {
return AbiAlignmentAdvanced{ .scalar = max_align };
}
/// May capture a reference to `ty`.
pub fn lazyAbiSize(ty: Type, target: Target, arena: Allocator) !Value {
switch (try ty.abiSizeAdvanced(target, .{ .lazy = arena })) {
.val => |val| return val,
.scalar => |x| return Value.Tag.int_u64.create(arena, x),
}
}
/// Asserts the type has the ABI size already resolved.
/// Types that return false for hasRuntimeBits() return 0.
pub fn abiSize(self: Type, target: Target) u64 {
return switch (self.tag()) {
pub fn abiSize(ty: Type, target: Target) u64 {
return (abiSizeAdvanced(ty, target, .eager) catch unreachable).scalar;
}
const AbiSizeAdvanced = union(enum) {
scalar: u64,
val: Value,
};
/// If you pass `eager` you will get back `scalar` and assert the type is resolved.
/// In this case there will be no error, guaranteed.
/// If you pass `lazy` you may get back `scalar` or `val`.
/// If `val` is returned, a reference to `ty` has been captured.
/// If you pass `sema_kit` you will get back `scalar` and resolve the type if
/// necessary, possibly returning a CompileError.
pub fn abiSizeAdvanced(
ty: Type,
target: Target,
strat: AbiAlignmentAdvancedStrat,
) Module.CompileError!AbiSizeAdvanced {
switch (ty.tag()) {
.fn_noreturn_no_args => unreachable, // represents machine code; not a pointer
.fn_void_no_args => unreachable, // represents machine code; not a pointer
.fn_naked_noreturn_no_args => unreachable, // represents machine code; not a pointer
@@ -3109,32 +3136,59 @@ pub const Type = extern union {
.empty_struct_literal,
.empty_struct,
.void,
=> 0,
=> return AbiSizeAdvanced{ .scalar = 0 },
.@"struct", .tuple, .anon_struct => switch (self.containerLayout()) {
.@"struct", .tuple, .anon_struct => switch (ty.containerLayout()) {
.Packed => {
const struct_obj = self.castTag(.@"struct").?.data;
const struct_obj = ty.castTag(.@"struct").?.data;
switch (strat) {
.sema_kit => |sk| _ = try sk.sema.resolveTypeFields(sk.block, sk.src, ty),
.lazy => |arena| {
if (!struct_obj.haveFieldTypes()) {
return AbiSizeAdvanced{ .val = try Value.Tag.lazy_size.create(arena, ty) };
}
},
.eager => {},
}
var buf: Type.Payload.Bits = undefined;
const int_ty = struct_obj.packedIntegerType(target, &buf);
return int_ty.abiSize(target);
return AbiSizeAdvanced{ .scalar = int_ty.abiSize(target) };
},
else => {
const field_count = self.structFieldCount();
if (field_count == 0) {
return 0;
switch (strat) {
.sema_kit => |sk| try sk.sema.resolveTypeLayout(sk.block, sk.src, ty),
.lazy => |arena| {
if (ty.castTag(.@"struct")) |payload| {
const struct_obj = payload.data;
if (!struct_obj.haveLayout()) {
return AbiSizeAdvanced{ .val = try Value.Tag.lazy_size.create(arena, ty) };
}
}
},
.eager => {},
}
return self.structFieldOffset(field_count, target);
const field_count = ty.structFieldCount();
if (field_count == 0) {
return AbiSizeAdvanced{ .scalar = 0 };
}
return AbiSizeAdvanced{ .scalar = ty.structFieldOffset(field_count, target) };
},
},
.enum_simple, .enum_full, .enum_nonexhaustive, .enum_numbered => {
var buffer: Payload.Bits = undefined;
const int_tag_ty = self.intTagType(&buffer);
return int_tag_ty.abiSize(target);
const int_tag_ty = ty.intTagType(&buffer);
return AbiSizeAdvanced{ .scalar = int_tag_ty.abiSize(target) };
},
.@"union" => {
const union_obj = ty.castTag(.@"union").?.data;
// TODO pass `true` for have_tag when unions have a safety tag
return abiSizeAdvancedUnion(ty, target, strat, union_obj, false);
},
.union_tagged => {
const union_obj = ty.castTag(.union_tagged).?.data;
return abiSizeAdvancedUnion(ty, target, strat, union_obj, true);
},
// TODO pass `true` for have_tag when unions have a safety tag
.@"union" => return self.castTag(.@"union").?.data.abiSize(target, false),
.union_tagged => return self.castTag(.union_tagged).?.data.abiSize(target, true),
.u1,
.u8,
@@ -3146,21 +3200,31 @@ pub const Type = extern union {
.address_space,
.float_mode,
.reduce_op,
=> return 1,
=> return AbiSizeAdvanced{ .scalar = 1 },
.array_u8 => self.castTag(.array_u8).?.data,
.array_u8_sentinel_0 => self.castTag(.array_u8_sentinel_0).?.data + 1,
.array_u8 => return AbiSizeAdvanced{ .scalar = ty.castTag(.array_u8).?.data },
.array_u8_sentinel_0 => return AbiSizeAdvanced{ .scalar = ty.castTag(.array_u8_sentinel_0).?.data + 1 },
.array, .vector => {
const payload = self.cast(Payload.Array).?.data;
const elem_size = payload.elem_type.abiSize(target);
assert(elem_size >= payload.elem_type.abiAlignment(target));
return payload.len * elem_size;
const payload = ty.cast(Payload.Array).?.data;
switch (try payload.elem_type.abiSizeAdvanced(target, strat)) {
.scalar => |elem_size| return AbiSizeAdvanced{ .scalar = payload.len * elem_size },
.val => switch (strat) {
.sema_kit => unreachable,
.eager => unreachable,
.lazy => |arena| return AbiSizeAdvanced{ .val = try Value.Tag.lazy_size.create(arena, ty) },
},
}
},
.array_sentinel => {
const payload = self.castTag(.array_sentinel).?.data;
const elem_size = payload.elem_type.abiSize(target);
assert(elem_size >= payload.elem_type.abiAlignment(target));
return (payload.len + 1) * elem_size;
const payload = ty.castTag(.array_sentinel).?.data;
switch (try payload.elem_type.abiSizeAdvanced(target, strat)) {
.scalar => |elem_size| return AbiSizeAdvanced{ .scalar = (payload.len + 1) * elem_size },
.val => switch (strat) {
.sema_kit => unreachable,
.eager => unreachable,
.lazy => |arena| return AbiSizeAdvanced{ .val = try Value.Tag.lazy_size.create(arena, ty) },
},
}
},
.isize,
@@ -3178,95 +3242,96 @@ pub const Type = extern union {
.manyptr_u8,
.manyptr_const_u8,
.manyptr_const_u8_sentinel_0,
=> return @divExact(target.cpu.arch.ptrBitWidth(), 8),
=> return AbiSizeAdvanced{ .scalar = @divExact(target.cpu.arch.ptrBitWidth(), 8) },
.const_slice,
.mut_slice,
.const_slice_u8,
.const_slice_u8_sentinel_0,
=> return @divExact(target.cpu.arch.ptrBitWidth(), 8) * 2,
=> return AbiSizeAdvanced{ .scalar = @divExact(target.cpu.arch.ptrBitWidth(), 8) * 2 },
.pointer => switch (self.castTag(.pointer).?.data.size) {
.Slice => @divExact(target.cpu.arch.ptrBitWidth(), 8) * 2,
else => @divExact(target.cpu.arch.ptrBitWidth(), 8),
.pointer => switch (ty.castTag(.pointer).?.data.size) {
.Slice => return AbiSizeAdvanced{ .scalar = @divExact(target.cpu.arch.ptrBitWidth(), 8) * 2 },
else => return AbiSizeAdvanced{ .scalar = @divExact(target.cpu.arch.ptrBitWidth(), 8) },
},
.c_short => return @divExact(CType.short.sizeInBits(target), 8),
.c_ushort => return @divExact(CType.ushort.sizeInBits(target), 8),
.c_int => return @divExact(CType.int.sizeInBits(target), 8),
.c_uint => return @divExact(CType.uint.sizeInBits(target), 8),
.c_long => return @divExact(CType.long.sizeInBits(target), 8),
.c_ulong => return @divExact(CType.ulong.sizeInBits(target), 8),
.c_longlong => return @divExact(CType.longlong.sizeInBits(target), 8),
.c_ulonglong => return @divExact(CType.ulonglong.sizeInBits(target), 8),
.c_short => return AbiSizeAdvanced{ .scalar = @divExact(CType.short.sizeInBits(target), 8) },
.c_ushort => return AbiSizeAdvanced{ .scalar = @divExact(CType.ushort.sizeInBits(target), 8) },
.c_int => return AbiSizeAdvanced{ .scalar = @divExact(CType.int.sizeInBits(target), 8) },
.c_uint => return AbiSizeAdvanced{ .scalar = @divExact(CType.uint.sizeInBits(target), 8) },
.c_long => return AbiSizeAdvanced{ .scalar = @divExact(CType.long.sizeInBits(target), 8) },
.c_ulong => return AbiSizeAdvanced{ .scalar = @divExact(CType.ulong.sizeInBits(target), 8) },
.c_longlong => return AbiSizeAdvanced{ .scalar = @divExact(CType.longlong.sizeInBits(target), 8) },
.c_ulonglong => return AbiSizeAdvanced{ .scalar = @divExact(CType.ulonglong.sizeInBits(target), 8) },
.f16 => return 2,
.f32 => return 4,
.f64 => return 8,
.f128 => return 16,
.f16 => return AbiSizeAdvanced{ .scalar = 2 },
.f32 => return AbiSizeAdvanced{ .scalar = 4 },
.f64 => return AbiSizeAdvanced{ .scalar = 8 },
.f128 => return AbiSizeAdvanced{ .scalar = 16 },
.f80 => switch (target.cpu.arch) {
.i386 => return 12,
.x86_64 => return 16,
.i386 => return AbiSizeAdvanced{ .scalar = 12 },
.x86_64 => return AbiSizeAdvanced{ .scalar = 16 },
else => {
var payload: Payload.Bits = .{
.base = .{ .tag = .int_unsigned },
.data = 80,
};
const u80_ty = initPayload(&payload.base);
return abiSize(u80_ty, target);
return AbiSizeAdvanced{ .scalar = abiSize(u80_ty, target) };
},
},
.c_longdouble => switch (CType.longdouble.sizeInBits(target)) {
16 => return abiSize(Type.f16, target),
32 => return abiSize(Type.f32, target),
64 => return abiSize(Type.f64, target),
80 => return abiSize(Type.f80, target),
128 => return abiSize(Type.f128, target),
16 => return AbiSizeAdvanced{ .scalar = abiSize(Type.f16, target) },
32 => return AbiSizeAdvanced{ .scalar = abiSize(Type.f32, target) },
64 => return AbiSizeAdvanced{ .scalar = abiSize(Type.f64, target) },
80 => return AbiSizeAdvanced{ .scalar = abiSize(Type.f80, target) },
128 => return AbiSizeAdvanced{ .scalar = abiSize(Type.f128, target) },
else => unreachable,
},
// TODO revisit this when we have the concept of the error tag type
.error_set,
.error_set_single,
.anyerror_void_error_union,
.anyerror,
.error_set_inferred,
.error_set_merged,
=> return 2, // TODO revisit this when we have the concept of the error tag type
=> return AbiSizeAdvanced{ .scalar = 2 },
.i16, .u16 => return intAbiSize(16, target),
.i32, .u32 => return intAbiSize(32, target),
.i64, .u64 => return intAbiSize(64, target),
.u128, .i128 => return intAbiSize(128, target),
.i16, .u16 => return AbiSizeAdvanced{ .scalar = intAbiSize(16, target) },
.i32, .u32 => return AbiSizeAdvanced{ .scalar = intAbiSize(32, target) },
.i64, .u64 => return AbiSizeAdvanced{ .scalar = intAbiSize(64, target) },
.u128, .i128 => return AbiSizeAdvanced{ .scalar = intAbiSize(128, target) },
.int_signed, .int_unsigned => {
const bits: u16 = self.cast(Payload.Bits).?.data;
if (bits == 0) return 0;
return intAbiSize(bits, target);
const bits: u16 = ty.cast(Payload.Bits).?.data;
if (bits == 0) return AbiSizeAdvanced{ .scalar = 0 };
return AbiSizeAdvanced{ .scalar = intAbiSize(bits, target) };
},
.optional => {
var buf: Payload.ElemType = undefined;
const child_type = self.optionalChild(&buf);
if (!child_type.hasRuntimeBits()) return 1;
const child_type = ty.optionalChild(&buf);
if (!child_type.hasRuntimeBits()) return AbiSizeAdvanced{ .scalar = 1 };
if (child_type.zigTypeTag() == .Pointer and !child_type.isCPtr() and !child_type.isSlice())
return @divExact(target.cpu.arch.ptrBitWidth(), 8);
return AbiSizeAdvanced{ .scalar = @divExact(target.cpu.arch.ptrBitWidth(), 8) };
// Optional types are represented as a struct with the child type as the first
// field and a boolean as the second. Since the child type's abi alignment is
// guaranteed to be >= that of bool's (1 byte) the added size is exactly equal
// to the child type's ABI alignment.
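// Worked example of that formula: `?u64` on a typical 64-bit target
// yields abiAlignment(u64) + abiSize(u64) = 8 + 8 = 16 bytes.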
return child_type.abiAlignment(target) + child_type.abiSize(target);
return AbiSizeAdvanced{ .scalar = child_type.abiAlignment(target) + child_type.abiSize(target) };
},
.error_union => {
const data = self.castTag(.error_union).?.data;
const data = ty.castTag(.error_union).?.data;
if (!data.error_set.hasRuntimeBits() and !data.payload.hasRuntimeBits()) {
return 0;
return AbiSizeAdvanced{ .scalar = 0 };
} else if (!data.error_set.hasRuntimeBits()) {
return data.payload.abiSize(target);
return AbiSizeAdvanced{ .scalar = data.payload.abiSize(target) };
} else if (!data.payload.hasRuntimeBits()) {
return data.error_set.abiSize(target);
return AbiSizeAdvanced{ .scalar = data.error_set.abiSize(target) };
}
const code_align = abiAlignment(data.error_set, target);
const payload_align = abiAlignment(data.payload, target);
@@ -3278,9 +3343,28 @@ pub const Type = extern union {
size = std.mem.alignForwardGeneric(u64, size, payload_align);
size += payload_size;
size = std.mem.alignForwardGeneric(u64, size, big_align);
return size;
return AbiSizeAdvanced{ .scalar = size };
},
};
}
}
pub fn abiSizeAdvancedUnion(
ty: Type,
target: Target,
strat: AbiAlignmentAdvancedStrat,
union_obj: *Module.Union,
have_tag: bool,
) Module.CompileError!AbiSizeAdvanced {
switch (strat) {
.sema_kit => |sk| try sk.sema.resolveTypeLayout(sk.block, sk.src, ty),
.lazy => |arena| {
if (!union_obj.haveLayout()) {
return AbiSizeAdvanced{ .val = try Value.Tag.lazy_size.create(arena, ty) };
}
},
.eager => {},
}
return AbiSizeAdvanced{ .scalar = union_obj.abiSize(target, have_tag) };
}
fn intAbiSize(bits: u16, target: Target) u64 {
@@ -5448,73 +5532,6 @@ pub const Type = extern union {
}
}
/// Asserts the type is an enum.
pub fn enumHasInt(ty: Type, int: Value, mod: *Module) bool {
const S = struct {
fn intInRange(tag_ty: Type, int_val: Value, end: usize, m: *Module) bool {
if (int_val.compareWithZero(.lt)) return false;
var end_payload: Value.Payload.U64 = .{
.base = .{ .tag = .int_u64 },
.data = end,
};
const end_val = Value.initPayload(&end_payload.base);
if (int_val.compare(.gte, end_val, tag_ty, m)) return false;
return true;
}
};
switch (ty.tag()) {
.enum_nonexhaustive => return int.intFitsInType(ty, mod.getTarget()),
.enum_full => {
const enum_full = ty.castTag(.enum_full).?.data;
const tag_ty = enum_full.tag_ty;
if (enum_full.values.count() == 0) {
return S.intInRange(tag_ty, int, enum_full.fields.count(), mod);
} else {
return enum_full.values.containsContext(int, .{
.ty = tag_ty,
.mod = mod,
});
}
},
.enum_numbered => {
const enum_obj = ty.castTag(.enum_numbered).?.data;
const tag_ty = enum_obj.tag_ty;
if (enum_obj.values.count() == 0) {
return S.intInRange(tag_ty, int, enum_obj.fields.count(), mod);
} else {
return enum_obj.values.containsContext(int, .{
.ty = tag_ty,
.mod = mod,
});
}
},
.enum_simple => {
const enum_simple = ty.castTag(.enum_simple).?.data;
const fields_len = enum_simple.fields.count();
const bits = std.math.log2_int_ceil(usize, fields_len);
var buffer: Payload.Bits = .{
.base = .{ .tag = .int_unsigned },
.data = bits,
};
const tag_ty = Type.initPayload(&buffer.base);
return S.intInRange(tag_ty, int, fields_len, mod);
},
.atomic_order,
.atomic_rmw_op,
.calling_convention,
.address_space,
.float_mode,
.reduce_op,
.call_options,
.prefetch_options,
.export_options,
.extern_options,
=> unreachable,
else => unreachable,
}
}
/// This enum does not directly correspond to `std.builtin.TypeId` because
/// it has extra enum tags in it, as a way of using less memory. For example,
/// even though Zig recognizes `*align(10) i32` and `*i32` both as Pointer types
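The doc comment on `abiSizeAdvanced` above captures the pattern this whole file change repeats: one `...Advanced` function takes an eager/lazy/sema_kit strategy, and thin wrappers (`abiSize`, `lazyAbiSize`) pick a strategy and unwrap the result. A self-contained sketch of the shape, with stand-in types rather than the real `Value`/`Module` plumbing:

const std = @import("std");

const LazyVal = struct {}; // stand-in for a `lazy_size` Value capturing the type

const Strat = union(enum) {
    eager,
    lazy: std.mem.Allocator,
    // the real code adds `sema_kit`, which may resolve the type
    // and can therefore fail with a CompileError
};

const Result = union(enum) {
    scalar: u64,
    val: *LazyVal,
};

fn sizeAdvanced(resolved: ?u64, strat: Strat) !Result {
    if (resolved) |x| return Result{ .scalar = x };
    return switch (strat) {
        .eager => unreachable, // eager callers assert resolution already happened
        .lazy => |arena| Result{ .val = try arena.create(LazyVal) },
    };
}

// The non-advanced wrapper erases both the error and the union,
// mirroring `abiSize` over `abiSizeAdvanced`:
fn size(resolved: ?u64) u64 {
    return (sizeAdvanced(resolved, .eager) catch unreachable).scalar;
}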


@@ -179,6 +179,8 @@ pub const Value = extern union {
bound_fn,
/// The ABI alignment of the payload type.
lazy_align,
/// The ABI alignment of the payload type.
lazy_size,
pub const last_no_payload_tag = Tag.empty_array;
pub const no_payload_count = @enumToInt(last_no_payload_tag) + 1;
@@ -289,6 +291,7 @@ pub const Value = extern union {
.ty,
.lazy_align,
.lazy_size,
=> Payload.Ty,
.int_type => Payload.IntType,
@@ -460,7 +463,7 @@ pub const Value = extern union {
.bound_fn,
=> unreachable,
.ty, .lazy_align => {
.ty, .lazy_align, .lazy_size => {
const payload = self.cast(Payload.Ty).?;
const new_payload = try arena.create(Payload.Ty);
new_payload.* = .{
@@ -720,6 +723,11 @@ pub const Value = extern union {
try val.castTag(.lazy_align).?.data.dump("", options, out_stream);
return try out_stream.writeAll(")");
},
.lazy_size => {
try out_stream.writeAll("@sizeOf(");
try val.castTag(.lazy_size).?.data.dump("", options, out_stream);
return try out_stream.writeAll(")");
},
.int_type => {
const int_type = val.castTag(.int_type).?.data;
return out_stream.print("{s}{d}", .{
@@ -1040,6 +1048,14 @@ pub const Value = extern union {
const x = ty.abiAlignment(target);
return BigIntMutable.init(&space.limbs, x).toConst();
},
.lazy_size => {
const ty = val.castTag(.lazy_size).?.data;
if (sema_kit) |sk| {
try sk.sema.resolveTypeLayout(sk.block, sk.src, ty);
}
const x = ty.abiSize(target);
return BigIntMutable.init(&space.limbs, x).toConst();
},
.elem_ptr => {
const elem_ptr = val.castTag(.elem_ptr).?.data;
@@ -1087,6 +1103,14 @@ pub const Value = extern union {
return ty.abiAlignment(target);
}
},
.lazy_size => {
const ty = val.castTag(.lazy_size).?.data;
if (sema_kit) |sk| {
return (try ty.abiSizeAdvanced(target, .{ .sema_kit = sk })).scalar;
} else {
return ty.abiSize(target);
}
},
else => return null,
}
@@ -1670,118 +1694,6 @@ pub const Value = extern union {
}
}
/// Asserts the value is an integer, and the destination type is ComptimeInt or Int.
/// Vectors are also accepted. Vector results are reduced with AND.
pub fn intFitsInType(self: Value, ty: Type, target: Target) bool {
switch (self.tag()) {
.zero,
.undef,
.bool_false,
=> return true,
.one,
.bool_true,
=> switch (ty.zigTypeTag()) {
.Int => {
const info = ty.intInfo(target);
return switch (info.signedness) {
.signed => info.bits >= 2,
.unsigned => info.bits >= 1,
};
},
.ComptimeInt => return true,
else => unreachable,
},
.lazy_align => {
const info = ty.intInfo(target);
const max_needed_bits = @as(u16, 16) + @boolToInt(info.signedness == .signed);
// If it is u16 or bigger we know the alignment fits without resolving it.
if (info.bits >= max_needed_bits) return true;
const x = self.castTag(.lazy_align).?.data.abiAlignment(target);
if (x == 0) return true;
const actual_needed_bits = std.math.log2(x) + 1 + @boolToInt(info.signedness == .signed);
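// e.g. an alignment of 8 needs log2(8) + 1 = 4 bits in an unsigned integer type.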
return info.bits >= actual_needed_bits;
},
.int_u64 => switch (ty.zigTypeTag()) {
.Int => {
const x = self.castTag(.int_u64).?.data;
if (x == 0) return true;
const info = ty.intInfo(target);
const needed_bits = std.math.log2(x) + 1 + @boolToInt(info.signedness == .signed);
return info.bits >= needed_bits;
},
.ComptimeInt => return true,
else => unreachable,
},
.int_i64 => switch (ty.zigTypeTag()) {
.Int => {
const x = self.castTag(.int_i64).?.data;
if (x == 0) return true;
const info = ty.intInfo(target);
if (info.signedness == .unsigned and x < 0)
return false;
var buffer: BigIntSpace = undefined;
return self.toBigInt(&buffer, target).fitsInTwosComp(info.signedness, info.bits);
},
.ComptimeInt => return true,
else => unreachable,
},
.int_big_positive => switch (ty.zigTypeTag()) {
.Int => {
const info = ty.intInfo(target);
return self.castTag(.int_big_positive).?.asBigInt().fitsInTwosComp(info.signedness, info.bits);
},
.ComptimeInt => return true,
else => unreachable,
},
.int_big_negative => switch (ty.zigTypeTag()) {
.Int => {
const info = ty.intInfo(target);
return self.castTag(.int_big_negative).?.asBigInt().fitsInTwosComp(info.signedness, info.bits);
},
.ComptimeInt => return true,
else => unreachable,
},
.the_only_possible_value => {
assert(ty.intInfo(target).bits == 0);
return true;
},
.decl_ref_mut,
.extern_fn,
.decl_ref,
.function,
.variable,
=> switch (ty.zigTypeTag()) {
.Int => {
const info = ty.intInfo(target);
const ptr_bits = target.cpu.arch.ptrBitWidth();
return switch (info.signedness) {
.signed => info.bits > ptr_bits,
.unsigned => info.bits >= ptr_bits,
};
},
.ComptimeInt => return true,
else => unreachable,
},
.aggregate => {
assert(ty.zigTypeTag() == .Vector);
for (self.castTag(.aggregate).?.data) |elem| {
if (!elem.intFitsInType(ty.scalarType(), target)) {
return false;
}
}
return true;
},
else => unreachable,
}
}
/// Converts an integer or a float to a float. May result in a loss of information.
/// Caller can find out by equality checking the result against the operand.
pub fn floatCast(self: Value, arena: Allocator, dest_ty: Type, target: Target) !Value {
@@ -1849,6 +1761,14 @@ pub const Value = extern union {
return .eq;
}
},
.lazy_size => {
const ty = lhs.castTag(.lazy_size).?.data;
if (try ty.hasRuntimeBitsAdvanced(false, sema_kit)) {
return .gt;
} else {
return .eq;
}
},
.float_16 => std.math.order(lhs.castTag(.float_16).?.data, 0),
.float_32 => std.math.order(lhs.castTag(.float_32).?.data, 0),
@@ -1992,38 +1912,28 @@ pub const Value = extern union {
};
}
/// Asserts the values are comparable vectors of type `ty`.
pub fn compareVector(
lhs: Value,
op: std.math.CompareOperator,
rhs: Value,
ty: Type,
allocator: Allocator,
mod: *Module,
) !Value {
assert(ty.zigTypeTag() == .Vector);
const result_data = try allocator.alloc(Value, ty.vectorLen());
for (result_data) |*scalar, i| {
const res_bool = compareScalar(lhs.indexVectorlike(i), op, rhs.indexVectorlike(i), ty.scalarType(), mod);
scalar.* = makeBool(res_bool);
}
return Value.Tag.aggregate.create(allocator, result_data);
}
/// Asserts the value is comparable.
/// Vector results will be reduced with AND.
pub fn compareWithZero(lhs: Value, op: std.math.CompareOperator) bool {
return compareWithZeroAdvanced(lhs, op, null) catch unreachable;
}
pub fn compareWithZeroAdvanced(
lhs: Value,
op: std.math.CompareOperator,
sema_kit: ?Module.WipAnalysis,
) Module.CompileError!bool {
switch (lhs.tag()) {
.repeated => return lhs.castTag(.repeated).?.data.compareWithZero(op),
.repeated => return lhs.castTag(.repeated).?.data.compareWithZeroAdvanced(op, sema_kit),
.aggregate => {
for (lhs.castTag(.aggregate).?.data) |elem_val| {
if (!elem_val.compareWithZero(op)) return false;
if (!(try elem_val.compareWithZeroAdvanced(op, sema_kit))) return false;
}
return true;
},
else => {},
}
return orderAgainstZero(lhs).compare(op);
return (try orderAgainstZeroAdvanced(lhs, sema_kit)).compare(op);
}
/// This function is used by hash maps and so treats floating-point NaNs as equal
@@ -2032,9 +1942,20 @@ pub const Value = extern union {
/// This function has to be able to support implicit coercion of `a` to `ty`. That is,
/// `ty` will be an exactly correct Type for `b` but it may be a post-coerced Type
/// for `a`. This function must act *as if* `a` has been coerced to `ty`. This complication
/// is required in order to make generic function instantiation effecient - specifically
/// is required in order to make generic function instantiation efficient - specifically
/// the insertion into the monomorphized function table.
pub fn eql(a: Value, b: Value, ty: Type, mod: *Module) bool {
return eqlAdvanced(a, b, ty, mod, null) catch unreachable;
}
/// If `null` is provided for `sema_kit` then it is guaranteed no error will be returned.
pub fn eqlAdvanced(
a: Value,
b: Value,
ty: Type,
mod: *Module,
sema_kit: ?Module.WipAnalysis,
) Module.CompileError!bool {
const target = mod.getTarget();
const a_tag = a.tag();
const b_tag = b.tag();
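The `eql`/`eqlAdvanced` split follows the same wrapper shape as `compareWithZero` above: the plain function passes a null `sema_kit`, which is documented never to error, and strips the error set with `catch unreachable`. A minimal sketch of the shape (hypothetical names, stand-in kit type):

const SemaKit = struct {}; // stand-in for Module.WipAnalysis

fn eqlSketch(a: u64, b: u64) bool {
    // With no sema kit, no resolution runs, so no error is possible.
    return eqlAdvancedSketch(a, b, null) catch unreachable;
}

fn eqlAdvancedSketch(a: u64, b: u64, sema_kit: ?SemaKit) error{AnalysisFail}!bool {
    if (sema_kit != null) {
        // With a kit, lazy values may be resolved here, which can fail.
    }
    return a == b;
}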
@@ -2055,31 +1976,33 @@ pub const Value = extern union {
const a_payload = a.castTag(.opt_payload).?.data;
const b_payload = b.castTag(.opt_payload).?.data;
var buffer: Type.Payload.ElemType = undefined;
return eql(a_payload, b_payload, ty.optionalChild(&buffer), mod);
return eqlAdvanced(a_payload, b_payload, ty.optionalChild(&buffer), mod, sema_kit);
},
.slice => {
const a_payload = a.castTag(.slice).?.data;
const b_payload = b.castTag(.slice).?.data;
if (!eql(a_payload.len, b_payload.len, Type.usize, mod)) return false;
if (!(try eqlAdvanced(a_payload.len, b_payload.len, Type.usize, mod, sema_kit))) {
return false;
}
var ptr_buf: Type.SlicePtrFieldTypeBuffer = undefined;
const ptr_ty = ty.slicePtrFieldType(&ptr_buf);
return eql(a_payload.ptr, b_payload.ptr, ptr_ty, mod);
return eqlAdvanced(a_payload.ptr, b_payload.ptr, ptr_ty, mod, sema_kit);
},
.elem_ptr => {
const a_payload = a.castTag(.elem_ptr).?.data;
const b_payload = b.castTag(.elem_ptr).?.data;
if (a_payload.index != b_payload.index) return false;
return eql(a_payload.array_ptr, b_payload.array_ptr, ty, mod);
return eqlAdvanced(a_payload.array_ptr, b_payload.array_ptr, ty, mod, sema_kit);
},
.field_ptr => {
const a_payload = a.castTag(.field_ptr).?.data;
const b_payload = b.castTag(.field_ptr).?.data;
if (a_payload.field_index != b_payload.field_index) return false;
return eql(a_payload.container_ptr, b_payload.container_ptr, ty, mod);
return eqlAdvanced(a_payload.container_ptr, b_payload.container_ptr, ty, mod, sema_kit);
},
.@"error" => {
const a_name = a.castTag(.@"error").?.data.name;
@@ -2089,7 +2012,7 @@ pub const Value = extern union {
.eu_payload => {
const a_payload = a.castTag(.eu_payload).?.data;
const b_payload = b.castTag(.eu_payload).?.data;
return eql(a_payload, b_payload, ty.errorUnionPayload(), mod);
return eqlAdvanced(a_payload, b_payload, ty.errorUnionPayload(), mod, sema_kit);
},
.eu_payload_ptr => @panic("TODO: Implement more pointer eql cases"),
.opt_payload_ptr => @panic("TODO: Implement more pointer eql cases"),
@@ -2107,7 +2030,9 @@ pub const Value = extern union {
const types = ty.tupleFields().types;
assert(types.len == a_field_vals.len);
for (types) |field_ty, i| {
if (!eql(a_field_vals[i], b_field_vals[i], field_ty, mod)) return false;
if (!(try eqlAdvanced(a_field_vals[i], b_field_vals[i], field_ty, mod, sema_kit))) {
return false;
}
}
return true;
}
@@ -2116,7 +2041,9 @@ pub const Value = extern union {
const fields = ty.structFields().values();
assert(fields.len == a_field_vals.len);
for (fields) |field, i| {
if (!eql(a_field_vals[i], b_field_vals[i], field.ty, mod)) return false;
if (!(try eqlAdvanced(a_field_vals[i], b_field_vals[i], field.ty, mod, sema_kit))) {
return false;
}
}
return true;
}
@@ -2125,7 +2052,9 @@ pub const Value = extern union {
for (a_field_vals) |a_elem, i| {
const b_elem = b_field_vals[i];
if (!eql(a_elem, b_elem, elem_ty, mod)) return false;
if (!(try eqlAdvanced(a_elem, b_elem, elem_ty, mod, sema_kit))) {
return false;
}
}
return true;
},
@@ -2135,7 +2064,7 @@ pub const Value = extern union {
switch (ty.containerLayout()) {
.Packed, .Extern => {
const tag_ty = ty.unionTagTypeHypothetical();
if (!a_union.tag.eql(b_union.tag, tag_ty, mod)) {
if (!(try a_union.tag.eqlAdvanced(b_union.tag, tag_ty, mod, sema_kit))) {
// In this case, we must disregard mismatching tags and compare
// based on the in-memory bytes of the payloads.
@panic("TODO comptime comparison of extern union values with mismatching tags");
@@ -2143,13 +2072,13 @@ pub const Value = extern union {
},
.Auto => {
const tag_ty = ty.unionTagTypeHypothetical();
if (!a_union.tag.eql(b_union.tag, tag_ty, mod)) {
if (!(try a_union.tag.eqlAdvanced(b_union.tag, tag_ty, mod, sema_kit))) {
return false;
}
},
}
const active_field_ty = ty.unionFieldType(a_union.tag, mod);
return a_union.val.eql(b_union.val, active_field_ty, mod);
return a_union.val.eqlAdvanced(b_union.val, active_field_ty, mod, sema_kit);
},
else => {},
} else if (a_tag == .null_value or b_tag == .null_value) {
@@ -2183,7 +2112,7 @@ pub const Value = extern union {
const b_val = b.enumToInt(ty, &buf_b);
var buf_ty: Type.Payload.Bits = undefined;
const int_ty = ty.intTagType(&buf_ty);
return eql(a_val, b_val, int_ty, mod);
return eqlAdvanced(a_val, b_val, int_ty, mod, sema_kit);
},
.Array, .Vector => {
const len = ty.arrayLen();
@@ -2194,7 +2123,9 @@ pub const Value = extern union {
while (i < len) : (i += 1) {
const a_elem = elemValueBuffer(a, mod, i, &a_buf);
const b_elem = elemValueBuffer(b, mod, i, &b_buf);
if (!eql(a_elem, b_elem, elem_ty, mod)) return false;
if (!(try eqlAdvanced(a_elem, b_elem, elem_ty, mod, sema_kit))) {
return false;
}
}
return true;
},
@@ -2218,12 +2149,12 @@ pub const Value = extern union {
.base = .{ .tag = .opt_payload },
.data = a,
};
return eql(Value.initPayload(&buffer.base), b, ty, mod);
return eqlAdvanced(Value.initPayload(&buffer.base), b, ty, mod, sema_kit);
}
},
else => {},
}
return order(a, b, target).compare(.eq);
return (try orderAdvanced(a, b, target, sema_kit)).compare(.eq);
}
/// This function is used by hash maps and so treats floating-point NaNs as equal
@@ -2502,6 +2433,7 @@ pub const Value = extern union {
.bool_true,
.the_only_possible_value,
.lazy_align,
.lazy_size,
=> return hashInt(ptr_val, hasher, target),
else => unreachable,
@@ -2882,54 +2814,6 @@ pub const Value = extern union {
}
}
pub fn floatToInt(val: Value, arena: Allocator, float_ty: Type, int_ty: Type, target: Target) error{ FloatCannotFit, OutOfMemory }!Value {
if (float_ty.zigTypeTag() == .Vector) {
const result_data = try arena.alloc(Value, float_ty.vectorLen());
for (result_data) |*scalar, i| {
scalar.* = try floatToIntScalar(val.indexVectorlike(i), arena, int_ty.scalarType(), target);
}
return Value.Tag.aggregate.create(arena, result_data);
}
return floatToIntScalar(val, arena, int_ty, target);
}
pub fn floatToIntScalar(val: Value, arena: Allocator, int_ty: Type, target: Target) error{ FloatCannotFit, OutOfMemory }!Value {
const Limb = std.math.big.Limb;
var value = val.toFloat(f64); // TODO: f128 ?
if (std.math.isNan(value) or std.math.isInf(value)) {
return error.FloatCannotFit;
}
const isNegative = std.math.signbit(value);
value = @fabs(value);
const floored = @floor(value);
var rational = try std.math.big.Rational.init(arena);
defer rational.deinit();
rational.setFloat(f64, floored) catch |err| switch (err) {
error.NonFiniteFloat => unreachable,
error.OutOfMemory => return error.OutOfMemory,
};
// The float is reduced in rational.setFloat, so we assert that denominator is equal to one
const bigOne = std.math.big.int.Const{ .limbs = &.{1}, .positive = true };
assert(rational.q.toConst().eqAbs(bigOne));
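// e.g. 3.75 floors to 3.0 == 3/1, so q is one and p's limbs hold the integer result.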
const result_limbs = try arena.dupe(Limb, rational.p.toConst().limbs);
const result = if (isNegative)
try Value.Tag.int_big_negative.create(arena, result_limbs)
else
try Value.Tag.int_big_positive.create(arena, result_limbs);
if (result.intFitsInType(int_ty, target)) {
return result;
} else {
return error.FloatCannotFit;
}
}
fn calcLimbLenFloat(scalar: anytype) usize {
if (scalar == 0) {
return 1;
@@ -2945,96 +2829,7 @@ pub const Value = extern union {
wrapped_result: Value,
};
pub fn intAddWithOverflow(
lhs: Value,
rhs: Value,
ty: Type,
arena: Allocator,
target: Target,
) !OverflowArithmeticResult {
if (ty.zigTypeTag() == .Vector) {
const overflowed_data = try arena.alloc(Value, ty.vectorLen());
const result_data = try arena.alloc(Value, ty.vectorLen());
for (result_data) |*scalar, i| {
const of_math_result = try intAddWithOverflowScalar(lhs.indexVectorlike(i), rhs.indexVectorlike(i), ty.scalarType(), arena, target);
overflowed_data[i] = of_math_result.overflowed;
scalar.* = of_math_result.wrapped_result;
}
return OverflowArithmeticResult{
.overflowed = try Value.Tag.aggregate.create(arena, overflowed_data),
.wrapped_result = try Value.Tag.aggregate.create(arena, result_data),
};
}
return intAddWithOverflowScalar(lhs, rhs, ty, arena, target);
}
pub fn intAddWithOverflowScalar(
lhs: Value,
rhs: Value,
ty: Type,
arena: Allocator,
target: Target,
) !OverflowArithmeticResult {
const info = ty.intInfo(target);
var lhs_space: Value.BigIntSpace = undefined;
var rhs_space: Value.BigIntSpace = undefined;
const lhs_bigint = lhs.toBigInt(&lhs_space, target);
const rhs_bigint = rhs.toBigInt(&rhs_space, target);
const limbs = try arena.alloc(
std.math.big.Limb,
std.math.big.int.calcTwosCompLimbCount(info.bits),
);
var result_bigint = BigIntMutable{ .limbs = limbs, .positive = undefined, .len = undefined };
const overflowed = result_bigint.addWrap(lhs_bigint, rhs_bigint, info.signedness, info.bits);
const result = try fromBigInt(arena, result_bigint.toConst());
return OverflowArithmeticResult{
.overflowed = makeBool(overflowed),
.wrapped_result = result,
};
}
/// Supports both (vectors of) floats and ints; handles undefined scalars.
pub fn numberAddWrap(
lhs: Value,
rhs: Value,
ty: Type,
arena: Allocator,
target: Target,
) !Value {
if (ty.zigTypeTag() == .Vector) {
const result_data = try arena.alloc(Value, ty.vectorLen());
for (result_data) |*scalar, i| {
scalar.* = try numberAddWrapScalar(lhs.indexVectorlike(i), rhs.indexVectorlike(i), ty.scalarType(), arena, target);
}
return Value.Tag.aggregate.create(arena, result_data);
}
return numberAddWrapScalar(lhs, rhs, ty, arena, target);
}
/// Supports both floats and ints; handles undefined.
pub fn numberAddWrapScalar(
lhs: Value,
rhs: Value,
ty: Type,
arena: Allocator,
target: Target,
) !Value {
if (lhs.isUndef() or rhs.isUndef()) return Value.initTag(.undef);
if (ty.zigTypeTag() == .ComptimeInt) {
return intAdd(lhs, rhs, ty, arena, target);
}
if (ty.isAnyFloat()) {
return floatAdd(lhs, rhs, ty, arena, target);
}
const overflow_result = try intAddWithOverflow(lhs, rhs, ty, arena, target);
return overflow_result.wrapped_result;
}
fn fromBigInt(arena: Allocator, big_int: BigIntConst) !Value {
pub fn fromBigInt(arena: Allocator, big_int: BigIntConst) !Value {
if (big_int.positive) {
if (big_int.to(u64)) |x| {
return Value.Tag.int_u64.create(arena, x);
@@ -3094,95 +2889,6 @@ pub const Value = extern union {
return fromBigInt(arena, result_bigint.toConst());
}
pub fn intSubWithOverflow(
lhs: Value,
rhs: Value,
ty: Type,
arena: Allocator,
target: Target,
) !OverflowArithmeticResult {
if (ty.zigTypeTag() == .Vector) {
const overflowed_data = try arena.alloc(Value, ty.vectorLen());
const result_data = try arena.alloc(Value, ty.vectorLen());
for (result_data) |*scalar, i| {
const of_math_result = try intSubWithOverflowScalar(lhs.indexVectorlike(i), rhs.indexVectorlike(i), ty.scalarType(), arena, target);
overflowed_data[i] = of_math_result.overflowed;
scalar.* = of_math_result.wrapped_result;
}
return OverflowArithmeticResult{
.overflowed = try Value.Tag.aggregate.create(arena, overflowed_data),
.wrapped_result = try Value.Tag.aggregate.create(arena, result_data),
};
}
return intSubWithOverflowScalar(lhs, rhs, ty, arena, target);
}
pub fn intSubWithOverflowScalar(
lhs: Value,
rhs: Value,
ty: Type,
arena: Allocator,
target: Target,
) !OverflowArithmeticResult {
const info = ty.intInfo(target);
var lhs_space: Value.BigIntSpace = undefined;
var rhs_space: Value.BigIntSpace = undefined;
const lhs_bigint = lhs.toBigInt(&lhs_space, target);
const rhs_bigint = rhs.toBigInt(&rhs_space, target);
const limbs = try arena.alloc(
std.math.big.Limb,
std.math.big.int.calcTwosCompLimbCount(info.bits),
);
var result_bigint = BigIntMutable{ .limbs = limbs, .positive = undefined, .len = undefined };
const overflowed = result_bigint.subWrap(lhs_bigint, rhs_bigint, info.signedness, info.bits);
const wrapped_result = try fromBigInt(arena, result_bigint.toConst());
return OverflowArithmeticResult{
.overflowed = makeBool(overflowed),
.wrapped_result = wrapped_result,
};
}
/// Supports both (vectors of) floats and ints; handles undefined scalars.
pub fn numberSubWrap(
lhs: Value,
rhs: Value,
ty: Type,
arena: Allocator,
target: Target,
) !Value {
if (ty.zigTypeTag() == .Vector) {
const result_data = try arena.alloc(Value, ty.vectorLen());
for (result_data) |*scalar, i| {
scalar.* = try numberSubWrapScalar(lhs.indexVectorlike(i), rhs.indexVectorlike(i), ty.scalarType(), arena, target);
}
return Value.Tag.aggregate.create(arena, result_data);
}
return numberSubWrapScalar(lhs, rhs, ty, arena, target);
}
/// Supports both floats and ints; handles undefined.
pub fn numberSubWrapScalar(
lhs: Value,
rhs: Value,
ty: Type,
arena: Allocator,
target: Target,
) !Value {
if (lhs.isUndef() or rhs.isUndef()) return Value.initTag(.undef);
if (ty.zigTypeTag() == .ComptimeInt) {
return intSub(lhs, rhs, ty, arena, target);
}
if (ty.isAnyFloat()) {
return floatSub(lhs, rhs, ty, arena, target);
}
const overflow_result = try intSubWithOverflow(lhs, rhs, ty, arena, target);
return overflow_result.wrapped_result;
}
/// Supports (vectors of) integers only; asserts neither operand is undefined.
pub fn intSubSat(
lhs: Value,
@@ -3559,60 +3265,6 @@ pub const Value = extern union {
return fromBigInt(arena, result_bigint.toConst());
}
pub fn intAdd(lhs: Value, rhs: Value, ty: Type, allocator: Allocator, target: Target) !Value {
if (ty.zigTypeTag() == .Vector) {
const result_data = try allocator.alloc(Value, ty.vectorLen());
for (result_data) |*scalar, i| {
scalar.* = try intAddScalar(lhs.indexVectorlike(i), rhs.indexVectorlike(i), allocator, target);
}
return Value.Tag.aggregate.create(allocator, result_data);
}
return intAddScalar(lhs, rhs, allocator, target);
}
pub fn intAddScalar(lhs: Value, rhs: Value, allocator: Allocator, target: Target) !Value {
// TODO is this a performance issue? maybe we should try the operation without
// resorting to BigInt first.
var lhs_space: Value.BigIntSpace = undefined;
var rhs_space: Value.BigIntSpace = undefined;
const lhs_bigint = lhs.toBigInt(&lhs_space, target);
const rhs_bigint = rhs.toBigInt(&rhs_space, target);
const limbs = try allocator.alloc(
std.math.big.Limb,
std.math.max(lhs_bigint.limbs.len, rhs_bigint.limbs.len) + 1,
);
var result_bigint = BigIntMutable{ .limbs = limbs, .positive = undefined, .len = undefined };
result_bigint.add(lhs_bigint, rhs_bigint);
return fromBigInt(allocator, result_bigint.toConst());
}
pub fn intSub(lhs: Value, rhs: Value, ty: Type, allocator: Allocator, target: Target) !Value {
if (ty.zigTypeTag() == .Vector) {
const result_data = try allocator.alloc(Value, ty.vectorLen());
for (result_data) |*scalar, i| {
scalar.* = try intSubScalar(lhs.indexVectorlike(i), rhs.indexVectorlike(i), allocator, target);
}
return Value.Tag.aggregate.create(allocator, result_data);
}
return intSubScalar(lhs, rhs, allocator, target);
}
pub fn intSubScalar(lhs: Value, rhs: Value, allocator: Allocator, target: Target) !Value {
// TODO is this a performance issue? maybe we should try the operation without
// resorting to BigInt first.
var lhs_space: Value.BigIntSpace = undefined;
var rhs_space: Value.BigIntSpace = undefined;
const lhs_bigint = lhs.toBigInt(&lhs_space, target);
const rhs_bigint = rhs.toBigInt(&rhs_space, target);
const limbs = try allocator.alloc(
std.math.big.Limb,
std.math.max(lhs_bigint.limbs.len, rhs_bigint.limbs.len) + 1,
);
var result_bigint = BigIntMutable{ .limbs = limbs, .positive = undefined, .len = undefined };
result_bigint.sub(lhs_bigint, rhs_bigint);
return fromBigInt(allocator, result_bigint.toConst());
}
pub fn intDiv(lhs: Value, rhs: Value, ty: Type, allocator: Allocator, target: Target) !Value {
if (ty.zigTypeTag() == .Vector) {
const result_data = try allocator.alloc(Value, ty.vectorLen());
@@ -4129,114 +3781,6 @@ pub const Value = extern union {
return fromBigInt(allocator, result_bigint.toConst());
}
pub fn floatAdd(
lhs: Value,
rhs: Value,
float_type: Type,
arena: Allocator,
target: Target,
) !Value {
if (float_type.zigTypeTag() == .Vector) {
const result_data = try arena.alloc(Value, float_type.vectorLen());
for (result_data) |*scalar, i| {
scalar.* = try floatAddScalar(lhs.indexVectorlike(i), rhs.indexVectorlike(i), float_type.scalarType(), arena, target);
}
return Value.Tag.aggregate.create(arena, result_data);
}
return floatAddScalar(lhs, rhs, float_type, arena, target);
}
pub fn floatAddScalar(
lhs: Value,
rhs: Value,
float_type: Type,
arena: Allocator,
target: Target,
) !Value {
switch (float_type.floatBits(target)) {
16 => {
const lhs_val = lhs.toFloat(f16);
const rhs_val = rhs.toFloat(f16);
return Value.Tag.float_16.create(arena, lhs_val + rhs_val);
},
32 => {
const lhs_val = lhs.toFloat(f32);
const rhs_val = rhs.toFloat(f32);
return Value.Tag.float_32.create(arena, lhs_val + rhs_val);
},
64 => {
const lhs_val = lhs.toFloat(f64);
const rhs_val = rhs.toFloat(f64);
return Value.Tag.float_64.create(arena, lhs_val + rhs_val);
},
80 => {
const lhs_val = lhs.toFloat(f80);
const rhs_val = rhs.toFloat(f80);
return Value.Tag.float_80.create(arena, lhs_val + rhs_val);
},
128 => {
const lhs_val = lhs.toFloat(f128);
const rhs_val = rhs.toFloat(f128);
return Value.Tag.float_128.create(arena, lhs_val + rhs_val);
},
else => unreachable,
}
}
pub fn floatSub(
lhs: Value,
rhs: Value,
float_type: Type,
arena: Allocator,
target: Target,
) !Value {
if (float_type.zigTypeTag() == .Vector) {
const result_data = try arena.alloc(Value, float_type.vectorLen());
for (result_data) |*scalar, i| {
scalar.* = try floatSubScalar(lhs.indexVectorlike(i), rhs.indexVectorlike(i), float_type.scalarType(), arena, target);
}
return Value.Tag.aggregate.create(arena, result_data);
}
return floatSubScalar(lhs, rhs, float_type, arena, target);
}
pub fn floatSubScalar(
lhs: Value,
rhs: Value,
float_type: Type,
arena: Allocator,
target: Target,
) !Value {
switch (float_type.floatBits(target)) {
16 => {
const lhs_val = lhs.toFloat(f16);
const rhs_val = rhs.toFloat(f16);
return Value.Tag.float_16.create(arena, lhs_val - rhs_val);
},
32 => {
const lhs_val = lhs.toFloat(f32);
const rhs_val = rhs.toFloat(f32);
return Value.Tag.float_32.create(arena, lhs_val - rhs_val);
},
64 => {
const lhs_val = lhs.toFloat(f64);
const rhs_val = rhs.toFloat(f64);
return Value.Tag.float_64.create(arena, lhs_val - rhs_val);
},
80 => {
const lhs_val = lhs.toFloat(f80);
const rhs_val = rhs.toFloat(f80);
return Value.Tag.float_80.create(arena, lhs_val - rhs_val);
},
128 => {
const lhs_val = lhs.toFloat(f128);
const rhs_val = rhs.toFloat(f128);
return Value.Tag.float_128.create(arena, lhs_val - rhs_val);
},
else => unreachable,
}
}
pub fn floatNeg(
val: Value,
float_type: Type,


@@ -19,7 +19,11 @@ test "super basic invocations" {
}
test "basic invocations" {
if (builtin.zig_backend != .stage1) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
const foo = struct {
fn foo() i32 {
@@ -41,7 +45,10 @@ test "basic invocations" {
}
{
// call of non comptime-known function
var alias_foo = foo;
var alias_foo = switch (builtin.zig_backend) {
.stage1 => foo,
else => &foo,
};
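// In stage2 a runtime-known "function value" is a function pointer, hence `&foo`;
// stage1 still accepts the bare function here.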
try expect(@call(.{ .modifier = .no_async }, alias_foo, .{}) == 1234);
try expect(@call(.{ .modifier = .never_tail }, alias_foo, .{}) == 1234);
try expect(@call(.{ .modifier = .never_inline }, alias_foo, .{}) == 1234);
@@ -79,26 +86,6 @@ test "tuple parameters" {
}
}
test "comptime call with bound function as parameter" {
if (builtin.zig_backend != .stage1) return error.SkipZigTest; // TODO
const S = struct {
fn ReturnType(func: anytype) type {
return switch (@typeInfo(@TypeOf(func))) {
.BoundFn => |info| info,
else => unreachable,
}.return_type orelse void;
}
fn call_me_maybe() ?i32 {
return 123;
}
};
var inst: S = undefined;
try expectEqual(?i32, S.ReturnType(inst.call_me_maybe));
}
test "result location of function call argument through runtime condition and struct init" {
if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO


@@ -630,7 +630,9 @@ test "vector casts" {
}
test "@floatCast cast down" {
if (builtin.zig_backend != .stage1) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
{
var double: f64 = 0.001534;


@@ -169,8 +169,6 @@ test "@bitOffsetOf" {
}
test "@sizeOf(T) == 0 doesn't force resolving struct size" {
if (builtin.zig_backend != .stage1) return error.SkipZigTest; // TODO
const S = struct {
const Foo = struct {
y: if (@sizeOf(Foo) == 0) u64 else u32,
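(The stage2 skip removed here is presumably what the new `lazy_size` value enables: `@sizeOf(Foo)` inside `Foo`'s own field types can now yield a lazy value instead of forcing immediate size resolution, which would fail at this point.)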